##// END OF EJS Templates
path: pass `path` to `peer` in `hg incoming`...
marmoute -
r50613:e64b1e9f default
parent child Browse files
Show More
@@ -1,1642 +1,1647 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names against ``other`` and merge them into ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair.  Returns an
    ``(allrevs, checkoutrev)`` pair; ``checkoutrev`` is the revision a
    caller should check out by default.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch involved: hand the revisions straight back
        return (revs or None), (revs[0] if revs else None)

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def resolve(branch):
        # Append the heads of ``branch`` to ``revs``; True when the branch
        # is known to the remote.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(n) for n in reversed(branchmap[branch]))
            return True
        return False

    for branch in branches:
        if not resolve(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # an unresolvable hashbranch may still be a raw hash/revision
    if hashbranch and not resolve(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the location is a bundle, hence not "local"
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        path = util.expandpath(urlutil.urllocalpath(path))
        factory = bundlerepo if _isfile(path) else localrepo
        return factory.instance(ui, path, *args, **kwargs)
144
144
145
145
# URL schemes served by instantiating a (local-ish) repository object
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL schemes served through a wire-protocol peer object
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
158
159
159
def _peerlookup(path):
    """Return the module/factory responsible for ``path``'s URL scheme.

    Peer schemes win over repo schemes; anything unknown is treated as a
    local path.
    """
    scheme = urlutil.url(path).scheme or b'file'
    handler = peer_schemes.get(scheme)
    if handler is None:
        handler = repo_schemes.get(scheme, LocalFactory)
    return handler
168
168
169
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: this spelling is deprecated
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    cls = _peerlookup(repo)
    cls.instance  # make sure we load the module
    if util.safehasattr(cls, 'islocal'):
        return cls.islocal(repo)  # pytype: disable=module-attr
    return False
180
180
181
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
189
189
190
190
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []


def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension ``reposetup`` hooks on ``obj``.

    ``obj`` may be a repository or a peer; wire peers additionally get the
    registered ``wirepeersetupfuncs`` applied.
    """
    ui = getattr(obj, "ui", ui)
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            msg = b' > reposetup for %s took %s\n'
            ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peer: apply peer-specific initialization
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
213
213
214
214
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        # a peer-only scheme cannot yield a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the standard "visible" (non-hidden changesets) view
    return repo.filtered(b'visible')
241
241
242
242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme not in peer_schemes:
        # not a wire scheme: this is a local repository, wrap it in a peer
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        return repo.peer()
    cls = peer_schemes[scheme]
    new_peer = cls.instance(
        rui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(rui, new_peer)
    return new_peer
274
274
275
275
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last meaningful path component, or empty when there is none
    return os.path.basename(os.path.normpath(path)) if path else b''
296
296
297
297
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: store lives in the repo itself
        return None

    # Use a str attribute name, consistent with every other safehasattr
    # call in this module ('islocal', 'url', 'local'); the previous bytes
    # spelling relied on the pycompat attr wrappers to convert it.
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache the resolved source repository for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
315
315
316
316
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if dest:
        dest = urlutil.get_clone_path(ui, dest)[1]
    else:
        dest = defaultdest(source)

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    # features shared with the source, beyond the store itself
    shareditems = {sharedbookmarks} if bookmarks else set()

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new share configuration takes full effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
368
368
369
369
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy over
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    sourceconfig = srcvfs.read(b'hgrc')
    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
390
390
391
391
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the
                # current repo. We need to copy that while unsharing
                # otherwise it can disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    ctx = newrepo[b'.']
    for subpath in sorted(ctx.substate):
        ctx.sub(subpath).unshare()

    # make sure nobody keeps using the old, now-inconsistent repo object
    localrepo.poisonrepository(repo)

    return newrepo
438
438
439
439
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # point the share back at its source
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # propagate the narrowspec into the new working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
456
456
457
457
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested
        checkout = update
    # first of these that resolves wins
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
478
478
479
479
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for item in srcrepo.store.copylist():
                if srcpublishing and item.endswith(b'phaseroots'):
                    # phase data of a publishing repo is implicit
                    continue
                dstbase = os.path.dirname(item)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(item):
                    continue
                if item.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, b"lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(
                    srcvfs.join(item), dstvfs.join(item), hardlink, progress
                )
                num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
518
518
519
519
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for key in rev:
            with srcpeer.commandexecutor() as executor:
                lookup = executor.callcommand(b'lookup', {b'key': key})
                remoterevs.append(lookup.result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
625
625
626
626
627 # Recomputing caches is often slow on big repos, so copy them.
627 # Recomputing caches is often slow on big repos, so copy them.
628 def _copycache(srcrepo, dstcachedir, fname):
628 def _copycache(srcrepo, dstcachedir, fname):
629 """copy a cache from srcrepo to destcachedir (if it exists)"""
629 """copy a cache from srcrepo to destcachedir (if it exists)"""
630 srcfname = srcrepo.cachevfs.join(fname)
630 srcfname = srcrepo.cachevfs.join(fname)
631 dstfname = os.path.join(dstcachedir, fname)
631 dstfname = os.path.join(dstcachedir, fname)
632 if os.path.exists(srcfname):
632 if os.path.exists(srcfname):
633 if not os.path.exists(dstcachedir):
633 if not os.path.exists(dstcachedir):
634 os.mkdir(dstcachedir)
634 os.mkdir(dstcachedir)
635 util.copyfile(srcfname, dstfname)
635 util.copyfile(srcfname, dstfname)
636
636
637
637
638 def clone(
638 def clone(
639 ui,
639 ui,
640 peeropts,
640 peeropts,
641 source,
641 source,
642 dest=None,
642 dest=None,
643 pull=False,
643 pull=False,
644 revs=None,
644 revs=None,
645 update=True,
645 update=True,
646 stream=False,
646 stream=False,
647 branch=None,
647 branch=None,
648 shareopts=None,
648 shareopts=None,
649 storeincludepats=None,
649 storeincludepats=None,
650 storeexcludepats=None,
650 storeexcludepats=None,
651 depth=None,
651 depth=None,
652 ):
652 ):
653 """Make a copy of an existing repository.
653 """Make a copy of an existing repository.
654
654
655 Create a copy of an existing repository in a new directory. The
655 Create a copy of an existing repository in a new directory. The
656 source and destination are URLs, as passed to the repository
656 source and destination are URLs, as passed to the repository
657 function. Returns a pair of repository peers, the source and
657 function. Returns a pair of repository peers, the source and
658 newly created destination.
658 newly created destination.
659
659
660 The location of the source is added to the new repository's
660 The location of the source is added to the new repository's
661 .hg/hgrc file, as the default to be used for future pulls and
661 .hg/hgrc file, as the default to be used for future pulls and
662 pushes.
662 pushes.
663
663
664 If an exception is raised, the partly cloned/updated destination
664 If an exception is raised, the partly cloned/updated destination
665 repository will be deleted.
665 repository will be deleted.
666
666
667 Arguments:
667 Arguments:
668
668
669 source: repository object or URL
669 source: repository object or URL
670
670
671 dest: URL of destination repository to create (defaults to base
671 dest: URL of destination repository to create (defaults to base
672 name of source repository)
672 name of source repository)
673
673
674 pull: always pull from source repository, even in local case or if the
674 pull: always pull from source repository, even in local case or if the
675 server prefers streaming
675 server prefers streaming
676
676
677 stream: stream raw data uncompressed from repository (fast over
677 stream: stream raw data uncompressed from repository (fast over
678 LAN, slow over WAN)
678 LAN, slow over WAN)
679
679
680 revs: revision to clone up to (implies pull=True)
680 revs: revision to clone up to (implies pull=True)
681
681
682 update: update working directory after clone completes, if
682 update: update working directory after clone completes, if
683 destination is local repository (True means update to default rev,
683 destination is local repository (True means update to default rev,
684 anything else is treated as a revision)
684 anything else is treated as a revision)
685
685
686 branch: branches to clone
686 branch: branches to clone
687
687
688 shareopts: dict of options to control auto sharing behavior. The "pool" key
688 shareopts: dict of options to control auto sharing behavior. The "pool" key
689 activates auto sharing mode and defines the directory for stores. The
689 activates auto sharing mode and defines the directory for stores. The
690 "mode" key determines how to construct the directory name of the shared
690 "mode" key determines how to construct the directory name of the shared
691 repository. "identity" means the name is derived from the node of the first
691 repository. "identity" means the name is derived from the node of the first
692 changeset in the repository. "remote" means the name is derived from the
692 changeset in the repository. "remote" means the name is derived from the
693 remote's path/URL. Defaults to "identity."
693 remote's path/URL. Defaults to "identity."
694
694
695 storeincludepats and storeexcludepats: sets of file patterns to include and
695 storeincludepats and storeexcludepats: sets of file patterns to include and
696 exclude in the repository copy, respectively. If not defined, all files
696 exclude in the repository copy, respectively. If not defined, all files
697 will be included (a "full" clone). Otherwise a "narrow" clone containing
697 will be included (a "full" clone). Otherwise a "narrow" clone containing
698 only the requested files will be performed. If ``storeincludepats`` is not
698 only the requested files will be performed. If ``storeincludepats`` is not
699 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
699 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
700 ``path:.``. If both are empty sets, no files will be cloned.
700 ``path:.``. If both are empty sets, no files will be cloned.
701 """
701 """
702
702
703 if isinstance(source, bytes):
703 if isinstance(source, bytes):
704 src = urlutil.get_clone_path(ui, source, branch)
704 src = urlutil.get_clone_path(ui, source, branch)
705 origsource, source, branches = src
705 origsource, source, branches = src
706 srcpeer = peer(ui, peeropts, source)
706 srcpeer = peer(ui, peeropts, source)
707 else:
707 else:
708 srcpeer = source.peer() # in case we were called with a localrepo
708 srcpeer = source.peer() # in case we were called with a localrepo
709 branches = (None, branch or [])
709 branches = (None, branch or [])
710 origsource = source = srcpeer.url()
710 origsource = source = srcpeer.url()
711 srclock = destlock = destwlock = cleandir = None
711 srclock = destlock = destwlock = cleandir = None
712 destpeer = None
712 destpeer = None
713 try:
713 try:
714 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
714 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
715
715
716 if dest is None:
716 if dest is None:
717 dest = defaultdest(source)
717 dest = defaultdest(source)
718 if dest:
718 if dest:
719 ui.status(_(b"destination directory: %s\n") % dest)
719 ui.status(_(b"destination directory: %s\n") % dest)
720 else:
720 else:
721 dest = urlutil.get_clone_path(ui, dest)[0]
721 dest = urlutil.get_clone_path(ui, dest)[0]
722
722
723 dest = urlutil.urllocalpath(dest)
723 dest = urlutil.urllocalpath(dest)
724 source = urlutil.urllocalpath(source)
724 source = urlutil.urllocalpath(source)
725
725
726 if not dest:
726 if not dest:
727 raise error.InputError(_(b"empty destination path is not valid"))
727 raise error.InputError(_(b"empty destination path is not valid"))
728
728
729 destvfs = vfsmod.vfs(dest, expandpath=True)
729 destvfs = vfsmod.vfs(dest, expandpath=True)
730 if destvfs.lexists():
730 if destvfs.lexists():
731 if not destvfs.isdir():
731 if not destvfs.isdir():
732 raise error.InputError(
732 raise error.InputError(
733 _(b"destination '%s' already exists") % dest
733 _(b"destination '%s' already exists") % dest
734 )
734 )
735 elif destvfs.listdir():
735 elif destvfs.listdir():
736 raise error.InputError(
736 raise error.InputError(
737 _(b"destination '%s' is not empty") % dest
737 _(b"destination '%s' is not empty") % dest
738 )
738 )
739
739
740 createopts = {}
740 createopts = {}
741 narrow = False
741 narrow = False
742
742
743 if storeincludepats is not None:
743 if storeincludepats is not None:
744 narrowspec.validatepatterns(storeincludepats)
744 narrowspec.validatepatterns(storeincludepats)
745 narrow = True
745 narrow = True
746
746
747 if storeexcludepats is not None:
747 if storeexcludepats is not None:
748 narrowspec.validatepatterns(storeexcludepats)
748 narrowspec.validatepatterns(storeexcludepats)
749 narrow = True
749 narrow = True
750
750
751 if narrow:
751 if narrow:
752 # Include everything by default if only exclusion patterns defined.
752 # Include everything by default if only exclusion patterns defined.
753 if storeexcludepats and not storeincludepats:
753 if storeexcludepats and not storeincludepats:
754 storeincludepats = {b'path:.'}
754 storeincludepats = {b'path:.'}
755
755
756 createopts[b'narrowfiles'] = True
756 createopts[b'narrowfiles'] = True
757
757
758 if depth:
758 if depth:
759 createopts[b'shallowfilestore'] = True
759 createopts[b'shallowfilestore'] = True
760
760
761 if srcpeer.capable(b'lfs-serve'):
761 if srcpeer.capable(b'lfs-serve'):
762 # Repository creation honors the config if it disabled the extension, so
762 # Repository creation honors the config if it disabled the extension, so
763 # we can't just announce that lfs will be enabled. This check avoids
763 # we can't just announce that lfs will be enabled. This check avoids
764 # saying that lfs will be enabled, and then saying it's an unknown
764 # saying that lfs will be enabled, and then saying it's an unknown
765 # feature. The lfs creation option is set in either case so that a
765 # feature. The lfs creation option is set in either case so that a
766 # requirement is added. If the extension is explicitly disabled but the
766 # requirement is added. If the extension is explicitly disabled but the
767 # requirement is set, the clone aborts early, before transferring any
767 # requirement is set, the clone aborts early, before transferring any
768 # data.
768 # data.
769 createopts[b'lfs'] = True
769 createopts[b'lfs'] = True
770
770
771 if extensions.disabled_help(b'lfs'):
771 if extensions.disabled_help(b'lfs'):
772 ui.status(
772 ui.status(
773 _(
773 _(
774 b'(remote is using large file support (lfs), but it is '
774 b'(remote is using large file support (lfs), but it is '
775 b'explicitly disabled in the local configuration)\n'
775 b'explicitly disabled in the local configuration)\n'
776 )
776 )
777 )
777 )
778 else:
778 else:
779 ui.status(
779 ui.status(
780 _(
780 _(
781 b'(remote is using large file support (lfs); lfs will '
781 b'(remote is using large file support (lfs); lfs will '
782 b'be enabled for this repository)\n'
782 b'be enabled for this repository)\n'
783 )
783 )
784 )
784 )
785
785
786 shareopts = shareopts or {}
786 shareopts = shareopts or {}
787 sharepool = shareopts.get(b'pool')
787 sharepool = shareopts.get(b'pool')
788 sharenamemode = shareopts.get(b'mode')
788 sharenamemode = shareopts.get(b'mode')
789 if sharepool and islocal(dest):
789 if sharepool and islocal(dest):
790 sharepath = None
790 sharepath = None
791 if sharenamemode == b'identity':
791 if sharenamemode == b'identity':
792 # Resolve the name from the initial changeset in the remote
792 # Resolve the name from the initial changeset in the remote
793 # repository. This returns nullid when the remote is empty. It
793 # repository. This returns nullid when the remote is empty. It
794 # raises RepoLookupError if revision 0 is filtered or otherwise
794 # raises RepoLookupError if revision 0 is filtered or otherwise
795 # not available. If we fail to resolve, sharing is not enabled.
795 # not available. If we fail to resolve, sharing is not enabled.
796 try:
796 try:
797 with srcpeer.commandexecutor() as e:
797 with srcpeer.commandexecutor() as e:
798 rootnode = e.callcommand(
798 rootnode = e.callcommand(
799 b'lookup',
799 b'lookup',
800 {
800 {
801 b'key': b'0',
801 b'key': b'0',
802 },
802 },
803 ).result()
803 ).result()
804
804
805 if rootnode != sha1nodeconstants.nullid:
805 if rootnode != sha1nodeconstants.nullid:
806 sharepath = os.path.join(sharepool, hex(rootnode))
806 sharepath = os.path.join(sharepool, hex(rootnode))
807 else:
807 else:
808 ui.status(
808 ui.status(
809 _(
809 _(
810 b'(not using pooled storage: '
810 b'(not using pooled storage: '
811 b'remote appears to be empty)\n'
811 b'remote appears to be empty)\n'
812 )
812 )
813 )
813 )
814 except error.RepoLookupError:
814 except error.RepoLookupError:
815 ui.status(
815 ui.status(
816 _(
816 _(
817 b'(not using pooled storage: '
817 b'(not using pooled storage: '
818 b'unable to resolve identity of remote)\n'
818 b'unable to resolve identity of remote)\n'
819 )
819 )
820 )
820 )
821 elif sharenamemode == b'remote':
821 elif sharenamemode == b'remote':
822 sharepath = os.path.join(
822 sharepath = os.path.join(
823 sharepool, hex(hashutil.sha1(source).digest())
823 sharepool, hex(hashutil.sha1(source).digest())
824 )
824 )
825 else:
825 else:
826 raise error.Abort(
826 raise error.Abort(
827 _(b'unknown share naming mode: %s') % sharenamemode
827 _(b'unknown share naming mode: %s') % sharenamemode
828 )
828 )
829
829
830 # TODO this is a somewhat arbitrary restriction.
830 # TODO this is a somewhat arbitrary restriction.
831 if narrow:
831 if narrow:
832 ui.status(
832 ui.status(
833 _(b'(pooled storage not supported for narrow clones)\n')
833 _(b'(pooled storage not supported for narrow clones)\n')
834 )
834 )
835 sharepath = None
835 sharepath = None
836
836
837 if sharepath:
837 if sharepath:
838 return clonewithshare(
838 return clonewithshare(
839 ui,
839 ui,
840 peeropts,
840 peeropts,
841 sharepath,
841 sharepath,
842 source,
842 source,
843 srcpeer,
843 srcpeer,
844 dest,
844 dest,
845 pull=pull,
845 pull=pull,
846 rev=revs,
846 rev=revs,
847 update=update,
847 update=update,
848 stream=stream,
848 stream=stream,
849 )
849 )
850
850
851 srcrepo = srcpeer.local()
851 srcrepo = srcpeer.local()
852
852
853 abspath = origsource
853 abspath = origsource
854 if islocal(origsource):
854 if islocal(origsource):
855 abspath = util.abspath(urlutil.urllocalpath(origsource))
855 abspath = util.abspath(urlutil.urllocalpath(origsource))
856
856
857 if islocal(dest):
857 if islocal(dest):
858 if os.path.exists(dest):
858 if os.path.exists(dest):
859 # only clean up directories we create ourselves
859 # only clean up directories we create ourselves
860 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
860 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
861 cleandir = hgdir
861 cleandir = hgdir
862 else:
862 else:
863 cleandir = dest
863 cleandir = dest
864
864
865 copy = False
865 copy = False
866 if (
866 if (
867 srcrepo
867 srcrepo
868 and srcrepo.cancopy()
868 and srcrepo.cancopy()
869 and islocal(dest)
869 and islocal(dest)
870 and not phases.hassecret(srcrepo)
870 and not phases.hassecret(srcrepo)
871 ):
871 ):
872 copy = not pull and not revs
872 copy = not pull and not revs
873
873
874 # TODO this is a somewhat arbitrary restriction.
874 # TODO this is a somewhat arbitrary restriction.
875 if narrow:
875 if narrow:
876 copy = False
876 copy = False
877
877
878 if copy:
878 if copy:
879 try:
879 try:
880 # we use a lock here because if we race with commit, we
880 # we use a lock here because if we race with commit, we
881 # can end up with extra data in the cloned revlogs that's
881 # can end up with extra data in the cloned revlogs that's
882 # not pointed to by changesets, thus causing verify to
882 # not pointed to by changesets, thus causing verify to
883 # fail
883 # fail
884 srclock = srcrepo.lock(wait=False)
884 srclock = srcrepo.lock(wait=False)
885 except error.LockError:
885 except error.LockError:
886 copy = False
886 copy = False
887
887
888 if copy:
888 if copy:
889 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
889 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
890
890
891 destrootpath = urlutil.urllocalpath(dest)
891 destrootpath = urlutil.urllocalpath(dest)
892 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
892 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
893 localrepo.createrepository(
893 localrepo.createrepository(
894 ui,
894 ui,
895 destrootpath,
895 destrootpath,
896 requirements=dest_reqs,
896 requirements=dest_reqs,
897 )
897 )
898 destrepo = localrepo.makelocalrepository(ui, destrootpath)
898 destrepo = localrepo.makelocalrepository(ui, destrootpath)
899
899
900 destwlock = destrepo.wlock()
900 destwlock = destrepo.wlock()
901 destlock = destrepo.lock()
901 destlock = destrepo.lock()
902 from . import streamclone # avoid cycle
902 from . import streamclone # avoid cycle
903
903
904 streamclone.local_copy(srcrepo, destrepo)
904 streamclone.local_copy(srcrepo, destrepo)
905
905
906 # we need to re-init the repo after manually copying the data
906 # we need to re-init the repo after manually copying the data
907 # into it
907 # into it
908 destpeer = peer(srcrepo, peeropts, dest)
908 destpeer = peer(srcrepo, peeropts, dest)
909
909
910 # make the peer aware that is it already locked
910 # make the peer aware that is it already locked
911 #
911 #
912 # important:
912 # important:
913 #
913 #
914 # We still need to release that lock at the end of the function
914 # We still need to release that lock at the end of the function
915 destpeer.local()._lockref = weakref.ref(destlock)
915 destpeer.local()._lockref = weakref.ref(destlock)
916 destpeer.local()._wlockref = weakref.ref(destwlock)
916 destpeer.local()._wlockref = weakref.ref(destwlock)
917 # dirstate also needs to be copied because `_wlockref` has a reference
917 # dirstate also needs to be copied because `_wlockref` has a reference
918 # to it: this dirstate is saved to disk when the wlock is released
918 # to it: this dirstate is saved to disk when the wlock is released
919 destpeer.local().dirstate = destrepo.dirstate
919 destpeer.local().dirstate = destrepo.dirstate
920
920
921 srcrepo.hook(
921 srcrepo.hook(
922 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
922 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
923 )
923 )
924 else:
924 else:
925 try:
925 try:
926 # only pass ui when no srcrepo
926 # only pass ui when no srcrepo
927 destpeer = peer(
927 destpeer = peer(
928 srcrepo or ui,
928 srcrepo or ui,
929 peeropts,
929 peeropts,
930 dest,
930 dest,
931 create=True,
931 create=True,
932 createopts=createopts,
932 createopts=createopts,
933 )
933 )
934 except FileExistsError:
934 except FileExistsError:
935 cleandir = None
935 cleandir = None
936 raise error.Abort(_(b"destination '%s' already exists") % dest)
936 raise error.Abort(_(b"destination '%s' already exists") % dest)
937
937
938 if revs:
938 if revs:
939 if not srcpeer.capable(b'lookup'):
939 if not srcpeer.capable(b'lookup'):
940 raise error.Abort(
940 raise error.Abort(
941 _(
941 _(
942 b"src repository does not support "
942 b"src repository does not support "
943 b"revision lookup and so doesn't "
943 b"revision lookup and so doesn't "
944 b"support clone by revision"
944 b"support clone by revision"
945 )
945 )
946 )
946 )
947
947
948 # TODO this is batchable.
948 # TODO this is batchable.
949 remoterevs = []
949 remoterevs = []
950 for rev in revs:
950 for rev in revs:
951 with srcpeer.commandexecutor() as e:
951 with srcpeer.commandexecutor() as e:
952 remoterevs.append(
952 remoterevs.append(
953 e.callcommand(
953 e.callcommand(
954 b'lookup',
954 b'lookup',
955 {
955 {
956 b'key': rev,
956 b'key': rev,
957 },
957 },
958 ).result()
958 ).result()
959 )
959 )
960 revs = remoterevs
960 revs = remoterevs
961
961
962 checkout = revs[0]
962 checkout = revs[0]
963 else:
963 else:
964 revs = None
964 revs = None
965 local = destpeer.local()
965 local = destpeer.local()
966 if local:
966 if local:
967 if narrow:
967 if narrow:
968 with local.wlock(), local.lock():
968 with local.wlock(), local.lock():
969 local.setnarrowpats(storeincludepats, storeexcludepats)
969 local.setnarrowpats(storeincludepats, storeexcludepats)
970 narrowspec.copytoworkingcopy(local)
970 narrowspec.copytoworkingcopy(local)
971
971
972 u = urlutil.url(abspath)
972 u = urlutil.url(abspath)
973 defaulturl = bytes(u)
973 defaulturl = bytes(u)
974 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
974 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
975 if not stream:
975 if not stream:
976 if pull:
976 if pull:
977 stream = False
977 stream = False
978 else:
978 else:
979 stream = None
979 stream = None
980 # internal config: ui.quietbookmarkmove
980 # internal config: ui.quietbookmarkmove
981 overrides = {(b'ui', b'quietbookmarkmove'): True}
981 overrides = {(b'ui', b'quietbookmarkmove'): True}
982 with local.ui.configoverride(overrides, b'clone'):
982 with local.ui.configoverride(overrides, b'clone'):
983 exchange.pull(
983 exchange.pull(
984 local,
984 local,
985 srcpeer,
985 srcpeer,
986 heads=revs,
986 heads=revs,
987 streamclonerequested=stream,
987 streamclonerequested=stream,
988 includepats=storeincludepats,
988 includepats=storeincludepats,
989 excludepats=storeexcludepats,
989 excludepats=storeexcludepats,
990 depth=depth,
990 depth=depth,
991 )
991 )
992 elif srcrepo:
992 elif srcrepo:
993 # TODO lift restriction once exchange.push() accepts narrow
993 # TODO lift restriction once exchange.push() accepts narrow
994 # push.
994 # push.
995 if narrow:
995 if narrow:
996 raise error.Abort(
996 raise error.Abort(
997 _(
997 _(
998 b'narrow clone not available for '
998 b'narrow clone not available for '
999 b'remote destinations'
999 b'remote destinations'
1000 )
1000 )
1001 )
1001 )
1002
1002
1003 exchange.push(
1003 exchange.push(
1004 srcrepo,
1004 srcrepo,
1005 destpeer,
1005 destpeer,
1006 revs=revs,
1006 revs=revs,
1007 bookmarks=srcrepo._bookmarks.keys(),
1007 bookmarks=srcrepo._bookmarks.keys(),
1008 )
1008 )
1009 else:
1009 else:
1010 raise error.Abort(
1010 raise error.Abort(
1011 _(b"clone from remote to remote not supported")
1011 _(b"clone from remote to remote not supported")
1012 )
1012 )
1013
1013
1014 cleandir = None
1014 cleandir = None
1015
1015
1016 destrepo = destpeer.local()
1016 destrepo = destpeer.local()
1017 if destrepo:
1017 if destrepo:
1018 template = uimod.samplehgrcs[b'cloned']
1018 template = uimod.samplehgrcs[b'cloned']
1019 u = urlutil.url(abspath)
1019 u = urlutil.url(abspath)
1020 u.passwd = None
1020 u.passwd = None
1021 defaulturl = bytes(u)
1021 defaulturl = bytes(u)
1022 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1022 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1023 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1023 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1024
1024
1025 if ui.configbool(b'experimental', b'remotenames'):
1025 if ui.configbool(b'experimental', b'remotenames'):
1026 logexchange.pullremotenames(destrepo, srcpeer)
1026 logexchange.pullremotenames(destrepo, srcpeer)
1027
1027
1028 if update:
1028 if update:
1029 if update is not True:
1029 if update is not True:
1030 with srcpeer.commandexecutor() as e:
1030 with srcpeer.commandexecutor() as e:
1031 checkout = e.callcommand(
1031 checkout = e.callcommand(
1032 b'lookup',
1032 b'lookup',
1033 {
1033 {
1034 b'key': update,
1034 b'key': update,
1035 },
1035 },
1036 ).result()
1036 ).result()
1037
1037
1038 uprev = None
1038 uprev = None
1039 status = None
1039 status = None
1040 if checkout is not None:
1040 if checkout is not None:
1041 # Some extensions (at least hg-git and hg-subversion) have
1041 # Some extensions (at least hg-git and hg-subversion) have
1042 # a peer.lookup() implementation that returns a name instead
1042 # a peer.lookup() implementation that returns a name instead
1043 # of a nodeid. We work around it here until we've figured
1043 # of a nodeid. We work around it here until we've figured
1044 # out a better solution.
1044 # out a better solution.
1045 if len(checkout) == 20 and checkout in destrepo:
1045 if len(checkout) == 20 and checkout in destrepo:
1046 uprev = checkout
1046 uprev = checkout
1047 elif scmutil.isrevsymbol(destrepo, checkout):
1047 elif scmutil.isrevsymbol(destrepo, checkout):
1048 uprev = scmutil.revsymbol(destrepo, checkout).node()
1048 uprev = scmutil.revsymbol(destrepo, checkout).node()
1049 else:
1049 else:
1050 if update is not True:
1050 if update is not True:
1051 try:
1051 try:
1052 uprev = destrepo.lookup(update)
1052 uprev = destrepo.lookup(update)
1053 except error.RepoLookupError:
1053 except error.RepoLookupError:
1054 pass
1054 pass
1055 if uprev is None:
1055 if uprev is None:
1056 try:
1056 try:
1057 if destrepo._activebookmark:
1057 if destrepo._activebookmark:
1058 uprev = destrepo.lookup(destrepo._activebookmark)
1058 uprev = destrepo.lookup(destrepo._activebookmark)
1059 update = destrepo._activebookmark
1059 update = destrepo._activebookmark
1060 else:
1060 else:
1061 uprev = destrepo._bookmarks[b'@']
1061 uprev = destrepo._bookmarks[b'@']
1062 update = b'@'
1062 update = b'@'
1063 bn = destrepo[uprev].branch()
1063 bn = destrepo[uprev].branch()
1064 if bn == b'default':
1064 if bn == b'default':
1065 status = _(b"updating to bookmark %s\n" % update)
1065 status = _(b"updating to bookmark %s\n" % update)
1066 else:
1066 else:
1067 status = (
1067 status = (
1068 _(b"updating to bookmark %s on branch %s\n")
1068 _(b"updating to bookmark %s on branch %s\n")
1069 ) % (update, bn)
1069 ) % (update, bn)
1070 except KeyError:
1070 except KeyError:
1071 try:
1071 try:
1072 uprev = destrepo.branchtip(b'default')
1072 uprev = destrepo.branchtip(b'default')
1073 except error.RepoLookupError:
1073 except error.RepoLookupError:
1074 uprev = destrepo.lookup(b'tip')
1074 uprev = destrepo.lookup(b'tip')
1075 if not status:
1075 if not status:
1076 bn = destrepo[uprev].branch()
1076 bn = destrepo[uprev].branch()
1077 status = _(b"updating to branch %s\n") % bn
1077 status = _(b"updating to branch %s\n") % bn
1078 destrepo.ui.status(status)
1078 destrepo.ui.status(status)
1079 _update(destrepo, uprev)
1079 _update(destrepo, uprev)
1080 if update in destrepo._bookmarks:
1080 if update in destrepo._bookmarks:
1081 bookmarks.activate(destrepo, update)
1081 bookmarks.activate(destrepo, update)
1082 if destlock is not None:
1082 if destlock is not None:
1083 release(destlock)
1083 release(destlock)
1084 if destwlock is not None:
1084 if destwlock is not None:
1085 release(destlock)
1085 release(destlock)
1086 # here is a tiny windows were someone could end up writing the
1086 # here is a tiny windows were someone could end up writing the
1087 # repository before the cache are sure to be warm. This is "fine"
1087 # repository before the cache are sure to be warm. This is "fine"
1088 # as the only "bad" outcome would be some slowness. That potential
1088 # as the only "bad" outcome would be some slowness. That potential
1089 # slowness already affect reader.
1089 # slowness already affect reader.
1090 with destrepo.lock():
1090 with destrepo.lock():
1091 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1091 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1092 finally:
1092 finally:
1093 release(srclock, destlock, destwlock)
1093 release(srclock, destlock, destwlock)
1094 if cleandir is not None:
1094 if cleandir is not None:
1095 shutil.rmtree(cleandir, True)
1095 shutil.rmtree(cleandir, True)
1096 if srcpeer is not None:
1096 if srcpeer is not None:
1097 srcpeer.close()
1097 srcpeer.close()
1098 if destpeer and destpeer.local() is None:
1098 if destpeer and destpeer.local() is None:
1099 destpeer.close()
1099 destpeer.close()
1100 return srcpeer, destpeer
1100 return srcpeer, destpeer
1101
1101
1102
1102
1103 def _showstats(repo, stats, quietempty=False):
1103 def _showstats(repo, stats, quietempty=False):
1104 if quietempty and stats.isempty():
1104 if quietempty and stats.isempty():
1105 return
1105 return
1106 repo.ui.status(
1106 repo.ui.status(
1107 _(
1107 _(
1108 b"%d files updated, %d files merged, "
1108 b"%d files updated, %d files merged, "
1109 b"%d files removed, %d files unresolved\n"
1109 b"%d files removed, %d files unresolved\n"
1110 )
1110 )
1111 % (
1111 % (
1112 stats.updatedcount,
1112 stats.updatedcount,
1113 stats.mergedcount,
1113 stats.mergedcount,
1114 stats.removedcount,
1114 stats.removedcount,
1115 stats.unresolvedcount,
1115 stats.unresolvedcount,
1116 )
1116 )
1117 )
1117 )
1118
1118
1119
1119
1120 def updaterepo(repo, node, overwrite, updatecheck=None):
1120 def updaterepo(repo, node, overwrite, updatecheck=None):
1121 """Update the working directory to node.
1121 """Update the working directory to node.
1122
1122
1123 When overwrite is set, changes are clobbered, merged else
1123 When overwrite is set, changes are clobbered, merged else
1124
1124
1125 returns stats (see pydoc mercurial.merge.applyupdates)"""
1125 returns stats (see pydoc mercurial.merge.applyupdates)"""
1126 repo.ui.deprecwarn(
1126 repo.ui.deprecwarn(
1127 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1127 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1128 b'5.7',
1128 b'5.7',
1129 )
1129 )
1130 return mergemod._update(
1130 return mergemod._update(
1131 repo,
1131 repo,
1132 node,
1132 node,
1133 branchmerge=False,
1133 branchmerge=False,
1134 force=overwrite,
1134 force=overwrite,
1135 labels=[b'working copy', b'destination'],
1135 labels=[b'working copy', b'destination'],
1136 updatecheck=updatecheck,
1136 updatecheck=updatecheck,
1137 )
1137 )
1138
1138
1139
1139
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    # a non-zero unresolved count means the user has conflicts to fix
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1147
1147
1148
1148
# naming conflict in clone(): keep a module-level alias to update() so that
# functions with a local name `update` can still reach it
_update = update
1151
1151
1152
1152
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    update_stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave unresolved files behind
    assert update_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, update_stats, quietempty)
    return False
1160
1160
1161
1161
# naming conflict in updatetotally(): keep a module-level alias to clean()
_clean = clean
1164
1164
# The accepted values for the `updatecheck` argument / `commands.update.check`
# config; anything else is replaced by UPDATECHECK_LINEAR in updatetotally().
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1171
1171
1172
1172
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # explicit argument with an unknown value: programming error
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: derive one (and possibly a bookmark
            # move and a name to activate afterwards)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up-front on a dirty working directory, then let
                # the actual update run unchecked
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination names an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: leave any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1251
1251
1252
1252
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount
    if unresolved:
        # tell the user how to continue or back out
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1274
1274
1275
1275
def abortmerge(ui, repo):
    """Abort an in-progress merge by force-updating back to the local side."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts; return to the recorded local changeset
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1289
1289
1290
1290
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        # Hand the `path` object itself to peer() instead of flattening it
        # to a URL string, so path-level configuration is preserved.
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # graft the relative subrepo path onto the pull location
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` with a bundle repo and hand
        # back a broader cleanup callback
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1352
1357
1353
1358
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets pullable from `source` that are not already local.

    Returns 0 when incoming changes were found; otherwise the minimum of 1
    and the subrepo results (when the `subrepos` opt is set).
    """

    def subreporecurse():
        # recurse into subrepos; the best (lowest) exit code wins
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show at most `limit` changesets, honoring ordering and
        # merge-filtering options
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            # a changeset with two non-null parents is a merge
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1383
1388
1384
1389
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from every destination in `dests`.

    Returns (outgoing_revs, others): the union of missing nodes sorted by
    local revision number, and the list of still-open peers — callers are
    responsible for closing them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # graft the relative subrepo path onto the destination URL
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # close the peer only on failure; on success the caller owns it
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1427
1432
1428
1433
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing in each subrepo; return the best (lowest) exit code."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1437
1442
1438
1443
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # no filtering requested: pass everything through unchanged
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        # two non-null parents means a merge changeset
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1460
1465
1461
1466
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destination(s).

    Returns 0 when outgoing changes were found (possibly lowered further by
    subrepo results), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            # graph display shows all outgoing revs as an ASCII DAG
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # peers returned by _outgoing() are owned by us; always close them
        for oth in others:
            oth.close()
1493
1498
1494
1499
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies each subrepo referenced from `.hgsubstate` in visible
    revisions.  Returns the combined result of verifymod.verify() and the
    subrepo checks.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # report but keep verifying the remaining subrepos
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading substate itself failed: the file is corrupt
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1527
1532
1528
1533
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options; command-line opts win over config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1557
1562
1558
1563
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of the repo attribute holding the base directory,
# file name inside it) — consumed by cachedlocalrepo._repostate().
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1568
1573
1569
1574
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) per file of interest, plus the newest
        # mtime seen — used by fetch() to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed; reuse the cached instance
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            # preserve the view (filter) of the original repo
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, max mtime seen)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet; fall back to its directory
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the staleness snapshot so the copy doesn't refresh
        # spuriously on first fetch()
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now