# Source: Mercurial changeset r50641:2a5feacc (branch: default), by marmoute.
# Commit message: "addbranchrevs: explicitly detect the need to fetch a peer"
# File shown: mercurial/hg.py (repository classes for mercurial).
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names in ``branches`` against ``other``.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by url
    parsing.  Returns a ``(revs, checkout)`` pair where ``revs`` is the list
    of revisions to fetch (branch heads expanded via the remote branchmap)
    and ``checkout`` is the revision to check out afterwards (or None).

    ``other`` may be a peer or a local repository object; a peer is only
    created/used when branch lookup is actually needed.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch restriction: pass revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # without branchmap, treat the hashbranch as an opaque revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand one branch name; returns False when unknown remotely
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: assume it is a revision identifier
            revs.append(hashbranch)
    return revs, revs[0]
110
114
111
115
112 def _isfile(path):
116 def _isfile(path):
113 try:
117 try:
114 # we use os.stat() directly here instead of os.path.isfile()
118 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
119 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
120 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
121 # invalid paths specially here.
118 st = os.stat(path)
122 st = os.stat(path)
119 except ValueError as e:
123 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
124 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
126 except OSError:
123 return False
127 return False
124 else:
128 else:
125 return stat.S_ISREG(st.st_mode)
129 return stat.S_ISREG(st.st_mode)
126
130
127
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo

    A plain path is a local repository; a path that points at a regular
    file is assumed to be a bundle file and handled by ``bundlerepo``.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundle files are not "local repositories" in this sense
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        """Instantiate either a bundlerepo or a localrepo for ``path``."""
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
144
148
145
149
# url scheme -> module (or factory) providing local repository objects
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# url scheme -> module providing remote peer objects
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
162
159
163
def _peerlookup(path):
    """Return the module/factory able to handle ``path``'s url scheme.

    Peer schemes take precedence over repo schemes; anything unknown
    falls back to ``LocalFactory``.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        return peer_schemes[scheme]
    if scheme in repo_schemes:
        return repo_schemes[scheme]
    return LocalFactory
168
172
169
173
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # passing a repo/peer object here is deprecated; use obj.local()
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
180
184
181
185
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        # binary mode: callers expect raw bytes regardless of platform
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
189
193
190
194
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
193
197
194
198
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repo or peer object.

    Runs ``presetupfuncs`` first, then every extension's ``reposetup``
    hook (timed and logged), and finally — for non-local objects only —
    the registered ``wirepeersetupfuncs``.
    """
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
213
217
214
218
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Raises ``error.Abort`` when ``path`` uses a peer-only (remote) scheme.
    The returned repository is filtered to the ``visible`` view.
    """
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    return repo.filtered(b'visible')
241
245
242
246
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository: open it locally and wrap it in a peer
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
274
278
275
279
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # last path component, with trailing slashes normalized away
    return os.path.basename(os.path.normpath(path))
296
300
297
301
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.

    The result is cached on ``repo.srcrepo`` after the first lookup.
    """
    if repo.sharedpath == repo.path:
        # not a share
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
315
319
316
320
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path (bytes) or a repo/peer object with a
    ``local()`` method; only local repositories can be shared.
    Returns the newly created (re-opened) shared repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the share-related requirements
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
369
373
370
374
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config goes first so local settings keep overriding it
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
391
395
392
396
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure nothing keeps using the stale (shared) repo object
    localrepo.poisonrepository(repo)

    return newrepo
439
443
440
444
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones need the narrowspec mirrored into the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
457
461
458
462
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision wins over the provided checkout
        checkout = update
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            # fall through to the next candidate revision
            continue
    _update(repo, uprev)
479
483
480
484
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    Hardlinks are attempted first (``util.copyfiles`` decides); on any
    failure the acquired destination lock is released before re-raising.
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos do not need phase data copied
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
519
523
520
524
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
629 return srcpeer, peer(ui, peeropts, dest)
626
630
627
631
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    # nothing to do when the source repo never materialized this cache
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
637
641
638
642
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        # XXX path: simply use the peer `path` object when this become available
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest_path = urlutil.get_clone_path_obj(ui, dest)
            if dest_path is not None:
                dest = dest_path.rawloc
            else:
                dest = b''

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # fix: translate the template first, then
                            # interpolate; formatting inside _() can never
                            # match a catalog entry, breaking translation.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = _(
                                b"updating to bookmark %s on branch %s\n"
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fix: this previously released ``destlock`` (again) instead
                # of the working-copy lock, leaving ``destwlock`` held until
                # the ``finally`` block below.
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1114
1118
1115
1119
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* on *repo*'s ui.

    When ``quietempty`` is true and the stats record no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(template % counts)
1131
1135
1132
1136
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # deprecated entry point: warn, then forward to the merge module
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1151
1155
1152
1156
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # truthy exit status when there are unresolved files
    return unresolved > 0
1160
1164
1161
1165
# naming conflict in clone()
_update = update
1164
1168
1165
1169
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # always False: no conflicts are possible on a clean update
    return False
1173
1177
1174
1178
# naming conflict in updatetotally()
_clean = clean
1177
1181
# Accepted values for the ``updatecheck`` argument of updatetotally() and
# for the ``commands.update.check`` config option.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1184
1188
1185
1189
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (non-None) argument must be one of the known constants
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move and a name to (re)activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # dirtiness already rejected here, so no further checking
                # is needed during the actual update
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named via a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something that is not a bookmark:
            # deactivate the current bookmark, if any
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1264
1268
1265
1269
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if not stats.unresolvedcount:
        if remind:
            repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
        return False
    # at least one file is left unresolved: tell the user how to proceed
    repo.ui.status(
        _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
    )
    return True
1287
1291
1288
1292
def abortmerge(ui, repo):
    """Abandon an in-progress merge by force-updating back to its first parent."""
    ms = mergestatemod.mergestate.read(repo)
    # with conflicts, the local side is recorded in the merge state;
    # without conflicts no merge state was stored, so fall back to '.'
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update cannot leave unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1302
1306
1303
1307
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # join the subrepo path onto the parent URL, using the path
            # flavor matching the parent location
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` (e.g. with a bundle repo) and
        # hands back the matching cleanup callable
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1370
1374
1371
1375
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present locally (hg incoming)."""

    def subreporecurse():
        # recurse into subrepositories when --subrepos was given; the
        # combined exit code is the minimum across all of them
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show up to --limit changesets, honoring --newest-first and
        # --no-merges filtering
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1401
1405
1402
1406
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from every destination in ``dests``.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the list
    of missing nodes sorted by local revision number and ``others`` is the
    list of still-open peers (callers are responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # join the subrepo path onto the destination URL, using the
                # path flavor matching the destination location
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # the peer is kept open on success; the caller closes it
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1445
1449
1446
1450
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepository (when --subrepos is set).

    Returns the minimum exit code across all subrepositories, starting
    from 1 (nothing found).
    """
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1455
1459
1456
1460
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1478
1482
1479
1483
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s) (hg outgoing)."""
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            # exit code 0: outgoing changes were found
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        # notify hooks for every destination peer
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing leaves its peers open; close them all here
        for oth in others:
            oth.close()
1511
1515
1512
1516
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any earlier failure (truthy ret) sticky
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up for this revision
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1545
1549
1546
1550
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options (command-line takes precedence over config)
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1575
1579
1576
1580
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. (Consumed by cachedlocalrepo._repostate; attr name, then the
# file name relative to that directory.)
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1586
1590
1587
1591
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, plus
        # the newest mtime among them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` for the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per entry in the
        module-level ``foi`` list; ``maxmtime`` is the newest mtime seen.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist (yet); fall back to the directory so a
                # later creation still changes the observed state
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the observed state so the copy does not immediately
        # consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now