path: have `peer` constructor accept a `path` object...
marmoute
r50602:aa36771e default
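The changeset below touches `mercurial/hg.py` so that `hg.peer()` accepts either a raw URL/path or a `urlutil.path` object; when it gets a path object it takes the scheme from `path.url` and the location from `path.loc`. A minimal, self-contained sketch of that dispatch pattern is shown here (plain Python with a hypothetical FakePath stand-in, not Mercurial's actual urlutil.path class):

# Sketch of the dispatch the new peer() performs: accept either a parsed
# path-like object or a raw URL string, and normalize both to
# (scheme, location) before choosing a peer class.
from urllib.parse import urlsplit

class FakePath:
    """Stand-in for a urlutil.path-like object: carries .url and .loc."""
    def __init__(self, loc):
        self.loc = loc            # the raw location string
        self.url = urlsplit(loc)  # parsed form exposing .scheme

def resolve(path):
    if hasattr(path, 'url'):      # path object: reuse its parsed URL
        return path.url.scheme or 'file', path.loc
    return urlsplit(path).scheme or 'file', path  # raw string URL

print(resolve('ssh://example.com/repo'))       # ('ssh', 'ssh://example.com/repo')
print(resolve(FakePath('https://host/repo')))  # ('https', 'https://host/repo')
print(resolve('/local/repo'))                  # ('file', '/local/repo')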
@@ -1,1636 +1,1642 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
68 def addbranchrevs(lrepo, other, branches, revs):
68 def addbranchrevs(lrepo, other, branches, revs):
69 peer = other.peer() # a courtesy to callers using a localrepo for other
69 peer = other.peer() # a courtesy to callers using a localrepo for other
70 hashbranch, branches = branches
70 hashbranch, branches = branches
71 if not hashbranch and not branches:
71 if not hashbranch and not branches:
72 x = revs or None
72 x = revs or None
73 if revs:
73 if revs:
74 y = revs[0]
74 y = revs[0]
75 else:
75 else:
76 y = None
76 y = None
77 return x, y
77 return x, y
78 if revs:
78 if revs:
79 revs = list(revs)
79 revs = list(revs)
80 else:
80 else:
81 revs = []
81 revs = []
82
82
83 if not peer.capable(b'branchmap'):
83 if not peer.capable(b'branchmap'):
84 if branches:
84 if branches:
85 raise error.Abort(_(b"remote branch lookup not supported"))
85 raise error.Abort(_(b"remote branch lookup not supported"))
86 revs.append(hashbranch)
86 revs.append(hashbranch)
87 return revs, revs[0]
87 return revs, revs[0]
88
88
89 with peer.commandexecutor() as e:
89 with peer.commandexecutor() as e:
90 branchmap = e.callcommand(b'branchmap', {}).result()
90 branchmap = e.callcommand(b'branchmap', {}).result()
91
91
92 def primary(branch):
92 def primary(branch):
93 if branch == b'.':
93 if branch == b'.':
94 if not lrepo:
94 if not lrepo:
95 raise error.Abort(_(b"dirstate branch not accessible"))
95 raise error.Abort(_(b"dirstate branch not accessible"))
96 branch = lrepo.dirstate.branch()
96 branch = lrepo.dirstate.branch()
97 if branch in branchmap:
97 if branch in branchmap:
98 revs.extend(hex(r) for r in reversed(branchmap[branch]))
98 revs.extend(hex(r) for r in reversed(branchmap[branch]))
99 return True
99 return True
100 else:
100 else:
101 return False
101 return False
102
102
103 for branch in branches:
103 for branch in branches:
104 if not primary(branch):
104 if not primary(branch):
105 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
105 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
106 if hashbranch:
106 if hashbranch:
107 if not primary(hashbranch):
107 if not primary(hashbranch):
108 revs.append(hashbranch)
108 revs.append(hashbranch)
109 return revs, revs[0]
109 return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
128 class LocalFactory:
128 class LocalFactory:
129 """thin wrapper to dispatch between localrepo and bundle repo"""
129 """thin wrapper to dispatch between localrepo and bundle repo"""
130
130
131 @staticmethod
131 @staticmethod
132 def islocal(path: bytes) -> bool:
132 def islocal(path: bytes) -> bool:
133 path = util.expandpath(urlutil.urllocalpath(path))
133 path = util.expandpath(urlutil.urllocalpath(path))
134 return not _isfile(path)
134 return not _isfile(path)
135
135
136 @staticmethod
136 @staticmethod
137 def instance(ui, path, *args, **kwargs):
137 def instance(ui, path, *args, **kwargs):
138 path = util.expandpath(urlutil.urllocalpath(path))
138 path = util.expandpath(urlutil.urllocalpath(path))
139 if _isfile(path):
139 if _isfile(path):
140 cls = bundlerepo
140 cls = bundlerepo
141 else:
141 else:
142 cls = localrepo
142 cls = localrepo
143 return cls.instance(ui, path, *args, **kwargs)
143 return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
146 repo_schemes = {
146 repo_schemes = {
147 b'bundle': bundlerepo,
147 b'bundle': bundlerepo,
148 b'union': unionrepo,
148 b'union': unionrepo,
149 b'file': LocalFactory,
149 b'file': LocalFactory,
150 }
150 }
151
151
152 peer_schemes = {
152 peer_schemes = {
153 b'http': httppeer,
153 b'http': httppeer,
154 b'https': httppeer,
154 b'https': httppeer,
155 b'ssh': sshpeer,
155 b'ssh': sshpeer,
156 b'static-http': statichttprepo,
156 b'static-http': statichttprepo,
157 }
157 }
158
158
159
159
160 def _peerlookup(path):
160 def _peerlookup(path):
161 u = urlutil.url(path)
161 u = urlutil.url(path)
162 scheme = u.scheme or b'file'
162 scheme = u.scheme or b'file'
163 if scheme in peer_schemes:
163 if scheme in peer_schemes:
164 return peer_schemes[scheme]
164 return peer_schemes[scheme]
165 if scheme in repo_schemes:
165 if scheme in repo_schemes:
166 return repo_schemes[scheme]
166 return repo_schemes[scheme]
167 return LocalFactory
167 return LocalFactory
168
168
169
169
170 def islocal(repo):
170 def islocal(repo):
171 '''return true if repo (or path pointing to repo) is local'''
171 '''return true if repo (or path pointing to repo) is local'''
172 if isinstance(repo, bytes):
172 if isinstance(repo, bytes):
173 cls = _peerlookup(repo)
173 cls = _peerlookup(repo)
174 cls.instance # make sure we load the module
174 cls.instance # make sure we load the module
175 if util.safehasattr(cls, 'islocal'):
175 if util.safehasattr(cls, 'islocal'):
176 return cls.islocal(repo) # pytype: disable=module-attr
176 return cls.islocal(repo) # pytype: disable=module-attr
177 return False
177 return False
178 repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
178 repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
179 return repo.local()
179 return repo.local()
180
180
181
181
182 def openpath(ui, path, sendaccept=True):
182 def openpath(ui, path, sendaccept=True):
183 '''open path with open if local, url.open if remote'''
183 '''open path with open if local, url.open if remote'''
184 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
184 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
185 if pathurl.islocal():
185 if pathurl.islocal():
186 return util.posixfile(pathurl.localpath(), b'rb')
186 return util.posixfile(pathurl.localpath(), b'rb')
187 else:
187 else:
188 return url.open(ui, path, sendaccept=sendaccept)
188 return url.open(ui, path, sendaccept=sendaccept)
189
189
190
190
191 # a list of (ui, repo) functions called for wire peer initialization
191 # a list of (ui, repo) functions called for wire peer initialization
192 wirepeersetupfuncs = []
192 wirepeersetupfuncs = []
193
193
194
194
195 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
195 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
196 ui = getattr(obj, "ui", ui)
196 ui = getattr(obj, "ui", ui)
197 for f in presetupfuncs or []:
197 for f in presetupfuncs or []:
198 f(ui, obj)
198 f(ui, obj)
199 ui.log(b'extension', b'- executing reposetup hooks\n')
199 ui.log(b'extension', b'- executing reposetup hooks\n')
200 with util.timedcm('all reposetup') as allreposetupstats:
200 with util.timedcm('all reposetup') as allreposetupstats:
201 for name, module in extensions.extensions(ui):
201 for name, module in extensions.extensions(ui):
202 ui.log(b'extension', b' - running reposetup for %s\n', name)
202 ui.log(b'extension', b' - running reposetup for %s\n', name)
203 hook = getattr(module, 'reposetup', None)
203 hook = getattr(module, 'reposetup', None)
204 if hook:
204 if hook:
205 with util.timedcm('reposetup %r', name) as stats:
205 with util.timedcm('reposetup %r', name) as stats:
206 hook(ui, obj)
206 hook(ui, obj)
207 msg = b' > reposetup for %s took %s\n'
207 msg = b' > reposetup for %s took %s\n'
208 ui.log(b'extension', msg, name, stats)
208 ui.log(b'extension', msg, name, stats)
209 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
209 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
210 if not obj.local():
210 if not obj.local():
211 for f in wirepeersetupfuncs:
211 for f in wirepeersetupfuncs:
212 f(ui, obj)
212 f(ui, obj)
213
213
214
214
215 def repository(
215 def repository(
216 ui,
216 ui,
217 path=b'',
217 path=b'',
218 create=False,
218 create=False,
219 presetupfuncs=None,
219 presetupfuncs=None,
220 intents=None,
220 intents=None,
221 createopts=None,
221 createopts=None,
222 ):
222 ):
223 """return a repository object for the specified path"""
223 """return a repository object for the specified path"""
224 scheme = urlutil.url(path).scheme
224 scheme = urlutil.url(path).scheme
225 if scheme is None:
225 if scheme is None:
226 scheme = b'file'
226 scheme = b'file'
227 cls = repo_schemes.get(scheme)
227 cls = repo_schemes.get(scheme)
228 if cls is None:
228 if cls is None:
229 if scheme in peer_schemes:
229 if scheme in peer_schemes:
230 raise error.Abort(_(b"repository '%s' is not local") % path)
230 raise error.Abort(_(b"repository '%s' is not local") % path)
231 cls = LocalFactory
231 cls = LocalFactory
232 repo = cls.instance(
232 repo = cls.instance(
233 ui,
233 ui,
234 path,
234 path,
235 create,
235 create,
236 intents=intents,
236 intents=intents,
237 createopts=createopts,
237 createopts=createopts,
238 )
238 )
239 _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
239 _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
240 return repo.filtered(b'visible')
240 return repo.filtered(b'visible')
241
241
242
242
243 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
243 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
244 '''return a repository peer for the specified path'''
244 '''return a repository peer for the specified path'''
245 rui = remoteui(uiorrepo, opts)
245 rui = remoteui(uiorrepo, opts)
246 scheme = urlutil.url(path).scheme
246 if util.safehasattr(path, 'url'):
247 # this is a urlutil.path object
248 scheme = path.url.scheme # pytype: disable=attribute-error
249 # XXX for now we don't do anything more than that
250 path = path.loc # pytype: disable=attribute-error
251 else:
252 scheme = urlutil.url(path).scheme
247 if scheme in peer_schemes:
253 if scheme in peer_schemes:
248 cls = peer_schemes[scheme]
254 cls = peer_schemes[scheme]
249 peer = cls.instance(
255 peer = cls.instance(
250 rui,
256 rui,
251 path,
257 path,
252 create,
258 create,
253 intents=intents,
259 intents=intents,
254 createopts=createopts,
260 createopts=createopts,
255 )
261 )
256 _setup_repo_or_peer(rui, peer)
262 _setup_repo_or_peer(rui, peer)
257 else:
263 else:
258 # this is a repository
264 # this is a repository
259 repo = repository(
265 repo = repository(
260 rui,
266 rui,
261 path,
267 path,
262 create,
268 create,
263 intents=intents,
269 intents=intents,
264 createopts=createopts,
270 createopts=createopts,
265 )
271 )
266 peer = repo.peer()
272 peer = repo.peer()
267 return peer
273 return peer
268
274
269
275
270 def defaultdest(source):
276 def defaultdest(source):
271 """return default destination of clone if none is given
277 """return default destination of clone if none is given
272
278
273 >>> defaultdest(b'foo')
279 >>> defaultdest(b'foo')
274 'foo'
280 'foo'
275 >>> defaultdest(b'/foo/bar')
281 >>> defaultdest(b'/foo/bar')
276 'bar'
282 'bar'
277 >>> defaultdest(b'/')
283 >>> defaultdest(b'/')
278 ''
284 ''
279 >>> defaultdest(b'')
285 >>> defaultdest(b'')
280 ''
286 ''
281 >>> defaultdest(b'http://example.org/')
287 >>> defaultdest(b'http://example.org/')
282 ''
288 ''
283 >>> defaultdest(b'http://example.org/foo/')
289 >>> defaultdest(b'http://example.org/foo/')
284 'foo'
290 'foo'
285 """
291 """
286 path = urlutil.url(source).path
292 path = urlutil.url(source).path
287 if not path:
293 if not path:
288 return b''
294 return b''
289 return os.path.basename(os.path.normpath(path))
295 return os.path.basename(os.path.normpath(path))
290
296
291
297
292 def sharedreposource(repo):
298 def sharedreposource(repo):
293 """Returns repository object for source repository of a shared repo.
299 """Returns repository object for source repository of a shared repo.
294
300
295 If repo is not a shared repository, returns None.
301 If repo is not a shared repository, returns None.
296 """
302 """
297 if repo.sharedpath == repo.path:
303 if repo.sharedpath == repo.path:
298 return None
304 return None
299
305
300 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
306 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
301 return repo.srcrepo
307 return repo.srcrepo
302
308
303 # the sharedpath always ends in the .hg; we want the path to the repo
309 # the sharedpath always ends in the .hg; we want the path to the repo
304 source = repo.vfs.split(repo.sharedpath)[0]
310 source = repo.vfs.split(repo.sharedpath)[0]
305 srcurl, branches = urlutil.parseurl(source)
311 srcurl, branches = urlutil.parseurl(source)
306 srcrepo = repository(repo.ui, srcurl)
312 srcrepo = repository(repo.ui, srcurl)
307 repo.srcrepo = srcrepo
313 repo.srcrepo = srcrepo
308 return srcrepo
314 return srcrepo
309
315
310
316
311 def share(
317 def share(
312 ui,
318 ui,
313 source,
319 source,
314 dest=None,
320 dest=None,
315 update=True,
321 update=True,
316 bookmarks=True,
322 bookmarks=True,
317 defaultpath=None,
323 defaultpath=None,
318 relative=False,
324 relative=False,
319 ):
325 ):
320 '''create a shared repository'''
326 '''create a shared repository'''
321
327
322 not_local_msg = _(b'can only share local repositories')
328 not_local_msg = _(b'can only share local repositories')
323 if util.safehasattr(source, 'local'):
329 if util.safehasattr(source, 'local'):
324 if source.local() is None:
330 if source.local() is None:
325 raise error.Abort(not_local_msg)
331 raise error.Abort(not_local_msg)
326 elif not islocal(source):
332 elif not islocal(source):
327 # XXX why are we getting bytes here ?
333 # XXX why are we getting bytes here ?
328 raise error.Abort(not_local_msg)
334 raise error.Abort(not_local_msg)
329
335
330 if not dest:
336 if not dest:
331 dest = defaultdest(source)
337 dest = defaultdest(source)
332 else:
338 else:
333 dest = urlutil.get_clone_path(ui, dest)[1]
339 dest = urlutil.get_clone_path(ui, dest)[1]
334
340
335 if isinstance(source, bytes):
341 if isinstance(source, bytes):
336 origsource, source, branches = urlutil.get_clone_path(ui, source)
342 origsource, source, branches = urlutil.get_clone_path(ui, source)
337 srcrepo = repository(ui, source)
343 srcrepo = repository(ui, source)
338 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
344 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
339 else:
345 else:
340 srcrepo = source.local()
346 srcrepo = source.local()
341 checkout = None
347 checkout = None
342
348
343 shareditems = set()
349 shareditems = set()
344 if bookmarks:
350 if bookmarks:
345 shareditems.add(sharedbookmarks)
351 shareditems.add(sharedbookmarks)
346
352
347 r = repository(
353 r = repository(
348 ui,
354 ui,
349 dest,
355 dest,
350 create=True,
356 create=True,
351 createopts={
357 createopts={
352 b'sharedrepo': srcrepo,
358 b'sharedrepo': srcrepo,
353 b'sharedrelative': relative,
359 b'sharedrelative': relative,
354 b'shareditems': shareditems,
360 b'shareditems': shareditems,
355 },
361 },
356 )
362 )
357
363
358 postshare(srcrepo, r, defaultpath=defaultpath)
364 postshare(srcrepo, r, defaultpath=defaultpath)
359 r = repository(ui, dest)
365 r = repository(ui, dest)
360 _postshareupdate(r, update, checkout=checkout)
366 _postshareupdate(r, update, checkout=checkout)
361 return r
367 return r
362
368
363
369
364 def _prependsourcehgrc(repo):
370 def _prependsourcehgrc(repo):
365 """copies the source repo config and prepend it in current repo .hg/hgrc
371 """copies the source repo config and prepend it in current repo .hg/hgrc
366 on unshare. This is only done if the share was perfomed using share safe
372 on unshare. This is only done if the share was perfomed using share safe
367 method where we share config of source in shares"""
373 method where we share config of source in shares"""
368 srcvfs = vfsmod.vfs(repo.sharedpath)
374 srcvfs = vfsmod.vfs(repo.sharedpath)
369 dstvfs = vfsmod.vfs(repo.path)
375 dstvfs = vfsmod.vfs(repo.path)
370
376
371 if not srcvfs.exists(b'hgrc'):
377 if not srcvfs.exists(b'hgrc'):
372 return
378 return
373
379
374 currentconfig = b''
380 currentconfig = b''
375 if dstvfs.exists(b'hgrc'):
381 if dstvfs.exists(b'hgrc'):
376 currentconfig = dstvfs.read(b'hgrc')
382 currentconfig = dstvfs.read(b'hgrc')
377
383
378 with dstvfs(b'hgrc', b'wb') as fp:
384 with dstvfs(b'hgrc', b'wb') as fp:
379 sourceconfig = srcvfs.read(b'hgrc')
385 sourceconfig = srcvfs.read(b'hgrc')
380 fp.write(b"# Config copied from shared source\n")
386 fp.write(b"# Config copied from shared source\n")
381 fp.write(sourceconfig)
387 fp.write(sourceconfig)
382 fp.write(b'\n')
388 fp.write(b'\n')
383 fp.write(currentconfig)
389 fp.write(currentconfig)
384
390
385
391
386 def unshare(ui, repo):
392 def unshare(ui, repo):
387 """convert a shared repository to a normal one
393 """convert a shared repository to a normal one
388
394
389 Copy the store data to the repo and remove the sharedpath data.
395 Copy the store data to the repo and remove the sharedpath data.
390
396
391 Returns a new repository object representing the unshared repository.
397 Returns a new repository object representing the unshared repository.
392
398
393 The passed repository object is not usable after this function is
399 The passed repository object is not usable after this function is
394 called.
400 called.
395 """
401 """
396
402
397 with repo.lock():
403 with repo.lock():
398 # we use locks here because if we race with commit, we
404 # we use locks here because if we race with commit, we
399 # can end up with extra data in the cloned revlogs that's
405 # can end up with extra data in the cloned revlogs that's
400 # not pointed to by changesets, thus causing verify to
406 # not pointed to by changesets, thus causing verify to
401 # fail
407 # fail
402 destlock = copystore(ui, repo, repo.path)
408 destlock = copystore(ui, repo, repo.path)
403 with destlock or util.nullcontextmanager():
409 with destlock or util.nullcontextmanager():
404 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
410 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
405 # we were sharing .hg/hgrc of the share source with the current
411 # we were sharing .hg/hgrc of the share source with the current
406 # repo. We need to copy that while unsharing otherwise it can
412 # repo. We need to copy that while unsharing otherwise it can
407 # disable hooks and other checks
413 # disable hooks and other checks
408 _prependsourcehgrc(repo)
414 _prependsourcehgrc(repo)
409
415
410 sharefile = repo.vfs.join(b'sharedpath')
416 sharefile = repo.vfs.join(b'sharedpath')
411 util.rename(sharefile, sharefile + b'.old')
417 util.rename(sharefile, sharefile + b'.old')
412
418
413 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
419 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
414 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
420 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
415 scmutil.writereporequirements(repo)
421 scmutil.writereporequirements(repo)
416
422
417 # Removing share changes some fundamental properties of the repo instance.
423 # Removing share changes some fundamental properties of the repo instance.
418 # So we instantiate a new repo object and operate on it rather than
424 # So we instantiate a new repo object and operate on it rather than
419 # try to keep the existing repo usable.
425 # try to keep the existing repo usable.
420 newrepo = repository(repo.baseui, repo.root, create=False)
426 newrepo = repository(repo.baseui, repo.root, create=False)
421
427
422 # TODO: figure out how to access subrepos that exist, but were previously
428 # TODO: figure out how to access subrepos that exist, but were previously
423 # removed from .hgsub
429 # removed from .hgsub
424 c = newrepo[b'.']
430 c = newrepo[b'.']
425 subs = c.substate
431 subs = c.substate
426 for s in sorted(subs):
432 for s in sorted(subs):
427 c.sub(s).unshare()
433 c.sub(s).unshare()
428
434
429 localrepo.poisonrepository(repo)
435 localrepo.poisonrepository(repo)
430
436
431 return newrepo
437 return newrepo
432
438
433
439
434 def postshare(sourcerepo, destrepo, defaultpath=None):
440 def postshare(sourcerepo, destrepo, defaultpath=None):
435 """Called after a new shared repo is created.
441 """Called after a new shared repo is created.
436
442
437 The new repo only has a requirements file and pointer to the source.
443 The new repo only has a requirements file and pointer to the source.
438 This function configures additional shared data.
444 This function configures additional shared data.
439
445
440 Extensions can wrap this function and write additional entries to
446 Extensions can wrap this function and write additional entries to
441 destrepo/.hg/shared to indicate additional pieces of data to be shared.
447 destrepo/.hg/shared to indicate additional pieces of data to be shared.
442 """
448 """
443 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
449 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
444 if default:
450 if default:
445 template = b'[paths]\ndefault = %s\n'
451 template = b'[paths]\ndefault = %s\n'
446 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
452 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
447 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
453 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
448 with destrepo.wlock():
454 with destrepo.wlock():
449 narrowspec.copytoworkingcopy(destrepo)
455 narrowspec.copytoworkingcopy(destrepo)
450
456
451
457
452 def _postshareupdate(repo, update, checkout=None):
458 def _postshareupdate(repo, update, checkout=None):
453 """Maybe perform a working directory update after a shared repo is created.
459 """Maybe perform a working directory update after a shared repo is created.
454
460
455 ``update`` can be a boolean or a revision to update to.
461 ``update`` can be a boolean or a revision to update to.
456 """
462 """
457 if not update:
463 if not update:
458 return
464 return
459
465
460 repo.ui.status(_(b"updating working directory\n"))
466 repo.ui.status(_(b"updating working directory\n"))
461 if update is not True:
467 if update is not True:
462 checkout = update
468 checkout = update
463 for test in (checkout, b'default', b'tip'):
469 for test in (checkout, b'default', b'tip'):
464 if test is None:
470 if test is None:
465 continue
471 continue
466 try:
472 try:
467 uprev = repo.lookup(test)
473 uprev = repo.lookup(test)
468 break
474 break
469 except error.RepoLookupError:
475 except error.RepoLookupError:
470 continue
476 continue
471 _update(repo, uprev)
477 _update(repo, uprev)
472
478
473
479
474 def copystore(ui, srcrepo, destpath):
480 def copystore(ui, srcrepo, destpath):
475 """copy files from store of srcrepo in destpath
481 """copy files from store of srcrepo in destpath
476
482
477 returns destlock
483 returns destlock
478 """
484 """
479 destlock = None
485 destlock = None
480 try:
486 try:
481 hardlink = None
487 hardlink = None
482 topic = _(b'linking') if hardlink else _(b'copying')
488 topic = _(b'linking') if hardlink else _(b'copying')
483 with ui.makeprogress(topic, unit=_(b'files')) as progress:
489 with ui.makeprogress(topic, unit=_(b'files')) as progress:
484 num = 0
490 num = 0
485 srcpublishing = srcrepo.publishing()
491 srcpublishing = srcrepo.publishing()
486 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
492 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
487 dstvfs = vfsmod.vfs(destpath)
493 dstvfs = vfsmod.vfs(destpath)
488 for f in srcrepo.store.copylist():
494 for f in srcrepo.store.copylist():
489 if srcpublishing and f.endswith(b'phaseroots'):
495 if srcpublishing and f.endswith(b'phaseroots'):
490 continue
496 continue
491 dstbase = os.path.dirname(f)
497 dstbase = os.path.dirname(f)
492 if dstbase and not dstvfs.exists(dstbase):
498 if dstbase and not dstvfs.exists(dstbase):
493 dstvfs.mkdir(dstbase)
499 dstvfs.mkdir(dstbase)
494 if srcvfs.exists(f):
500 if srcvfs.exists(f):
495 if f.endswith(b'data'):
501 if f.endswith(b'data'):
496 # 'dstbase' may be empty (e.g. revlog format 0)
502 # 'dstbase' may be empty (e.g. revlog format 0)
497 lockfile = os.path.join(dstbase, b"lock")
503 lockfile = os.path.join(dstbase, b"lock")
498 # lock to avoid premature writing to the target
504 # lock to avoid premature writing to the target
499 destlock = lock.lock(dstvfs, lockfile)
505 destlock = lock.lock(dstvfs, lockfile)
500 hardlink, n = util.copyfiles(
506 hardlink, n = util.copyfiles(
501 srcvfs.join(f), dstvfs.join(f), hardlink, progress
507 srcvfs.join(f), dstvfs.join(f), hardlink, progress
502 )
508 )
503 num += n
509 num += n
504 if hardlink:
510 if hardlink:
505 ui.debug(b"linked %d files\n" % num)
511 ui.debug(b"linked %d files\n" % num)
506 else:
512 else:
507 ui.debug(b"copied %d files\n" % num)
513 ui.debug(b"copied %d files\n" % num)
508 return destlock
514 return destlock
509 except: # re-raises
515 except: # re-raises
510 release(destlock)
516 release(destlock)
511 raise
517 raise
512
518
513
519
514 def clonewithshare(
520 def clonewithshare(
515 ui,
521 ui,
516 peeropts,
522 peeropts,
517 sharepath,
523 sharepath,
518 source,
524 source,
519 srcpeer,
525 srcpeer,
520 dest,
526 dest,
521 pull=False,
527 pull=False,
522 rev=None,
528 rev=None,
523 update=True,
529 update=True,
524 stream=False,
530 stream=False,
525 ):
531 ):
526 """Perform a clone using a shared repo.
532 """Perform a clone using a shared repo.
527
533
528 The store for the repository will be located at <sharepath>/.hg. The
534 The store for the repository will be located at <sharepath>/.hg. The
529 specified revisions will be cloned or pulled from "source". A shared repo
535 specified revisions will be cloned or pulled from "source". A shared repo
530 will be created at "dest" and a working copy will be created if "update" is
536 will be created at "dest" and a working copy will be created if "update" is
531 True.
537 True.
532 """
538 """
533 revs = None
539 revs = None
534 if rev:
540 if rev:
535 if not srcpeer.capable(b'lookup'):
541 if not srcpeer.capable(b'lookup'):
536 raise error.Abort(
542 raise error.Abort(
537 _(
543 _(
538 b"src repository does not support "
544 b"src repository does not support "
539 b"revision lookup and so doesn't "
545 b"revision lookup and so doesn't "
540 b"support clone by revision"
546 b"support clone by revision"
541 )
547 )
542 )
548 )
543
549
544 # TODO this is batchable.
550 # TODO this is batchable.
545 remoterevs = []
551 remoterevs = []
546 for r in rev:
552 for r in rev:
547 with srcpeer.commandexecutor() as e:
553 with srcpeer.commandexecutor() as e:
548 remoterevs.append(
554 remoterevs.append(
549 e.callcommand(
555 e.callcommand(
550 b'lookup',
556 b'lookup',
551 {
557 {
552 b'key': r,
558 b'key': r,
553 },
559 },
554 ).result()
560 ).result()
555 )
561 )
556 revs = remoterevs
562 revs = remoterevs
557
563
558 # Obtain a lock before checking for or cloning the pooled repo otherwise
564 # Obtain a lock before checking for or cloning the pooled repo otherwise
559 # 2 clients may race creating or populating it.
565 # 2 clients may race creating or populating it.
560 pooldir = os.path.dirname(sharepath)
566 pooldir = os.path.dirname(sharepath)
561 # lock class requires the directory to exist.
567 # lock class requires the directory to exist.
562 try:
568 try:
563 util.makedir(pooldir, False)
569 util.makedir(pooldir, False)
564 except FileExistsError:
570 except FileExistsError:
565 pass
571 pass
566
572
567 poolvfs = vfsmod.vfs(pooldir)
573 poolvfs = vfsmod.vfs(pooldir)
568 basename = os.path.basename(sharepath)
574 basename = os.path.basename(sharepath)
569
575
570 with lock.lock(poolvfs, b'%s.lock' % basename):
576 with lock.lock(poolvfs, b'%s.lock' % basename):
571 if os.path.exists(sharepath):
577 if os.path.exists(sharepath):
572 ui.status(
578 ui.status(
573 _(b'(sharing from existing pooled repository %s)\n') % basename
579 _(b'(sharing from existing pooled repository %s)\n') % basename
574 )
580 )
575 else:
581 else:
576 ui.status(
582 ui.status(
577 _(b'(sharing from new pooled repository %s)\n') % basename
583 _(b'(sharing from new pooled repository %s)\n') % basename
578 )
584 )
579 # Always use pull mode because hardlinks in share mode don't work
585 # Always use pull mode because hardlinks in share mode don't work
580 # well. Never update because working copies aren't necessary in
586 # well. Never update because working copies aren't necessary in
581 # share mode.
587 # share mode.
582 clone(
588 clone(
583 ui,
589 ui,
584 peeropts,
590 peeropts,
585 source,
591 source,
586 dest=sharepath,
592 dest=sharepath,
587 pull=True,
593 pull=True,
588 revs=rev,
594 revs=rev,
589 update=False,
595 update=False,
590 stream=stream,
596 stream=stream,
591 )
597 )
592
598
593 # Resolve the value to put in [paths] section for the source.
599 # Resolve the value to put in [paths] section for the source.
594 if islocal(source):
600 if islocal(source):
595 defaultpath = util.abspath(urlutil.urllocalpath(source))
601 defaultpath = util.abspath(urlutil.urllocalpath(source))
596 else:
602 else:
597 defaultpath = source
603 defaultpath = source
598
604
599 sharerepo = repository(ui, path=sharepath)
605 sharerepo = repository(ui, path=sharepath)
600 destrepo = share(
606 destrepo = share(
601 ui,
607 ui,
602 sharerepo,
608 sharerepo,
603 dest=dest,
609 dest=dest,
604 update=False,
610 update=False,
605 bookmarks=False,
611 bookmarks=False,
606 defaultpath=defaultpath,
612 defaultpath=defaultpath,
607 )
613 )
608
614
609 # We need to perform a pull against the dest repo to fetch bookmarks
615 # We need to perform a pull against the dest repo to fetch bookmarks
610 # and other non-store data that isn't shared by default. In the case of
616 # and other non-store data that isn't shared by default. In the case of
611 # non-existing shared repo, this means we pull from the remote twice. This
617 # non-existing shared repo, this means we pull from the remote twice. This
612 # is a bit weird. But at the time it was implemented, there wasn't an easy
618 # is a bit weird. But at the time it was implemented, there wasn't an easy
613 # way to pull just non-changegroup data.
619 # way to pull just non-changegroup data.
614 exchange.pull(destrepo, srcpeer, heads=revs)
620 exchange.pull(destrepo, srcpeer, heads=revs)
615
621
616 _postshareupdate(destrepo, update)
622 _postshareupdate(destrepo, update)
617
623
618 return srcpeer, peer(ui, peeropts, dest)
624 return srcpeer, peer(ui, peeropts, dest)
619
625
620
626
621 # Recomputing caches is often slow on big repos, so copy them.
627 # Recomputing caches is often slow on big repos, so copy them.
622 def _copycache(srcrepo, dstcachedir, fname):
628 def _copycache(srcrepo, dstcachedir, fname):
623 """copy a cache from srcrepo to destcachedir (if it exists)"""
629 """copy a cache from srcrepo to destcachedir (if it exists)"""
624 srcfname = srcrepo.cachevfs.join(fname)
630 srcfname = srcrepo.cachevfs.join(fname)
625 dstfname = os.path.join(dstcachedir, fname)
631 dstfname = os.path.join(dstcachedir, fname)
626 if os.path.exists(srcfname):
632 if os.path.exists(srcfname):
627 if not os.path.exists(dstcachedir):
633 if not os.path.exists(dstcachedir):
628 os.mkdir(dstcachedir)
634 os.mkdir(dstcachedir)
629 util.copyfile(srcfname, dstfname)
635 util.copyfile(srcfname, dstfname)
630
636
631
637
632 def clone(
638 def clone(
633 ui,
639 ui,
634 peeropts,
640 peeropts,
635 source,
641 source,
636 dest=None,
642 dest=None,
637 pull=False,
643 pull=False,
638 revs=None,
644 revs=None,
639 update=True,
645 update=True,
640 stream=False,
646 stream=False,
641 branch=None,
647 branch=None,
642 shareopts=None,
648 shareopts=None,
643 storeincludepats=None,
649 storeincludepats=None,
644 storeexcludepats=None,
650 storeexcludepats=None,
645 depth=None,
651 depth=None,
646 ):
652 ):
647 """Make a copy of an existing repository.
653 """Make a copy of an existing repository.
648
654
649 Create a copy of an existing repository in a new directory. The
655 Create a copy of an existing repository in a new directory. The
650 source and destination are URLs, as passed to the repository
656 source and destination are URLs, as passed to the repository
651 function. Returns a pair of repository peers, the source and
657 function. Returns a pair of repository peers, the source and
652 newly created destination.
658 newly created destination.
653
659
654 The location of the source is added to the new repository's
660 The location of the source is added to the new repository's
655 .hg/hgrc file, as the default to be used for future pulls and
661 .hg/hgrc file, as the default to be used for future pulls and
656 pushes.
662 pushes.
657
663
658 If an exception is raised, the partly cloned/updated destination
664 If an exception is raised, the partly cloned/updated destination
659 repository will be deleted.
665 repository will be deleted.
660
666
661 Arguments:
667 Arguments:
662
668
663 source: repository object or URL
669 source: repository object or URL
664
670
665 dest: URL of destination repository to create (defaults to base
671 dest: URL of destination repository to create (defaults to base
666 name of source repository)
672 name of source repository)
667
673
668 pull: always pull from source repository, even in local case or if the
674 pull: always pull from source repository, even in local case or if the
669 server prefers streaming
675 server prefers streaming
670
676
671 stream: stream raw data uncompressed from repository (fast over
677 stream: stream raw data uncompressed from repository (fast over
672 LAN, slow over WAN)
678 LAN, slow over WAN)
673
679
674 revs: revision to clone up to (implies pull=True)
680 revs: revision to clone up to (implies pull=True)
675
681
676 update: update working directory after clone completes, if
682 update: update working directory after clone completes, if
677 destination is local repository (True means update to default rev,
683 destination is local repository (True means update to default rev,
678 anything else is treated as a revision)
684 anything else is treated as a revision)
679
685
680 branch: branches to clone
686 branch: branches to clone
681
687
682 shareopts: dict of options to control auto sharing behavior. The "pool" key
688 shareopts: dict of options to control auto sharing behavior. The "pool" key
683 activates auto sharing mode and defines the directory for stores. The
689 activates auto sharing mode and defines the directory for stores. The
684 "mode" key determines how to construct the directory name of the shared
690 "mode" key determines how to construct the directory name of the shared
685 repository. "identity" means the name is derived from the node of the first
691 repository. "identity" means the name is derived from the node of the first
686 changeset in the repository. "remote" means the name is derived from the
692 changeset in the repository. "remote" means the name is derived from the
687 remote's path/URL. Defaults to "identity."
693 remote's path/URL. Defaults to "identity."
688
694
689 storeincludepats and storeexcludepats: sets of file patterns to include and
695 storeincludepats and storeexcludepats: sets of file patterns to include and
690 exclude in the repository copy, respectively. If not defined, all files
696 exclude in the repository copy, respectively. If not defined, all files
691 will be included (a "full" clone). Otherwise a "narrow" clone containing
697 will be included (a "full" clone). Otherwise a "narrow" clone containing
692 only the requested files will be performed. If ``storeincludepats`` is not
698 only the requested files will be performed. If ``storeincludepats`` is not
693 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
699 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
694 ``path:.``. If both are empty sets, no files will be cloned.
700 ``path:.``. If both are empty sets, no files will be cloned.
695 """
701 """
696
702
697 if isinstance(source, bytes):
703 if isinstance(source, bytes):
698 src = urlutil.get_clone_path(ui, source, branch)
704 src = urlutil.get_clone_path(ui, source, branch)
699 origsource, source, branches = src
705 origsource, source, branches = src
700 srcpeer = peer(ui, peeropts, source)
706 srcpeer = peer(ui, peeropts, source)
701 else:
707 else:
702 srcpeer = source.peer() # in case we were called with a localrepo
708 srcpeer = source.peer() # in case we were called with a localrepo
703 branches = (None, branch or [])
709 branches = (None, branch or [])
704 origsource = source = srcpeer.url()
710 origsource = source = srcpeer.url()
705 srclock = destlock = destwlock = cleandir = None
711 srclock = destlock = destwlock = cleandir = None
706 destpeer = None
712 destpeer = None
707 try:
713 try:
708 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
714 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
709
715
710 if dest is None:
716 if dest is None:
711 dest = defaultdest(source)
717 dest = defaultdest(source)
712 if dest:
718 if dest:
713 ui.status(_(b"destination directory: %s\n") % dest)
719 ui.status(_(b"destination directory: %s\n") % dest)
714 else:
720 else:
715 dest = urlutil.get_clone_path(ui, dest)[0]
721 dest = urlutil.get_clone_path(ui, dest)[0]
716
722
717 dest = urlutil.urllocalpath(dest)
723 dest = urlutil.urllocalpath(dest)
718 source = urlutil.urllocalpath(source)
724 source = urlutil.urllocalpath(source)
719
725
720 if not dest:
726 if not dest:
721 raise error.InputError(_(b"empty destination path is not valid"))
727 raise error.InputError(_(b"empty destination path is not valid"))
722
728
723 destvfs = vfsmod.vfs(dest, expandpath=True)
729 destvfs = vfsmod.vfs(dest, expandpath=True)
724 if destvfs.lexists():
730 if destvfs.lexists():
725 if not destvfs.isdir():
731 if not destvfs.isdir():
726 raise error.InputError(
732 raise error.InputError(
727 _(b"destination '%s' already exists") % dest
733 _(b"destination '%s' already exists") % dest
728 )
734 )
729 elif destvfs.listdir():
735 elif destvfs.listdir():
730 raise error.InputError(
736 raise error.InputError(
731 _(b"destination '%s' is not empty") % dest
737 _(b"destination '%s' is not empty") % dest
732 )
738 )
733
739
734 createopts = {}
740 createopts = {}
735 narrow = False
741 narrow = False
736
742
737 if storeincludepats is not None:
743 if storeincludepats is not None:
738 narrowspec.validatepatterns(storeincludepats)
744 narrowspec.validatepatterns(storeincludepats)
739 narrow = True
745 narrow = True
740
746
741 if storeexcludepats is not None:
747 if storeexcludepats is not None:
742 narrowspec.validatepatterns(storeexcludepats)
748 narrowspec.validatepatterns(storeexcludepats)
743 narrow = True
749 narrow = True
744
750
745 if narrow:
751 if narrow:
746 # Include everything by default if only exclusion patterns defined.
752 # Include everything by default if only exclusion patterns defined.
747 if storeexcludepats and not storeincludepats:
753 if storeexcludepats and not storeincludepats:
748 storeincludepats = {b'path:.'}
754 storeincludepats = {b'path:.'}
749
755
750 createopts[b'narrowfiles'] = True
756 createopts[b'narrowfiles'] = True
751
757
752 if depth:
758 if depth:
753 createopts[b'shallowfilestore'] = True
759 createopts[b'shallowfilestore'] = True
754
760
755 if srcpeer.capable(b'lfs-serve'):
761 if srcpeer.capable(b'lfs-serve'):
756 # Repository creation honors the config if it disabled the extension, so
762 # Repository creation honors the config if it disabled the extension, so
757 # we can't just announce that lfs will be enabled. This check avoids
763 # we can't just announce that lfs will be enabled. This check avoids
758 # saying that lfs will be enabled, and then saying it's an unknown
764 # saying that lfs will be enabled, and then saying it's an unknown
759 # feature. The lfs creation option is set in either case so that a
765 # feature. The lfs creation option is set in either case so that a
760 # requirement is added. If the extension is explicitly disabled but the
766 # requirement is added. If the extension is explicitly disabled but the
761 # requirement is set, the clone aborts early, before transferring any
767 # requirement is set, the clone aborts early, before transferring any
762 # data.
768 # data.
763 createopts[b'lfs'] = True
769 createopts[b'lfs'] = True
764
770
765 if extensions.disabled_help(b'lfs'):
771 if extensions.disabled_help(b'lfs'):
766 ui.status(
772 ui.status(
767 _(
773 _(
768 b'(remote is using large file support (lfs), but it is '
774 b'(remote is using large file support (lfs), but it is '
769 b'explicitly disabled in the local configuration)\n'
775 b'explicitly disabled in the local configuration)\n'
770 )
776 )
771 )
777 )
772 else:
778 else:
773 ui.status(
779 ui.status(
774 _(
780 _(
775 b'(remote is using large file support (lfs); lfs will '
781 b'(remote is using large file support (lfs); lfs will '
776 b'be enabled for this repository)\n'
782 b'be enabled for this repository)\n'
777 )
783 )
778 )
784 )
779
785
780 shareopts = shareopts or {}
786 shareopts = shareopts or {}
781 sharepool = shareopts.get(b'pool')
787 sharepool = shareopts.get(b'pool')
782 sharenamemode = shareopts.get(b'mode')
788 sharenamemode = shareopts.get(b'mode')
783 if sharepool and islocal(dest):
789 if sharepool and islocal(dest):
784 sharepath = None
790 sharepath = None
785 if sharenamemode == b'identity':
791 if sharenamemode == b'identity':
786 # Resolve the name from the initial changeset in the remote
792 # Resolve the name from the initial changeset in the remote
787 # repository. This returns nullid when the remote is empty. It
793 # repository. This returns nullid when the remote is empty. It
788 # raises RepoLookupError if revision 0 is filtered or otherwise
794 # raises RepoLookupError if revision 0 is filtered or otherwise
789 # not available. If we fail to resolve, sharing is not enabled.
795 # not available. If we fail to resolve, sharing is not enabled.
790 try:
796 try:
791 with srcpeer.commandexecutor() as e:
797 with srcpeer.commandexecutor() as e:
792 rootnode = e.callcommand(
798 rootnode = e.callcommand(
793 b'lookup',
799 b'lookup',
794 {
800 {
795 b'key': b'0',
801 b'key': b'0',
796 },
802 },
797 ).result()
803 ).result()
798
804
799 if rootnode != sha1nodeconstants.nullid:
805 if rootnode != sha1nodeconstants.nullid:
800 sharepath = os.path.join(sharepool, hex(rootnode))
806 sharepath = os.path.join(sharepool, hex(rootnode))
801 else:
807 else:
802 ui.status(
808 ui.status(
803 _(
809 _(
804 b'(not using pooled storage: '
810 b'(not using pooled storage: '
805 b'remote appears to be empty)\n'
811 b'remote appears to be empty)\n'
806 )
812 )
807 )
813 )
808 except error.RepoLookupError:
814 except error.RepoLookupError:
809 ui.status(
815 ui.status(
810 _(
816 _(
811 b'(not using pooled storage: '
817 b'(not using pooled storage: '
812 b'unable to resolve identity of remote)\n'
818 b'unable to resolve identity of remote)\n'
813 )
819 )
814 )
820 )
815 elif sharenamemode == b'remote':
821 elif sharenamemode == b'remote':
816 sharepath = os.path.join(
822 sharepath = os.path.join(
817 sharepool, hex(hashutil.sha1(source).digest())
823 sharepool, hex(hashutil.sha1(source).digest())
818 )
824 )
819 else:
825 else:
820 raise error.Abort(
826 raise error.Abort(
821 _(b'unknown share naming mode: %s') % sharenamemode
827 _(b'unknown share naming mode: %s') % sharenamemode
822 )
828 )
823
829
824 # TODO this is a somewhat arbitrary restriction.
830 # TODO this is a somewhat arbitrary restriction.
825 if narrow:
831 if narrow:
826 ui.status(
832 ui.status(
827 _(b'(pooled storage not supported for narrow clones)\n')
833 _(b'(pooled storage not supported for narrow clones)\n')
828 )
834 )
829 sharepath = None
835 sharepath = None
830
836
831 if sharepath:
837 if sharepath:
832 return clonewithshare(
838 return clonewithshare(
833 ui,
839 ui,
834 peeropts,
840 peeropts,
835 sharepath,
841 sharepath,
836 source,
842 source,
837 srcpeer,
843 srcpeer,
838 dest,
844 dest,
839 pull=pull,
845 pull=pull,
840 rev=revs,
846 rev=revs,
841 update=update,
847 update=update,
842 stream=stream,
848 stream=stream,
843 )
849 )
844
850
845 srcrepo = srcpeer.local()
851 srcrepo = srcpeer.local()
846
852
847 abspath = origsource
853 abspath = origsource
848 if islocal(origsource):
854 if islocal(origsource):
849 abspath = util.abspath(urlutil.urllocalpath(origsource))
855 abspath = util.abspath(urlutil.urllocalpath(origsource))
850
856
851 if islocal(dest):
857 if islocal(dest):
852 if os.path.exists(dest):
858 if os.path.exists(dest):
853 # only clean up directories we create ourselves
859 # only clean up directories we create ourselves
854 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
860 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
855 cleandir = hgdir
861 cleandir = hgdir
856 else:
862 else:
857 cleandir = dest
863 cleandir = dest
858
864
859 copy = False
865 copy = False
860 if (
866 if (
861 srcrepo
867 srcrepo
862 and srcrepo.cancopy()
868 and srcrepo.cancopy()
863 and islocal(dest)
869 and islocal(dest)
864 and not phases.hassecret(srcrepo)
870 and not phases.hassecret(srcrepo)
865 ):
871 ):
866 copy = not pull and not revs
872 copy = not pull and not revs
867
873
868 # TODO this is a somewhat arbitrary restriction.
874 # TODO this is a somewhat arbitrary restriction.
869 if narrow:
875 if narrow:
870 copy = False
876 copy = False
871
877
872 if copy:
878 if copy:
873 try:
879 try:
874 # we use a lock here because if we race with commit, we
880 # we use a lock here because if we race with commit, we
875 # can end up with extra data in the cloned revlogs that's
881 # can end up with extra data in the cloned revlogs that's
876 # not pointed to by changesets, thus causing verify to
882 # not pointed to by changesets, thus causing verify to
877 # fail
883 # fail
878 srclock = srcrepo.lock(wait=False)
884 srclock = srcrepo.lock(wait=False)
879 except error.LockError:
885 except error.LockError:
880 copy = False
886 copy = False
881
887
882 if copy:
888 if copy:
883 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
889 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
884
890
885 destrootpath = urlutil.urllocalpath(dest)
891 destrootpath = urlutil.urllocalpath(dest)
886 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
892 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
887 localrepo.createrepository(
893 localrepo.createrepository(
888 ui,
894 ui,
889 destrootpath,
895 destrootpath,
890 requirements=dest_reqs,
896 requirements=dest_reqs,
891 )
897 )
892 destrepo = localrepo.makelocalrepository(ui, destrootpath)
898 destrepo = localrepo.makelocalrepository(ui, destrootpath)
893
899
894 destwlock = destrepo.wlock()
900 destwlock = destrepo.wlock()
895 destlock = destrepo.lock()
901 destlock = destrepo.lock()
896 from . import streamclone # avoid cycle
902 from . import streamclone # avoid cycle
897
903
898 streamclone.local_copy(srcrepo, destrepo)
904 streamclone.local_copy(srcrepo, destrepo)
899
905
900 # we need to re-init the repo after manually copying the data
906 # we need to re-init the repo after manually copying the data
901 # into it
907 # into it
902 destpeer = peer(srcrepo, peeropts, dest)
908 destpeer = peer(srcrepo, peeropts, dest)
903
909
904 # make the peer aware that is it already locked
910 # make the peer aware that is it already locked
905 #
911 #
906 # important:
912 # important:
907 #
913 #
908 # We still need to release that lock at the end of the function
914 # We still need to release that lock at the end of the function
909 destpeer.local()._lockref = weakref.ref(destlock)
915 destpeer.local()._lockref = weakref.ref(destlock)
910 destpeer.local()._wlockref = weakref.ref(destwlock)
916 destpeer.local()._wlockref = weakref.ref(destwlock)
911 # dirstate also needs to be copied because `_wlockref` has a reference
917 # dirstate also needs to be copied because `_wlockref` has a reference
912 # to it: this dirstate is saved to disk when the wlock is released
918 # to it: this dirstate is saved to disk when the wlock is released
913 destpeer.local().dirstate = destrepo.dirstate
919 destpeer.local().dirstate = destrepo.dirstate
914
920
915 srcrepo.hook(
921 srcrepo.hook(
916 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
922 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
917 )
923 )
918 else:
924 else:
919 try:
925 try:
920 # only pass ui when no srcrepo
926 # only pass ui when no srcrepo
921 destpeer = peer(
927 destpeer = peer(
922 srcrepo or ui,
928 srcrepo or ui,
923 peeropts,
929 peeropts,
924 dest,
930 dest,
925 create=True,
931 create=True,
926 createopts=createopts,
932 createopts=createopts,
927 )
933 )
928 except FileExistsError:
934 except FileExistsError:
929 cleandir = None
935 cleandir = None
930 raise error.Abort(_(b"destination '%s' already exists") % dest)
936 raise error.Abort(_(b"destination '%s' already exists") % dest)
931
937
932 if revs:
938 if revs:
933 if not srcpeer.capable(b'lookup'):
939 if not srcpeer.capable(b'lookup'):
934 raise error.Abort(
940 raise error.Abort(
935 _(
941 _(
936 b"src repository does not support "
942 b"src repository does not support "
937 b"revision lookup and so doesn't "
943 b"revision lookup and so doesn't "
938 b"support clone by revision"
944 b"support clone by revision"
939 )
945 )
940 )
946 )
941
947
942 # TODO this is batchable.
948 # TODO this is batchable.
943 remoterevs = []
949 remoterevs = []
944 for rev in revs:
950 for rev in revs:
945 with srcpeer.commandexecutor() as e:
951 with srcpeer.commandexecutor() as e:
946 remoterevs.append(
952 remoterevs.append(
947 e.callcommand(
953 e.callcommand(
948 b'lookup',
954 b'lookup',
949 {
955 {
950 b'key': rev,
956 b'key': rev,
951 },
957 },
952 ).result()
958 ).result()
953 )
959 )
954 revs = remoterevs
960 revs = remoterevs
955
961
956 checkout = revs[0]
962 checkout = revs[0]
957 else:
963 else:
958 revs = None
964 revs = None
959 local = destpeer.local()
965 local = destpeer.local()
960 if local:
966 if local:
961 if narrow:
967 if narrow:
962 with local.wlock(), local.lock():
968 with local.wlock(), local.lock():
963 local.setnarrowpats(storeincludepats, storeexcludepats)
969 local.setnarrowpats(storeincludepats, storeexcludepats)
964 narrowspec.copytoworkingcopy(local)
970 narrowspec.copytoworkingcopy(local)
965
971
966 u = urlutil.url(abspath)
972 u = urlutil.url(abspath)
967 defaulturl = bytes(u)
973 defaulturl = bytes(u)
968 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
974 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
969 if not stream:
975 if not stream:
970 if pull:
976 if pull:
971 stream = False
977 stream = False
972 else:
978 else:
973 stream = None
979 stream = None
974 # internal config: ui.quietbookmarkmove
980 # internal config: ui.quietbookmarkmove
975 overrides = {(b'ui', b'quietbookmarkmove'): True}
981 overrides = {(b'ui', b'quietbookmarkmove'): True}
976 with local.ui.configoverride(overrides, b'clone'):
982 with local.ui.configoverride(overrides, b'clone'):
977 exchange.pull(
983 exchange.pull(
978 local,
984 local,
979 srcpeer,
985 srcpeer,
980 heads=revs,
986 heads=revs,
981 streamclonerequested=stream,
987 streamclonerequested=stream,
982 includepats=storeincludepats,
988 includepats=storeincludepats,
983 excludepats=storeexcludepats,
989 excludepats=storeexcludepats,
984 depth=depth,
990 depth=depth,
985 )
991 )
986 elif srcrepo:
992 elif srcrepo:
987 # TODO lift restriction once exchange.push() accepts narrow
993 # TODO lift restriction once exchange.push() accepts narrow
988 # push.
994 # push.
989 if narrow:
995 if narrow:
990 raise error.Abort(
996 raise error.Abort(
991 _(
997 _(
992 b'narrow clone not available for '
998 b'narrow clone not available for '
993 b'remote destinations'
999 b'remote destinations'
994 )
1000 )
995 )
1001 )
996
1002
997 exchange.push(
1003 exchange.push(
998 srcrepo,
1004 srcrepo,
999 destpeer,
1005 destpeer,
1000 revs=revs,
1006 revs=revs,
1001 bookmarks=srcrepo._bookmarks.keys(),
1007 bookmarks=srcrepo._bookmarks.keys(),
1002 )
1008 )
1003 else:
1009 else:
1004 raise error.Abort(
1010 raise error.Abort(
1005 _(b"clone from remote to remote not supported")
1011 _(b"clone from remote to remote not supported")
1006 )
1012 )
1007
1013
1008 cleandir = None
1014 cleandir = None
1009
1015
1010 destrepo = destpeer.local()
1016 destrepo = destpeer.local()
1011 if destrepo:
1017 if destrepo:
1012 template = uimod.samplehgrcs[b'cloned']
1018 template = uimod.samplehgrcs[b'cloned']
1013 u = urlutil.url(abspath)
1019 u = urlutil.url(abspath)
1014 u.passwd = None
1020 u.passwd = None
1015 defaulturl = bytes(u)
1021 defaulturl = bytes(u)
1016 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1022 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1017 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1023 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1018
1024
1019 if ui.configbool(b'experimental', b'remotenames'):
1025 if ui.configbool(b'experimental', b'remotenames'):
1020 logexchange.pullremotenames(destrepo, srcpeer)
1026 logexchange.pullremotenames(destrepo, srcpeer)
1021
1027
1022 if update:
1028 if update:
1023 if update is not True:
1029 if update is not True:
1024 with srcpeer.commandexecutor() as e:
1030 with srcpeer.commandexecutor() as e:
1025 checkout = e.callcommand(
1031 checkout = e.callcommand(
1026 b'lookup',
1032 b'lookup',
1027 {
1033 {
1028 b'key': update,
1034 b'key': update,
1029 },
1035 },
1030 ).result()
1036 ).result()
1031
1037
1032 uprev = None
1038 uprev = None
1033 status = None
1039 status = None
1034 if checkout is not None:
1040 if checkout is not None:
1035 # Some extensions (at least hg-git and hg-subversion) have
1041 # Some extensions (at least hg-git and hg-subversion) have
1036 # a peer.lookup() implementation that returns a name instead
1042 # a peer.lookup() implementation that returns a name instead
1037 # of a nodeid. We work around it here until we've figured
1043 # of a nodeid. We work around it here until we've figured
1038 # out a better solution.
1044 # out a better solution.
1039 if len(checkout) == 20 and checkout in destrepo:
1045 if len(checkout) == 20 and checkout in destrepo:
1040 uprev = checkout
1046 uprev = checkout
1041 elif scmutil.isrevsymbol(destrepo, checkout):
1047 elif scmutil.isrevsymbol(destrepo, checkout):
1042 uprev = scmutil.revsymbol(destrepo, checkout).node()
1048 uprev = scmutil.revsymbol(destrepo, checkout).node()
1043 else:
1049 else:
1044 if update is not True:
1050 if update is not True:
1045 try:
1051 try:
1046 uprev = destrepo.lookup(update)
1052 uprev = destrepo.lookup(update)
1047 except error.RepoLookupError:
1053 except error.RepoLookupError:
1048 pass
1054 pass
1049 if uprev is None:
1055 if uprev is None:
1050 try:
1056 try:
1051 if destrepo._activebookmark:
1057 if destrepo._activebookmark:
1052 uprev = destrepo.lookup(destrepo._activebookmark)
1058 uprev = destrepo.lookup(destrepo._activebookmark)
1053 update = destrepo._activebookmark
1059 update = destrepo._activebookmark
1054 else:
1060 else:
1055 uprev = destrepo._bookmarks[b'@']
1061 uprev = destrepo._bookmarks[b'@']
1056 update = b'@'
1062 update = b'@'
1057 bn = destrepo[uprev].branch()
1063 bn = destrepo[uprev].branch()
1058 if bn == b'default':
1064 if bn == b'default':
1059 status = _(b"updating to bookmark %s\n") % update
1065 status = _(b"updating to bookmark %s\n") % update
1060 else:
1066 else:
1061 status = (
1067 status = (
1062 _(b"updating to bookmark %s on branch %s\n")
1068 _(b"updating to bookmark %s on branch %s\n")
1063 ) % (update, bn)
1069 ) % (update, bn)
1064 except KeyError:
1070 except KeyError:
1065 try:
1071 try:
1066 uprev = destrepo.branchtip(b'default')
1072 uprev = destrepo.branchtip(b'default')
1067 except error.RepoLookupError:
1073 except error.RepoLookupError:
1068 uprev = destrepo.lookup(b'tip')
1074 uprev = destrepo.lookup(b'tip')
1069 if not status:
1075 if not status:
1070 bn = destrepo[uprev].branch()
1076 bn = destrepo[uprev].branch()
1071 status = _(b"updating to branch %s\n") % bn
1077 status = _(b"updating to branch %s\n") % bn
1072 destrepo.ui.status(status)
1078 destrepo.ui.status(status)
1073 _update(destrepo, uprev)
1079 _update(destrepo, uprev)
1074 if update in destrepo._bookmarks:
1080 if update in destrepo._bookmarks:
1075 bookmarks.activate(destrepo, update)
1081 bookmarks.activate(destrepo, update)
1076 if destlock is not None:
1082 if destlock is not None:
1077 release(destlock)
1083 release(destlock)
1078 if destwlock is not None:
1084 if destwlock is not None:
1079 release(destwlock)
1085 release(destwlock)
1080 # here is a tiny window where someone could end up writing to the
1086 # here is a tiny window where someone could end up writing to the
1081 # repository before the caches are sure to be warm. This is "fine"
1087 # repository before the caches are sure to be warm. This is "fine"
1082 # as the only "bad" outcome would be some slowness. That potential
1088 # as the only "bad" outcome would be some slowness. That potential
1083 # slowness already affects readers.
1089 # slowness already affects readers.
1084 with destrepo.lock():
1090 with destrepo.lock():
1085 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1091 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1086 finally:
1092 finally:
1087 release(srclock, destlock, destwlock)
1093 release(srclock, destlock, destwlock)
1088 if cleandir is not None:
1094 if cleandir is not None:
1089 shutil.rmtree(cleandir, True)
1095 shutil.rmtree(cleandir, True)
1090 if srcpeer is not None:
1096 if srcpeer is not None:
1091 srcpeer.close()
1097 srcpeer.close()
1092 if destpeer and destpeer.local() is None:
1098 if destpeer and destpeer.local() is None:
1093 destpeer.close()
1099 destpeer.close()
1094 return srcpeer, destpeer
1100 return srcpeer, destpeer
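The clone() function above hands back the source and destination peers so that callers can keep using them. A minimal, hedged usage sketch follows; the peeropts dict, the example URL, and the destination path are assumptions for illustration, not part of this changeset.

def example_clone(ui):
    # clone() is defined earlier in this module; only its return values are
    # exercised here.  destpeer.local() is None when the destination is remote.
    srcpeer, destpeer = clone(ui, {}, b'https://example.com/repo', b'repo-copy')
    destrepo = destpeer.local()
    if destrepo is not None:
        ui.status(b'cloned into %s\n' % destrepo.root)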
1095
1101
1096
1102
1097 def _showstats(repo, stats, quietempty=False):
1103 def _showstats(repo, stats, quietempty=False):
1098 if quietempty and stats.isempty():
1104 if quietempty and stats.isempty():
1099 return
1105 return
1100 repo.ui.status(
1106 repo.ui.status(
1101 _(
1107 _(
1102 b"%d files updated, %d files merged, "
1108 b"%d files updated, %d files merged, "
1103 b"%d files removed, %d files unresolved\n"
1109 b"%d files removed, %d files unresolved\n"
1104 )
1110 )
1105 % (
1111 % (
1106 stats.updatedcount,
1112 stats.updatedcount,
1107 stats.mergedcount,
1113 stats.mergedcount,
1108 stats.removedcount,
1114 stats.removedcount,
1109 stats.unresolvedcount,
1115 stats.unresolvedcount,
1110 )
1116 )
1111 )
1117 )
1112
1118
1113
1119
1114 def updaterepo(repo, node, overwrite, updatecheck=None):
1120 def updaterepo(repo, node, overwrite, updatecheck=None):
1115 """Update the working directory to node.
1121 """Update the working directory to node.
1116
1122
1117 When overwrite is set, changes are clobbered; otherwise they are merged
1123 When overwrite is set, changes are clobbered; otherwise they are merged
1118
1124
1119 returns stats (see pydoc mercurial.merge.applyupdates)"""
1125 returns stats (see pydoc mercurial.merge.applyupdates)"""
1120 repo.ui.deprecwarn(
1126 repo.ui.deprecwarn(
1121 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1127 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1122 b'5.7',
1128 b'5.7',
1123 )
1129 )
1124 return mergemod._update(
1130 return mergemod._update(
1125 repo,
1131 repo,
1126 node,
1132 node,
1127 branchmerge=False,
1133 branchmerge=False,
1128 force=overwrite,
1134 force=overwrite,
1129 labels=[b'working copy', b'destination'],
1135 labels=[b'working copy', b'destination'],
1130 updatecheck=updatecheck,
1136 updatecheck=updatecheck,
1131 )
1137 )
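As the deprecation warning above says, updaterepo() should be replaced by calls into the merge module. A short sketch of the suggested replacements, assuming a repo object and a target node are already available:

from mercurial import merge as mergemod

def example_update(repo, node, discard_changes=False):
    ctx = repo[node]
    if discard_changes:
        # roughly what updaterepo(repo, node, overwrite=True) did
        return mergemod.clean_update(ctx)
    # roughly what updaterepo(repo, node, overwrite=False) did
    return mergemod.update(ctx)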
1132
1138
1133
1139
1134 def update(repo, node, quietempty=False, updatecheck=None):
1140 def update(repo, node, quietempty=False, updatecheck=None):
1135 """update the working directory to node"""
1141 """update the working directory to node"""
1136 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1142 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1137 _showstats(repo, stats, quietempty)
1143 _showstats(repo, stats, quietempty)
1138 if stats.unresolvedcount:
1144 if stats.unresolvedcount:
1139 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1145 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1140 return stats.unresolvedcount > 0
1146 return stats.unresolvedcount > 0
1141
1147
1142
1148
1143 # naming conflict in clone()
1149 # naming conflict in clone()
1144 _update = update
1150 _update = update
1145
1151
1146
1152
1147 def clean(repo, node, show_stats=True, quietempty=False):
1153 def clean(repo, node, show_stats=True, quietempty=False):
1148 """forcibly switch the working directory to node, clobbering changes"""
1154 """forcibly switch the working directory to node, clobbering changes"""
1149 stats = mergemod.clean_update(repo[node])
1155 stats = mergemod.clean_update(repo[node])
1150 assert stats.unresolvedcount == 0
1156 assert stats.unresolvedcount == 0
1151 if show_stats:
1157 if show_stats:
1152 _showstats(repo, stats, quietempty)
1158 _showstats(repo, stats, quietempty)
1153 return False
1159 return False
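Both helpers report conflicts through their return value: update() returns True when unresolved files remain, while clean() always returns False because a forced, clean update cannot conflict. A small caller-side sketch, with the surrounding command context assumed:

def example_checkout(repo, node, discard=False):
    if discard:
        hasconflicts = clean(repo, node)   # always False; local changes are clobbered
    else:
        hasconflicts = update(repo, node)  # True when file merges were left unresolved
    return 1 if hasconflicts else 0        # conventional hg exit code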
1154
1160
1155
1161
1156 # naming conflict in updatetotally()
1162 # naming conflict in updatetotally()
1157 _clean = clean
1163 _clean = clean
1158
1164
1159 _VALID_UPDATECHECKS = {
1165 _VALID_UPDATECHECKS = {
1160 mergemod.UPDATECHECK_ABORT,
1166 mergemod.UPDATECHECK_ABORT,
1161 mergemod.UPDATECHECK_NONE,
1167 mergemod.UPDATECHECK_NONE,
1162 mergemod.UPDATECHECK_LINEAR,
1168 mergemod.UPDATECHECK_LINEAR,
1163 mergemod.UPDATECHECK_NO_CONFLICT,
1169 mergemod.UPDATECHECK_NO_CONFLICT,
1164 }
1170 }
1165
1171
1166
1172
1167 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1173 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1168 """Update the working directory with extra care for non-file components
1174 """Update the working directory with extra care for non-file components
1169
1175
1170 This takes care of non-file components below:
1176 This takes care of non-file components below:
1171
1177
1172 :bookmark: might be advanced or (in)activated
1178 :bookmark: might be advanced or (in)activated
1173
1179
1174 This takes arguments below:
1180 This takes arguments below:
1175
1181
1176 :checkout: to which revision the working directory is updated
1182 :checkout: to which revision the working directory is updated
1177 :brev: a name, which might be a bookmark to be activated after updating
1183 :brev: a name, which might be a bookmark to be activated after updating
1178 :clean: whether changes in the working directory can be discarded
1184 :clean: whether changes in the working directory can be discarded
1179 :updatecheck: how to deal with a dirty working directory
1185 :updatecheck: how to deal with a dirty working directory
1180
1186
1181 Valid values for updatecheck are the UPDATECHECK_* constants
1187 Valid values for updatecheck are the UPDATECHECK_* constants
1182 defined in the merge module. Passing `None` will result in using the
1188 defined in the merge module. Passing `None` will result in using the
1183 configured default.
1189 configured default.
1184
1190
1185 * ABORT: abort if the working directory is dirty
1191 * ABORT: abort if the working directory is dirty
1186 * NONE: don't check (merge working directory changes into destination)
1192 * NONE: don't check (merge working directory changes into destination)
1187 * LINEAR: check that update is linear before merging working directory
1193 * LINEAR: check that update is linear before merging working directory
1188 changes into destination
1194 changes into destination
1189 * NO_CONFLICT: check that the update does not result in file merges
1195 * NO_CONFLICT: check that the update does not result in file merges
1190
1196
1191 This returns whether a conflict was detected while updating.
1197 This returns whether a conflict was detected while updating.
1192 """
1198 """
1193 if updatecheck is None:
1199 if updatecheck is None:
1194 updatecheck = ui.config(b'commands', b'update.check')
1200 updatecheck = ui.config(b'commands', b'update.check')
1195 if updatecheck not in _VALID_UPDATECHECKS:
1201 if updatecheck not in _VALID_UPDATECHECKS:
1196 # If not configured, or invalid value configured
1202 # If not configured, or invalid value configured
1197 updatecheck = mergemod.UPDATECHECK_LINEAR
1203 updatecheck = mergemod.UPDATECHECK_LINEAR
1198 if updatecheck not in _VALID_UPDATECHECKS:
1204 if updatecheck not in _VALID_UPDATECHECKS:
1199 raise ValueError(
1205 raise ValueError(
1200 r'Invalid updatecheck value %r (can accept %r)'
1206 r'Invalid updatecheck value %r (can accept %r)'
1201 % (updatecheck, _VALID_UPDATECHECKS)
1207 % (updatecheck, _VALID_UPDATECHECKS)
1202 )
1208 )
1203 with repo.wlock():
1209 with repo.wlock():
1204 movemarkfrom = None
1210 movemarkfrom = None
1205 warndest = False
1211 warndest = False
1206 if checkout is None:
1212 if checkout is None:
1207 updata = destutil.destupdate(repo, clean=clean)
1213 updata = destutil.destupdate(repo, clean=clean)
1208 checkout, movemarkfrom, brev = updata
1214 checkout, movemarkfrom, brev = updata
1209 warndest = True
1215 warndest = True
1210
1216
1211 if clean:
1217 if clean:
1212 ret = _clean(repo, checkout)
1218 ret = _clean(repo, checkout)
1213 else:
1219 else:
1214 if updatecheck == mergemod.UPDATECHECK_ABORT:
1220 if updatecheck == mergemod.UPDATECHECK_ABORT:
1215 cmdutil.bailifchanged(repo, merge=False)
1221 cmdutil.bailifchanged(repo, merge=False)
1216 updatecheck = mergemod.UPDATECHECK_NONE
1222 updatecheck = mergemod.UPDATECHECK_NONE
1217 ret = _update(repo, checkout, updatecheck=updatecheck)
1223 ret = _update(repo, checkout, updatecheck=updatecheck)
1218
1224
1219 if not ret and movemarkfrom:
1225 if not ret and movemarkfrom:
1220 if movemarkfrom == repo[b'.'].node():
1226 if movemarkfrom == repo[b'.'].node():
1221 pass # no-op update
1227 pass # no-op update
1222 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1228 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1223 b = ui.label(repo._activebookmark, b'bookmarks.active')
1229 b = ui.label(repo._activebookmark, b'bookmarks.active')
1224 ui.status(_(b"updating bookmark %s\n") % b)
1230 ui.status(_(b"updating bookmark %s\n") % b)
1225 else:
1231 else:
1226 # this can happen with a non-linear update
1232 # this can happen with a non-linear update
1227 b = ui.label(repo._activebookmark, b'bookmarks')
1233 b = ui.label(repo._activebookmark, b'bookmarks')
1228 ui.status(_(b"(leaving bookmark %s)\n") % b)
1234 ui.status(_(b"(leaving bookmark %s)\n") % b)
1229 bookmarks.deactivate(repo)
1235 bookmarks.deactivate(repo)
1230 elif brev in repo._bookmarks:
1236 elif brev in repo._bookmarks:
1231 if brev != repo._activebookmark:
1237 if brev != repo._activebookmark:
1232 b = ui.label(brev, b'bookmarks.active')
1238 b = ui.label(brev, b'bookmarks.active')
1233 ui.status(_(b"(activating bookmark %s)\n") % b)
1239 ui.status(_(b"(activating bookmark %s)\n") % b)
1234 bookmarks.activate(repo, brev)
1240 bookmarks.activate(repo, brev)
1235 elif brev:
1241 elif brev:
1236 if repo._activebookmark:
1242 if repo._activebookmark:
1237 b = ui.label(repo._activebookmark, b'bookmarks')
1243 b = ui.label(repo._activebookmark, b'bookmarks')
1238 ui.status(_(b"(leaving bookmark %s)\n") % b)
1244 ui.status(_(b"(leaving bookmark %s)\n") % b)
1239 bookmarks.deactivate(repo)
1245 bookmarks.deactivate(repo)
1240
1246
1241 if warndest:
1247 if warndest:
1242 destutil.statusotherdests(ui, repo)
1248 destutil.statusotherdests(ui, repo)
1243
1249
1244 return ret
1250 return ret
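A minimal caller sketch for updatetotally(), assuming a command function that already has ui and repo; it shows how the UPDATECHECK_* constants documented above are passed through:

from mercurial import merge as mergemod

def example_safe_update(ui, repo, rev):
    # refuse to update away from a dirty working directory, and activate
    # `rev` afterwards if it names a bookmark
    return updatetotally(
        ui, repo, rev, brev=rev,
        updatecheck=mergemod.UPDATECHECK_ABORT,
    )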
1245
1251
1246
1252
1247 def merge(
1253 def merge(
1248 ctx,
1254 ctx,
1249 force=False,
1255 force=False,
1250 remind=True,
1256 remind=True,
1251 labels=None,
1257 labels=None,
1252 ):
1258 ):
1253 """Branch merge with node, resolving changes. Return true if any
1259 """Branch merge with node, resolving changes. Return true if any
1254 unresolved conflicts."""
1260 unresolved conflicts."""
1255 repo = ctx.repo()
1261 repo = ctx.repo()
1256 stats = mergemod.merge(ctx, force=force, labels=labels)
1262 stats = mergemod.merge(ctx, force=force, labels=labels)
1257 _showstats(repo, stats)
1263 _showstats(repo, stats)
1258 if stats.unresolvedcount:
1264 if stats.unresolvedcount:
1259 repo.ui.status(
1265 repo.ui.status(
1260 _(
1266 _(
1261 b"use 'hg resolve' to retry unresolved file merges "
1267 b"use 'hg resolve' to retry unresolved file merges "
1262 b"or 'hg merge --abort' to abandon\n"
1268 b"or 'hg merge --abort' to abandon\n"
1263 )
1269 )
1264 )
1270 )
1265 elif remind:
1271 elif remind:
1266 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1272 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1267 return stats.unresolvedcount > 0
1273 return stats.unresolvedcount > 0
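merge() reports conflicts the same way: a truthy return means unresolved files remain in the working directory. A hedged caller sketch, assuming the changeset to merge with is already known:

def example_merge(repo, node, force=False):
    ctx = repo[node]
    if merge(ctx, force=force, labels=[b'working copy', b'merge rev']):
        # unresolved conflicts were left behind; let the user run 'hg resolve'
        return 1
    return 0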
1268
1274
1269
1275
1270 def abortmerge(ui, repo):
1276 def abortmerge(ui, repo):
1271 ms = mergestatemod.mergestate.read(repo)
1277 ms = mergestatemod.mergestate.read(repo)
1272 if ms.active():
1278 if ms.active():
1273 # there were conflicts
1279 # there were conflicts
1274 node = ms.localctx.hex()
1280 node = ms.localctx.hex()
1275 else:
1281 else:
1276 # there were no conflicts, so the mergestate was not stored
1282 # there were no conflicts, so the mergestate was not stored
1277 node = repo[b'.'].hex()
1283 node = repo[b'.'].hex()
1278
1284
1279 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1285 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1280 stats = mergemod.clean_update(repo[node])
1286 stats = mergemod.clean_update(repo[node])
1281 assert stats.unresolvedcount == 0
1287 assert stats.unresolvedcount == 0
1282 _showstats(repo, stats)
1288 _showstats(repo, stats)
1283
1289
1284
1290
1285 def _incoming(
1291 def _incoming(
1286 displaychlist,
1292 displaychlist,
1287 subreporecurse,
1293 subreporecurse,
1288 ui,
1294 ui,
1289 repo,
1295 repo,
1290 source,
1296 source,
1291 opts,
1297 opts,
1292 buffered=False,
1298 buffered=False,
1293 subpath=None,
1299 subpath=None,
1294 ):
1300 ):
1295 """
1301 """
1296 Helper for incoming / gincoming.
1302 Helper for incoming / gincoming.
1297 displaychlist gets called with
1303 displaychlist gets called with
1298 (remoterepo, incomingchangesetlist, displayer) parameters,
1304 (remoterepo, incomingchangesetlist, displayer) parameters,
1299 and is supposed to contain only code that can't be unified.
1305 and is supposed to contain only code that can't be unified.
1300 """
1306 """
1301 srcs = urlutil.get_pull_paths(repo, ui, [source])
1307 srcs = urlutil.get_pull_paths(repo, ui, [source])
1302 srcs = list(srcs)
1308 srcs = list(srcs)
1303 if len(srcs) != 1:
1309 if len(srcs) != 1:
1304 msg = _(b'for now, incoming supports only a single source, %d provided')
1310 msg = _(b'for now, incoming supports only a single source, %d provided')
1305 msg %= len(srcs)
1311 msg %= len(srcs)
1306 raise error.Abort(msg)
1312 raise error.Abort(msg)
1307 path = srcs[0]
1313 path = srcs[0]
1308 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1314 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1309 if subpath is not None:
1315 if subpath is not None:
1310 subpath = urlutil.url(subpath)
1316 subpath = urlutil.url(subpath)
1311 if subpath.isabs():
1317 if subpath.isabs():
1312 source = bytes(subpath)
1318 source = bytes(subpath)
1313 else:
1319 else:
1314 p = urlutil.url(source)
1320 p = urlutil.url(source)
1315 if p.islocal():
1321 if p.islocal():
1316 normpath = os.path.normpath
1322 normpath = os.path.normpath
1317 else:
1323 else:
1318 normpath = posixpath.normpath
1324 normpath = posixpath.normpath
1319 p.path = normpath(b'%s/%s' % (p.path, subpath))
1325 p.path = normpath(b'%s/%s' % (p.path, subpath))
1320 source = bytes(p)
1326 source = bytes(p)
1321 other = peer(repo, opts, source)
1327 other = peer(repo, opts, source)
1322 cleanupfn = other.close
1328 cleanupfn = other.close
1323 try:
1329 try:
1324 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1330 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1325 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1331 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1326
1332
1327 if revs:
1333 if revs:
1328 revs = [other.lookup(rev) for rev in revs]
1334 revs = [other.lookup(rev) for rev in revs]
1329 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1335 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1330 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1336 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1331 )
1337 )
1332
1338
1333 if not chlist:
1339 if not chlist:
1334 ui.status(_(b"no changes found\n"))
1340 ui.status(_(b"no changes found\n"))
1335 return subreporecurse()
1341 return subreporecurse()
1336 ui.pager(b'incoming')
1342 ui.pager(b'incoming')
1337 displayer = logcmdutil.changesetdisplayer(
1343 displayer = logcmdutil.changesetdisplayer(
1338 ui, other, opts, buffered=buffered
1344 ui, other, opts, buffered=buffered
1339 )
1345 )
1340 displaychlist(other, chlist, displayer)
1346 displaychlist(other, chlist, displayer)
1341 displayer.close()
1347 displayer.close()
1342 finally:
1348 finally:
1343 cleanupfn()
1349 cleanupfn()
1344 subreporecurse()
1350 subreporecurse()
1345 return 0 # exit code is zero since we found incoming changes
1351 return 0 # exit code is zero since we found incoming changes
1346
1352
1347
1353
1348 def incoming(ui, repo, source, opts, subpath=None):
1354 def incoming(ui, repo, source, opts, subpath=None):
1349 def subreporecurse():
1355 def subreporecurse():
1350 ret = 1
1356 ret = 1
1351 if opts.get(b'subrepos'):
1357 if opts.get(b'subrepos'):
1352 ctx = repo[None]
1358 ctx = repo[None]
1353 for subpath in sorted(ctx.substate):
1359 for subpath in sorted(ctx.substate):
1354 sub = ctx.sub(subpath)
1360 sub = ctx.sub(subpath)
1355 ret = min(ret, sub.incoming(ui, source, opts))
1361 ret = min(ret, sub.incoming(ui, source, opts))
1356 return ret
1362 return ret
1357
1363
1358 def display(other, chlist, displayer):
1364 def display(other, chlist, displayer):
1359 limit = logcmdutil.getlimit(opts)
1365 limit = logcmdutil.getlimit(opts)
1360 if opts.get(b'newest_first'):
1366 if opts.get(b'newest_first'):
1361 chlist.reverse()
1367 chlist.reverse()
1362 count = 0
1368 count = 0
1363 for n in chlist:
1369 for n in chlist:
1364 if limit is not None and count >= limit:
1370 if limit is not None and count >= limit:
1365 break
1371 break
1366 parents = [
1372 parents = [
1367 p for p in other.changelog.parents(n) if p != repo.nullid
1373 p for p in other.changelog.parents(n) if p != repo.nullid
1368 ]
1374 ]
1369 if opts.get(b'no_merges') and len(parents) == 2:
1375 if opts.get(b'no_merges') and len(parents) == 2:
1370 continue
1376 continue
1371 count += 1
1377 count += 1
1372 displayer.show(other[n])
1378 displayer.show(other[n])
1373
1379
1374 return _incoming(
1380 return _incoming(
1375 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1381 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1376 )
1382 )
1377
1383
1378
1384
1379 def _outgoing(ui, repo, dests, opts, subpath=None):
1385 def _outgoing(ui, repo, dests, opts, subpath=None):
1380 out = set()
1386 out = set()
1381 others = []
1387 others = []
1382 for path in urlutil.get_push_paths(repo, ui, dests):
1388 for path in urlutil.get_push_paths(repo, ui, dests):
1383 dest = path.loc
1389 dest = path.loc
1384 if subpath is not None:
1390 if subpath is not None:
1385 subpath = urlutil.url(subpath)
1391 subpath = urlutil.url(subpath)
1386 if subpath.isabs():
1392 if subpath.isabs():
1387 dest = bytes(subpath)
1393 dest = bytes(subpath)
1388 else:
1394 else:
1389 p = urlutil.url(dest)
1395 p = urlutil.url(dest)
1390 if p.islocal():
1396 if p.islocal():
1391 normpath = os.path.normpath
1397 normpath = os.path.normpath
1392 else:
1398 else:
1393 normpath = posixpath.normpath
1399 normpath = posixpath.normpath
1394 p.path = normpath(b'%s/%s' % (p.path, subpath))
1400 p.path = normpath(b'%s/%s' % (p.path, subpath))
1395 dest = bytes(p)
1401 dest = bytes(p)
1396 branches = path.branch, opts.get(b'branch') or []
1402 branches = path.branch, opts.get(b'branch') or []
1397
1403
1398 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1404 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1399 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1405 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1400 if revs:
1406 if revs:
1401 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1407 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1402
1408
1403 other = peer(repo, opts, dest)
1409 other = peer(repo, opts, dest)
1404 try:
1410 try:
1405 outgoing = discovery.findcommonoutgoing(
1411 outgoing = discovery.findcommonoutgoing(
1406 repo, other, revs, force=opts.get(b'force')
1412 repo, other, revs, force=opts.get(b'force')
1407 )
1413 )
1408 o = outgoing.missing
1414 o = outgoing.missing
1409 out.update(o)
1415 out.update(o)
1410 if not o:
1416 if not o:
1411 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1417 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1412 others.append(other)
1418 others.append(other)
1413 except: # re-raises
1419 except: # re-raises
1414 other.close()
1420 other.close()
1415 raise
1421 raise
1416 # make sure this is ordered by revision number
1422 # make sure this is ordered by revision number
1417 outgoing_revs = list(out)
1423 outgoing_revs = list(out)
1418 cl = repo.changelog
1424 cl = repo.changelog
1419 outgoing_revs.sort(key=cl.rev)
1425 outgoing_revs.sort(key=cl.rev)
1420 return outgoing_revs, others
1426 return outgoing_revs, others
1421
1427
1422
1428
1423 def _outgoing_recurse(ui, repo, dests, opts):
1429 def _outgoing_recurse(ui, repo, dests, opts):
1424 ret = 1
1430 ret = 1
1425 if opts.get(b'subrepos'):
1431 if opts.get(b'subrepos'):
1426 ctx = repo[None]
1432 ctx = repo[None]
1427 for subpath in sorted(ctx.substate):
1433 for subpath in sorted(ctx.substate):
1428 sub = ctx.sub(subpath)
1434 sub = ctx.sub(subpath)
1429 ret = min(ret, sub.outgoing(ui, dests, opts))
1435 ret = min(ret, sub.outgoing(ui, dests, opts))
1430 return ret
1436 return ret
1431
1437
1432
1438
1433 def _outgoing_filter(repo, revs, opts):
1439 def _outgoing_filter(repo, revs, opts):
1434 """apply revision filtering/ordering option for outgoing"""
1440 """apply revision filtering/ordering option for outgoing"""
1435 limit = logcmdutil.getlimit(opts)
1441 limit = logcmdutil.getlimit(opts)
1436 no_merges = opts.get(b'no_merges')
1442 no_merges = opts.get(b'no_merges')
1437 if opts.get(b'newest_first'):
1443 if opts.get(b'newest_first'):
1438 revs.reverse()
1444 revs.reverse()
1439 if limit is None and not no_merges:
1445 if limit is None and not no_merges:
1440 for r in revs:
1446 for r in revs:
1441 yield r
1447 yield r
1442 return
1448 return
1443
1449
1444 count = 0
1450 count = 0
1445 cl = repo.changelog
1451 cl = repo.changelog
1446 for n in revs:
1452 for n in revs:
1447 if limit is not None and count >= limit:
1453 if limit is not None and count >= limit:
1448 break
1454 break
1449 parents = [p for p in cl.parents(n) if p != repo.nullid]
1455 parents = [p for p in cl.parents(n) if p != repo.nullid]
1450 if no_merges and len(parents) == 2:
1456 if no_merges and len(parents) == 2:
1451 continue
1457 continue
1452 count += 1
1458 count += 1
1453 yield n
1459 yield n
1454
1460
1455
1461
1456 def outgoing(ui, repo, dests, opts, subpath=None):
1462 def outgoing(ui, repo, dests, opts, subpath=None):
1457 if opts.get(b'graph'):
1463 if opts.get(b'graph'):
1458 logcmdutil.checkunsupportedgraphflags([], opts)
1464 logcmdutil.checkunsupportedgraphflags([], opts)
1459 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1465 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1460 ret = 1
1466 ret = 1
1461 try:
1467 try:
1462 if o:
1468 if o:
1463 ret = 0
1469 ret = 0
1464
1470
1465 if opts.get(b'graph'):
1471 if opts.get(b'graph'):
1466 revdag = logcmdutil.graphrevs(repo, o, opts)
1472 revdag = logcmdutil.graphrevs(repo, o, opts)
1467 ui.pager(b'outgoing')
1473 ui.pager(b'outgoing')
1468 displayer = logcmdutil.changesetdisplayer(
1474 displayer = logcmdutil.changesetdisplayer(
1469 ui, repo, opts, buffered=True
1475 ui, repo, opts, buffered=True
1470 )
1476 )
1471 logcmdutil.displaygraph(
1477 logcmdutil.displaygraph(
1472 ui, repo, revdag, displayer, graphmod.asciiedges
1478 ui, repo, revdag, displayer, graphmod.asciiedges
1473 )
1479 )
1474 else:
1480 else:
1475 ui.pager(b'outgoing')
1481 ui.pager(b'outgoing')
1476 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1482 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1477 for n in _outgoing_filter(repo, o, opts):
1483 for n in _outgoing_filter(repo, o, opts):
1478 displayer.show(repo[n])
1484 displayer.show(repo[n])
1479 displayer.close()
1485 displayer.close()
1480 for oth in others:
1486 for oth in others:
1481 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1487 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1482 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1488 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1483 return ret # exit code is zero when outgoing changes were found
1489 return ret # exit code is zero when outgoing changes were found
1484 finally:
1490 finally:
1485 for oth in others:
1491 for oth in others:
1486 oth.close()
1492 oth.close()
1487
1493
1488
1494
1489 def verify(repo, level=None):
1495 def verify(repo, level=None):
1490 """verify the consistency of a repository"""
1496 """verify the consistency of a repository"""
1491 ret = verifymod.verify(repo, level=level)
1497 ret = verifymod.verify(repo, level=level)
1492
1498
1493 # Broken subrepo references in hidden csets don't seem worth worrying about,
1499 # Broken subrepo references in hidden csets don't seem worth worrying about,
1494 # since they can't be pushed/pulled, and --hidden can be used if they are a
1500 # since they can't be pushed/pulled, and --hidden can be used if they are a
1495 # concern.
1501 # concern.
1496
1502
1497 # pathto() is needed for -R case
1503 # pathto() is needed for -R case
1498 revs = repo.revs(
1504 revs = repo.revs(
1499 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1505 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1500 )
1506 )
1501
1507
1502 if revs:
1508 if revs:
1503 repo.ui.status(_(b'checking subrepo links\n'))
1509 repo.ui.status(_(b'checking subrepo links\n'))
1504 for rev in revs:
1510 for rev in revs:
1505 ctx = repo[rev]
1511 ctx = repo[rev]
1506 try:
1512 try:
1507 for subpath in ctx.substate:
1513 for subpath in ctx.substate:
1508 try:
1514 try:
1509 ret = (
1515 ret = (
1510 ctx.sub(subpath, allowcreate=False).verify() or ret
1516 ctx.sub(subpath, allowcreate=False).verify() or ret
1511 )
1517 )
1512 except error.RepoError as e:
1518 except error.RepoError as e:
1513 repo.ui.warn(b'%d: %s\n' % (rev, e))
1519 repo.ui.warn(b'%d: %s\n' % (rev, e))
1514 except Exception:
1520 except Exception:
1515 repo.ui.warn(
1521 repo.ui.warn(
1516 _(b'.hgsubstate is corrupt in revision %s\n')
1522 _(b'.hgsubstate is corrupt in revision %s\n')
1517 % short(ctx.node())
1523 % short(ctx.node())
1518 )
1524 )
1519
1525
1520 return ret
1526 return ret
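verify() wraps verifymod.verify() and additionally walks the revisions touching .hgsubstate to check subrepository links. A minimal sketch of using its return value (the calling context is assumed):

def example_verify(repo):
    # a non-zero / truthy result means at least one integrity or subrepo
    # problem was reported
    problems = verify(repo)
    if problems:
        repo.ui.warn(b'repository verification reported problems\n')
    return problems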
1521
1527
1522
1528
1523 def remoteui(src, opts):
1529 def remoteui(src, opts):
1524 """build a remote ui from ui or repo and opts"""
1530 """build a remote ui from ui or repo and opts"""
1525 if util.safehasattr(src, b'baseui'): # looks like a repository
1531 if util.safehasattr(src, b'baseui'): # looks like a repository
1526 dst = src.baseui.copy() # drop repo-specific config
1532 dst = src.baseui.copy() # drop repo-specific config
1527 src = src.ui # copy target options from repo
1533 src = src.ui # copy target options from repo
1528 else: # assume it's a global ui object
1534 else: # assume it's a global ui object
1529 dst = src.copy() # keep all global options
1535 dst = src.copy() # keep all global options
1530
1536
1531 # copy ssh-specific options
1537 # copy ssh-specific options
1532 for o in b'ssh', b'remotecmd':
1538 for o in b'ssh', b'remotecmd':
1533 v = opts.get(o) or src.config(b'ui', o)
1539 v = opts.get(o) or src.config(b'ui', o)
1534 if v:
1540 if v:
1535 dst.setconfig(b"ui", o, v, b'copied')
1541 dst.setconfig(b"ui", o, v, b'copied')
1536
1542
1537 # copy bundle-specific options
1543 # copy bundle-specific options
1538 r = src.config(b'bundle', b'mainreporoot')
1544 r = src.config(b'bundle', b'mainreporoot')
1539 if r:
1545 if r:
1540 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1546 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1541
1547
1542 # copy selected local settings to the remote ui
1548 # copy selected local settings to the remote ui
1543 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1549 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1544 for key, val in src.configitems(sect):
1550 for key, val in src.configitems(sect):
1545 dst.setconfig(sect, key, val, b'copied')
1551 dst.setconfig(sect, key, val, b'copied')
1546 v = src.config(b'web', b'cacerts')
1552 v = src.config(b'web', b'cacerts')
1547 if v:
1553 if v:
1548 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1554 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1549
1555
1550 return dst
1556 return dst
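remoteui() builds the ui object handed to remote peers, copying only the settings that are safe and useful on the wire. A sketch of calling it directly; the opts values shown are assumptions standing in for the global --ssh/--remotecmd command-line flags:

def example_remote_ui(repo):
    opts = {b'ssh': b'ssh -C', b'remotecmd': b'hg'}
    dst = remoteui(repo, opts)
    # dst now carries ssh, auth, and proxy settings copied from repo.ui,
    # but none of the repository-local configuration
    return dst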
1551
1557
1552
1558
1553 # Files of interest
1559 # Files of interest
1554 # Used to check whether the repository has changed, by looking at the
1560 # Used to check whether the repository has changed, by looking at the
1555 # mtime and size of these files.
1561 # mtime and size of these files.
1556 foi = [
1562 foi = [
1557 (b'spath', b'00changelog.i'),
1563 (b'spath', b'00changelog.i'),
1558 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1564 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1559 (b'spath', b'obsstore'),
1565 (b'spath', b'obsstore'),
1560 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1566 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1561 ]
1567 ]
1562
1568
1563
1569
1564 class cachedlocalrepo:
1570 class cachedlocalrepo:
1565 """Holds a localrepository that can be cached and reused."""
1571 """Holds a localrepository that can be cached and reused."""
1566
1572
1567 def __init__(self, repo):
1573 def __init__(self, repo):
1568 """Create a new cached repo from an existing repo.
1574 """Create a new cached repo from an existing repo.
1569
1575
1570 We assume the passed-in repo was recently created. If the
1576 We assume the passed-in repo was recently created. If the
1571 repo has changed between when it was created and when it was
1577 repo has changed between when it was created and when it was
1572 turned into a cache, it may not refresh properly.
1578 turned into a cache, it may not refresh properly.
1573 """
1579 """
1574 assert isinstance(repo, localrepo.localrepository)
1580 assert isinstance(repo, localrepo.localrepository)
1575 self._repo = repo
1581 self._repo = repo
1576 self._state, self.mtime = self._repostate()
1582 self._state, self.mtime = self._repostate()
1577 self._filtername = repo.filtername
1583 self._filtername = repo.filtername
1578
1584
1579 def fetch(self):
1585 def fetch(self):
1580 """Refresh (if necessary) and return a repository.
1586 """Refresh (if necessary) and return a repository.
1581
1587
1582 If the cached instance is out of date, it will be recreated
1588 If the cached instance is out of date, it will be recreated
1583 automatically and returned.
1589 automatically and returned.
1584
1590
1585 Returns a tuple of the repo and a boolean indicating whether a new
1591 Returns a tuple of the repo and a boolean indicating whether a new
1586 repo instance was created.
1592 repo instance was created.
1587 """
1593 """
1588 # We compare the mtimes and sizes of some well-known files to
1594 # We compare the mtimes and sizes of some well-known files to
1589 # determine if the repo changed. This is not precise, as mtimes
1595 # determine if the repo changed. This is not precise, as mtimes
1590 # are susceptible to clock skew and imprecise filesystems and
1596 # are susceptible to clock skew and imprecise filesystems and
1591 # file content can change while maintaining the same size.
1597 # file content can change while maintaining the same size.
1592
1598
1593 state, mtime = self._repostate()
1599 state, mtime = self._repostate()
1594 if state == self._state:
1600 if state == self._state:
1595 return self._repo, False
1601 return self._repo, False
1596
1602
1597 repo = repository(self._repo.baseui, self._repo.url())
1603 repo = repository(self._repo.baseui, self._repo.url())
1598 if self._filtername:
1604 if self._filtername:
1599 self._repo = repo.filtered(self._filtername)
1605 self._repo = repo.filtered(self._filtername)
1600 else:
1606 else:
1601 self._repo = repo.unfiltered()
1607 self._repo = repo.unfiltered()
1602 self._state = state
1608 self._state = state
1603 self.mtime = mtime
1609 self.mtime = mtime
1604
1610
1605 return self._repo, True
1611 return self._repo, True
1606
1612
1607 def _repostate(self):
1613 def _repostate(self):
1608 state = []
1614 state = []
1609 maxmtime = -1
1615 maxmtime = -1
1610 for attr, fname in foi:
1616 for attr, fname in foi:
1611 prefix = getattr(self._repo, attr)
1617 prefix = getattr(self._repo, attr)
1612 p = os.path.join(prefix, fname)
1618 p = os.path.join(prefix, fname)
1613 try:
1619 try:
1614 st = os.stat(p)
1620 st = os.stat(p)
1615 except OSError:
1621 except OSError:
1616 st = os.stat(prefix)
1622 st = os.stat(prefix)
1617 state.append((st[stat.ST_MTIME], st.st_size))
1623 state.append((st[stat.ST_MTIME], st.st_size))
1618 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1624 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1619
1625
1620 return tuple(state), maxmtime
1626 return tuple(state), maxmtime
1621
1627
1622 def copy(self):
1628 def copy(self):
1623 """Obtain a copy of this class instance.
1629 """Obtain a copy of this class instance.
1624
1630
1625 A new localrepository instance is obtained. The new instance should be
1631 A new localrepository instance is obtained. The new instance should be
1626 completely independent of the original.
1632 completely independent of the original.
1627 """
1633 """
1628 repo = repository(self._repo.baseui, self._repo.origroot)
1634 repo = repository(self._repo.baseui, self._repo.origroot)
1629 if self._filtername:
1635 if self._filtername:
1630 repo = repo.filtered(self._filtername)
1636 repo = repo.filtered(self._filtername)
1631 else:
1637 else:
1632 repo = repo.unfiltered()
1638 repo = repo.unfiltered()
1633 c = cachedlocalrepo(repo)
1639 c = cachedlocalrepo(repo)
1634 c._state = self._state
1640 c._state = self._state
1635 c.mtime = self.mtime
1641 c.mtime = self.mtime
1636 return c
1642 return c
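A hedged usage sketch for cachedlocalrepo, for a long-running process (such as hgweb) that wants to reuse a repository object across requests while still noticing on-disk changes; the surrounding server loop is assumed:

def example_handle_request(cache):
    repo, fresh = cache.fetch()
    if fresh:
        # one of the files of interest changed on disk, so fetch() created
        # a new repository instance behind the scenes
        repo.ui.debug(b'reloaded repository %s\n' % repo.root)
    return repo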