##// END OF EJS Templates
cachedlocalrepo: use sysstr for attribute name of "file of interest"...
marmoute -
r51804:1339158a default
parent child Browse files
Show More
@@ -1,1681 +1,1681 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# Convenience alias so callers can release one-or-more locks in one call.
release = lock.release

# shared features
# Items listed in a share's .hg/shared file; currently only bookmarks can be
# shared between a share and its source repository.
sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
    """Resolve branch names against a peer and merge them into ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    ``urlutil.parseurl``.  Returns a ``(revs, checkout)`` pair where ``revs``
    is the augmented revision list (or the original ``revs``/``None`` when
    nothing was requested) and ``checkout`` is the revision to update to.

    Raises ``error.Abort`` when the peer cannot look up branches, and
    ``error.RepoLookupError`` for unknown branch names.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer(remotehidden=remotehidden)
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append the heads of ``branch`` to ``revs``; return whether the
        # branch was found in the peer's branchmap.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: treat it as a raw hash/revision
            revs.append(hashbranch)
    return revs, revs[0]
114
114
115
115
116 def _isfile(path):
116 def _isfile(path):
117 try:
117 try:
118 # we use os.stat() directly here instead of os.path.isfile()
118 # we use os.stat() directly here instead of os.path.isfile()
119 # because the latter started returning `False` on invalid path
119 # because the latter started returning `False` on invalid path
120 # exceptions starting in 3.8 and we care about handling
120 # exceptions starting in 3.8 and we care about handling
121 # invalid paths specially here.
121 # invalid paths specially here.
122 st = os.stat(path)
122 st = os.stat(path)
123 except ValueError as e:
123 except ValueError as e:
124 msg = stringutil.forcebytestr(e)
124 msg = stringutil.forcebytestr(e)
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 except OSError:
126 except OSError:
127 return False
127 return False
128 else:
128 else:
129 return stat.S_ISREG(st.st_mode)
129 return stat.S_ISREG(st.st_mode)
130
130
131
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo

    A ``file:`` path can point either at a repository directory or at a
    bundle file; this factory inspects the filesystem and forwards to the
    appropriate module.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        """Return True unless ``path`` is a (bundle) file."""
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        """Instantiate a repo object, dispatching on whether ``path``
        is a regular file (bundle) or a directory (local repo)."""
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
148
148
149
149
# URL scheme -> module (or module-like factory) able to instantiate a
# *local* repository object for that scheme.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module able to create a remote *peer* for that scheme.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162
162
163
163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        u = urlutil.url(repo)
        scheme = u.scheme or b'file'
        if scheme in peer_schemes:
            cls = peer_schemes[scheme]
            cls.make_peer  # make sure we load the module
        elif scheme in repo_schemes:
            cls = repo_schemes[scheme]
            cls.instance  # make sure we load the module
        else:
            cls = LocalFactory
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        # class offers no islocal() -> assume non-local
        return False
    # legacy calling convention with a repo object; deprecated since 6.4
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
182
182
183
183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        # local file: plain binary open
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
191
191
192
192
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
195
195
196
196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repo or peer object.

    Runs ``presetupfuncs`` first, then every extension's ``reposetup``
    hook, and finally ``wirepeersetupfuncs`` for non-local objects.
    Timing of each hook is logged to the 'extension' log channel.
    """
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Raises ``error.Abort`` when ``path`` uses a remote-only scheme.
    The returned repository is filtered to the 'visible' view.
    """
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            # remote scheme: callers must use peer() instead
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    return repo.filtered(b'visible')
243
243
244
244
def peer(
    uiorrepo,
    opts,
    path,
    create=False,
    intents=None,
    createopts=None,
    remotehidden=False,
):
    '''return a repository peer for the specified path'''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        # true remote scheme: build a wire peer
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
            remotehidden=remotehidden,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path, remotehidden=remotehidden)
    return peer
288
288
289
289
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # last path component, ignoring any trailing slash
    return os.path.basename(os.path.normpath(path))
310
310
311
311
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # memoized on the repo object by a previous call
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache for the next call
    repo.srcrepo = srcrepo
    return srcrepo
329
329
330
330
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a local path (bytes) or a repo/peer object.
    Returns the newly created share as a repository object.
    Raises ``error.Abort`` when ``source`` is not local.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo sees its final (shared) configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
383
383
384
384
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy over
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first so the local config keeps precedence
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
405
405
406
406
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for forensics rather than deleting
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    #       removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure stale references to the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
453
453
454
454
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # propagate the narrowspec into the new share's working copy
        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
            b"narrow-share"
        ):
            narrowspec.copytoworkingcopy(destrepo)
473
473
474
474
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # fall back through sensible default update targets
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
495
495
496
496
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    The lock (if any) is taken on the destination as soon as revlog data is
    about to be copied, and is released on error; callers own it on success.
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # phase data of a publishing repo is implicit; don't copy it
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
535
535
536
536
537 def clonewithshare(
537 def clonewithshare(
538 ui,
538 ui,
539 peeropts,
539 peeropts,
540 sharepath,
540 sharepath,
541 source,
541 source,
542 srcpeer,
542 srcpeer,
543 dest,
543 dest,
544 pull=False,
544 pull=False,
545 rev=None,
545 rev=None,
546 update=True,
546 update=True,
547 stream=False,
547 stream=False,
548 ):
548 ):
549 """Perform a clone using a shared repo.
549 """Perform a clone using a shared repo.
550
550
551 The store for the repository will be located at <sharepath>/.hg. The
551 The store for the repository will be located at <sharepath>/.hg. The
552 specified revisions will be cloned or pulled from "source". A shared repo
552 specified revisions will be cloned or pulled from "source". A shared repo
553 will be created at "dest" and a working copy will be created if "update" is
553 will be created at "dest" and a working copy will be created if "update" is
554 True.
554 True.
555 """
555 """
556 revs = None
556 revs = None
557 if rev:
557 if rev:
558 if not srcpeer.capable(b'lookup'):
558 if not srcpeer.capable(b'lookup'):
559 raise error.Abort(
559 raise error.Abort(
560 _(
560 _(
561 b"src repository does not support "
561 b"src repository does not support "
562 b"revision lookup and so doesn't "
562 b"revision lookup and so doesn't "
563 b"support clone by revision"
563 b"support clone by revision"
564 )
564 )
565 )
565 )
566
566
567 # TODO this is batchable.
567 # TODO this is batchable.
568 remoterevs = []
568 remoterevs = []
569 for r in rev:
569 for r in rev:
570 with srcpeer.commandexecutor() as e:
570 with srcpeer.commandexecutor() as e:
571 remoterevs.append(
571 remoterevs.append(
572 e.callcommand(
572 e.callcommand(
573 b'lookup',
573 b'lookup',
574 {
574 {
575 b'key': r,
575 b'key': r,
576 },
576 },
577 ).result()
577 ).result()
578 )
578 )
579 revs = remoterevs
579 revs = remoterevs
580
580
581 # Obtain a lock before checking for or cloning the pooled repo otherwise
581 # Obtain a lock before checking for or cloning the pooled repo otherwise
582 # 2 clients may race creating or populating it.
582 # 2 clients may race creating or populating it.
583 pooldir = os.path.dirname(sharepath)
583 pooldir = os.path.dirname(sharepath)
584 # lock class requires the directory to exist.
584 # lock class requires the directory to exist.
585 try:
585 try:
586 util.makedir(pooldir, False)
586 util.makedir(pooldir, False)
587 except FileExistsError:
587 except FileExistsError:
588 pass
588 pass
589
589
590 poolvfs = vfsmod.vfs(pooldir)
590 poolvfs = vfsmod.vfs(pooldir)
591 basename = os.path.basename(sharepath)
591 basename = os.path.basename(sharepath)
592
592
593 with lock.lock(poolvfs, b'%s.lock' % basename):
593 with lock.lock(poolvfs, b'%s.lock' % basename):
594 if os.path.exists(sharepath):
594 if os.path.exists(sharepath):
595 ui.status(
595 ui.status(
596 _(b'(sharing from existing pooled repository %s)\n') % basename
596 _(b'(sharing from existing pooled repository %s)\n') % basename
597 )
597 )
598 else:
598 else:
599 ui.status(
599 ui.status(
600 _(b'(sharing from new pooled repository %s)\n') % basename
600 _(b'(sharing from new pooled repository %s)\n') % basename
601 )
601 )
602 # Always use pull mode because hardlinks in share mode don't work
602 # Always use pull mode because hardlinks in share mode don't work
603 # well. Never update because working copies aren't necessary in
603 # well. Never update because working copies aren't necessary in
604 # share mode.
604 # share mode.
605 clone(
605 clone(
606 ui,
606 ui,
607 peeropts,
607 peeropts,
608 source,
608 source,
609 dest=sharepath,
609 dest=sharepath,
610 pull=True,
610 pull=True,
611 revs=rev,
611 revs=rev,
612 update=False,
612 update=False,
613 stream=stream,
613 stream=stream,
614 )
614 )
615
615
616 # Resolve the value to put in [paths] section for the source.
616 # Resolve the value to put in [paths] section for the source.
617 if islocal(source):
617 if islocal(source):
618 defaultpath = util.abspath(urlutil.urllocalpath(source))
618 defaultpath = util.abspath(urlutil.urllocalpath(source))
619 else:
619 else:
620 defaultpath = source
620 defaultpath = source
621
621
622 sharerepo = repository(ui, path=sharepath)
622 sharerepo = repository(ui, path=sharepath)
623 destrepo = share(
623 destrepo = share(
624 ui,
624 ui,
625 sharerepo,
625 sharerepo,
626 dest=dest,
626 dest=dest,
627 update=False,
627 update=False,
628 bookmarks=False,
628 bookmarks=False,
629 defaultpath=defaultpath,
629 defaultpath=defaultpath,
630 )
630 )
631
631
632 # We need to perform a pull against the dest repo to fetch bookmarks
632 # We need to perform a pull against the dest repo to fetch bookmarks
633 # and other non-store data that isn't shared by default. In the case of
633 # and other non-store data that isn't shared by default. In the case of
634 # non-existing shared repo, this means we pull from the remote twice. This
634 # non-existing shared repo, this means we pull from the remote twice. This
635 # is a bit weird. But at the time it was implemented, there wasn't an easy
635 # is a bit weird. But at the time it was implemented, there wasn't an easy
636 # way to pull just non-changegroup data.
636 # way to pull just non-changegroup data.
637 exchange.pull(destrepo, srcpeer, heads=revs)
637 exchange.pull(destrepo, srcpeer, heads=revs)
638
638
639 _postshareupdate(destrepo, update)
639 _postshareupdate(destrepo, update)
640
640
641 return srcpeer, peer(ui, peeropts, dest)
641 return srcpeer, peer(ui, peeropts, dest)
642
642
643
643
644 # Recomputing caches is often slow on big repos, so copy them.
644 # Recomputing caches is often slow on big repos, so copy them.
645 def _copycache(srcrepo, dstcachedir, fname):
645 def _copycache(srcrepo, dstcachedir, fname):
646 """copy a cache from srcrepo to destcachedir (if it exists)"""
646 """copy a cache from srcrepo to destcachedir (if it exists)"""
647 srcfname = srcrepo.cachevfs.join(fname)
647 srcfname = srcrepo.cachevfs.join(fname)
648 dstfname = os.path.join(dstcachedir, fname)
648 dstfname = os.path.join(dstcachedir, fname)
649 if os.path.exists(srcfname):
649 if os.path.exists(srcfname):
650 if not os.path.exists(dstcachedir):
650 if not os.path.exists(dstcachedir):
651 os.mkdir(dstcachedir)
651 os.mkdir(dstcachedir)
652 util.copyfile(srcfname, dstfname)
652 util.copyfile(srcfname, dstfname)
653
653
654
654
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        if util.safehasattr(source, 'peer'):
            srcpeer = source.peer()  # in case we were called with a localrepo
        else:
            srcpeer = source
        branches = (None, branch or [])
        # XXX path: simply use the peer `path` object when this become available
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest_path = urlutil.get_clone_path_obj(ui, dest)
            if dest_path is not None:
                dest = dest_path.rawloc
            else:
                dest = b''

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if b'lfs' in extensions.disabled():
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock(), local.transaction(
                        b'narrow-clone'
                    ):
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # fix: translate the template first, then format.
                            # Formatting inside _() looked up the already
                            # substituted string, so it could never match a
                            # translation catalog entry (the sibling branch
                            # below already does this correctly).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fix: release the working-copy lock, not destlock a second
                # time — otherwise destwlock stays held until the finally
                # clause while the cache-warming below runs.
                release(destwlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1135
1135
1136
1136
def _showstats(repo, stats, quietempty=False):
    """Print the one-line update/merge summary for ``stats`` on ``repo.ui``.

    When ``quietempty`` is true and the stats report no touched files,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    # Translate the template first, then substitute the four counters.
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(template % counts)
1152
1152
1153
1153
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node`` (deprecated entry point).

    When ``overwrite`` is set, local changes are clobbered instead of
    merged.

    returns stats (see pydoc mercurial.merge.applyupdates)
    """
    # deprecated since 5.7; warn before delegating to the merge module
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1172
1172
1173
1173
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain.
    """
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved


# ``clone()`` has a local name that shadows ``update``; keep a private alias
_update = update
1185
1185
1186
1186
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    A clean update can never leave conflicts behind, so this always
    returns False.
    """
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# ``updatetotally()`` has a parameter that shadows ``clean``; keep an alias
_clean = clean
1198
1198
# The complete set of values accepted for the ``updatecheck`` argument of
# updatetotally() and the ``commands.update.check`` config knob.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
    mergemod.UPDATECHECK_ABORT,
}
1205
1205
1206
1206
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument with an invalid value is a programming error
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one and tell us
            # whether the active bookmark should move along
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then fall
                # through with no further dirtiness checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # ``ret`` is truthy on conflicts; only touch bookmarks on success
        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something that is not a bookmark:
            # deactivate any currently active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1285
1285
1286
1286
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with ``ctx``, resolving changes.

    Returns True if any unresolved conflicts remain.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1308
1308
1309
1309
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # With conflicts, the mergestate records the local side to go back to.
    # Without conflicts no mergestate was stored, so fall back to the
    # current working directory parent.
    if ms.active():
        node = ms.localctx.hex()
    else:
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot produce conflicts
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1323
1323
1324
1324
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the return value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # an absolute subrepo source overrides the parent location
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo source: join it onto the parent location,
            # using OS path rules only for local paths
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # cleanupfn may be replaced below once a bundle repo wraps the peer
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may hand back a different (bundle-backed) repo
        # object and its own cleanup callable
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1391
1391
1392
1392
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets found in ``source`` but not locally.

    Returns 0 when incoming changesets were found, 1 otherwise.
    """

    def subreporecurse():
        # recurse into subrepositories when --subrepos is given
        result = 1
        if not opts.get(b'subrepos'):
            return result
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            sub = wctx.sub(sub_path)
            result = min(result, sub.incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        # show the incoming changesets, honoring --limit, --newest-first
        # and --no-merges
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            if skipmerges:
                parents = [
                    p for p in other.changelog.parents(node) if p != repo.nullid
                ]
                if len(parents) == 2:
                    continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1422
1422
1423
1423
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute changesets missing from the push destination(s).

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the list of missing nodes sorted by local revision number and
    ``others`` is the list of still-open peers (the caller is responsible
    for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # an absolute subrepo source overrides the parent location
                dest = bytes(subpath)
            else:
                # relative subrepo source: join it onto the parent location,
                # using OS path rules only for local paths
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # only successfully-queried peers are handed back to the caller
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1466
1466
1467
1467
def _outgoing_recurse(ui, repo, dests, opts):
    """Recurse ``outgoing`` into subrepositories when --subrepos is set.

    Returns 0 when any subrepository had outgoing changesets, 1 otherwise.
    """
    result = 1
    if not opts.get(b'subrepos'):
        return result
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        result = min(result, wctx.sub(subpath).outgoing(ui, dests, opts))
    return result
1476
1476
1477
1477
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --newest-first, --limit and
    --no-merges. Note: reversal mutates ``revs`` in place.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1499
1499
1500
1500
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changesets were found (locally or in a
    subrepository), 1 otherwise.
    """
    graph = opts.get(b'graph')
    if graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    try:
        ret = 1
        if o:
            ret = 0

            if graph:
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[node])
                displayer.close()
        # run outgoing hooks against every peer, even when nothing was found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        return min(ret, _outgoing_recurse(ui, repo, dests, opts))
    finally:
        # _outgoing leaves its peers open; close them on every path
        for oth in others:
            oth.close()
1532
1532
1533
1533
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from ``.hgsubstate``
    files in visible changesets. Returns the (possibly combined)
    verification status.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a broken subrepo is reported but does not stop
                        # verification of the remaining entries
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself failed: the .hgsubstate file
                # in this revision cannot be parsed
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1566
1566
1567
1567
def remoteui(src, opts):
    """Build a ui for talking to a remote from a ui or repo plus ``opts``.

    ``src`` may be a repository (its global configuration is used as the
    base, with selected options copied over) or a plain ui object.
    """
    if util.safehasattr(src, 'baseui'):
        # src looks like a repository: start from its global config and
        # copy selected repo-level options below
        dst = src.baseui.copy()
        src = src.ui
    else:
        # src is already a global ui object: keep all of its options
        dst = src.copy()

    # ssh-specific options, from the command line or the source config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings the remote interaction needs
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1596
1596
1597
1597
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute name holding the base directory, filename);
# the attribute name is a native str, suitable for getattr().
foi = [
    ('spath', b'00changelog.i'),
    ('spath', b'phaseroots'),  # ! phase can change content at the same size
    ('spath', b'obsstore'),
    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1607
1607
1608
1608
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the "files of interest" and the newest mtime seen;
        # fetch() compares against these to detect on-disk changes
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # fingerprint changed: build a fresh repository instance, applying
        # the same filter (if any) as the one we replace
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return a ``(state, maxmtime)`` snapshot of the files of interest.

        ``state`` is a tuple of ``(mtime, size)`` pairs, one per entry in
        the module-level ``foi`` list; ``maxmtime`` is the newest mtime
        observed among them.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist (e.g. no bookmarks yet); fall back
                # to the containing directory so we still notice creation
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the already-computed fingerprint so the copy does not
        # immediately consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now