##// END OF EJS Templates
typing: add a type hint to `mercurial/hg.py`...
Matt Harbison -
r52567:e8f58714 default
parent child Browse files
Show More
@@ -1,1680 +1,1686
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import typing
14 import weakref
15 import weakref
15
16
16 from .i18n import _
17 from .i18n import _
17 from .node import (
18 from .node import (
18 hex,
19 hex,
19 sha1nodeconstants,
20 sha1nodeconstants,
20 short,
21 short,
21 )
22 )
22
23
23 from . import (
24 from . import (
24 bookmarks,
25 bookmarks,
25 bundlerepo,
26 bundlerepo,
26 cmdutil,
27 cmdutil,
27 destutil,
28 destutil,
28 discovery,
29 discovery,
29 error,
30 error,
30 exchange,
31 exchange,
31 extensions,
32 extensions,
32 graphmod,
33 graphmod,
33 httppeer,
34 httppeer,
34 localrepo,
35 localrepo,
35 lock,
36 lock,
36 logcmdutil,
37 logcmdutil,
37 logexchange,
38 logexchange,
38 merge as mergemod,
39 merge as mergemod,
39 mergestate as mergestatemod,
40 mergestate as mergestatemod,
40 narrowspec,
41 narrowspec,
41 phases,
42 phases,
42 requirements,
43 requirements,
43 scmutil,
44 scmutil,
44 sshpeer,
45 sshpeer,
45 statichttprepo,
46 statichttprepo,
46 ui as uimod,
47 ui as uimod,
47 unionrepo,
48 unionrepo,
48 url,
49 url,
49 util,
50 util,
50 verify as verifymod,
51 verify as verifymod,
51 vfs as vfsmod,
52 vfs as vfsmod,
52 )
53 )
53 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
54 from .utils import (
55 from .utils import (
55 hashutil,
56 hashutil,
56 stringutil,
57 stringutil,
57 urlutil,
58 urlutil,
58 )
59 )
59
60
61 if typing.TYPE_CHECKING:
62 from typing import (
63 List,
64 Tuple,
65 )
60
66
# convenience alias used by callers that acquire multiple locks
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
65
71
66
72
def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
    """Resolve branch names against a peer and merge them into ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair.  Returns a
    ``(revs, checkout)`` tuple where ``checkout`` is the revision to
    update the working copy to (or None).
    """
    if hasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer(remotehidden=remotehidden)
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related to resolve; checkout is the first rev
        return revs or None, revs[0] if revs else None
    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as executor:
        branchmap = executor.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Expand one branch name into revs; b'.' means the local
        # dirstate branch.  Returns False when the branch is unknown.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(node) for node in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # not a branch name after all: treat it as a raw revision
        revs.append(hashbranch)
    return revs, revs[0]
113
119
114
120
115 def _isfile(path):
121 def _isfile(path):
116 try:
122 try:
117 # we use os.stat() directly here instead of os.path.isfile()
123 # we use os.stat() directly here instead of os.path.isfile()
118 # because the latter started returning `False` on invalid path
124 # because the latter started returning `False` on invalid path
119 # exceptions starting in 3.8 and we care about handling
125 # exceptions starting in 3.8 and we care about handling
120 # invalid paths specially here.
126 # invalid paths specially here.
121 st = os.stat(path)
127 st = os.stat(path)
122 except ValueError as e:
128 except ValueError as e:
123 msg = stringutil.forcebytestr(e)
129 msg = stringutil.forcebytestr(e)
124 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
130 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 except OSError:
131 except OSError:
126 return False
132 return False
127 else:
133 else:
128 return stat.S_ISREG(st.st_mode)
134 return stat.S_ISREG(st.st_mode)
129
135
130
136
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundle files are plain files; anything else is a local repo dir
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # dispatch to bundlerepo for bundle files, localrepo otherwise
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
147
153
148
154
# modules implementing local repository types, keyed by URL scheme
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# modules implementing remote peers, keyed by URL scheme
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
161
167
162
168
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        u = urlutil.url(repo)
        scheme = u.scheme or b'file'
        if scheme in peer_schemes:
            cls = peer_schemes[scheme]
            cls.make_peer  # make sure we load the module
        elif scheme in repo_schemes:
            cls = repo_schemes[scheme]
            cls.instance  # make sure we load the module
        else:
            cls = LocalFactory
        if hasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # a repo object was passed; calling islocal() on it is deprecated
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
181
187
182
188
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        # plain binary file access for local paths
        return util.posixfile(pathurl.localpath(), b'rb')
    # remote path: the url module handles proxies, auth, redirects, ...
    return url.open(ui, path, sendaccept=sendaccept)
190
196
191
197
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
194
200
195
201
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension ``reposetup`` hooks on ``obj``.

    ``obj`` may be a repository or a peer; wire peers (``obj.local()``
    falsy) additionally get the registered ``wirepeersetupfuncs``.
    """
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # time each extension's hook individually for logging
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
214
220
215
221
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        # peer-only schemes cannot yield a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # return the "visible" filtered view so hidden changesets stay hidden
    return repo.filtered(b'visible')
242
248
243
249
def peer(
    uiorrepo,
    opts,
    path,
    create=False,
    intents=None,
    createopts=None,
    remotehidden=False,
):
    '''return a repository peer for the specified path'''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if hasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        # true remote peer (http, ssh, ...)
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
            remotehidden=remotehidden,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path, remotehidden=remotehidden)
    return peer
287
293
288
294
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # last meaningful path component (normpath strips trailing slashes)
    return os.path.basename(os.path.normpath(path))
309
315
310
316
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: store lives inside the repo itself
        return None

    if hasattr(repo, 'srcrepo') and repo.srcrepo:
        # cached from a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
328
334
329
335
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if hasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # resolve the path string into a repo and optional branch/revs
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the share-related requirements
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
382
388
383
389
def _prependsourcehgrc(repo):
    """Copy the source repo config and prepend it to this repo's .hg/hgrc.

    Used on unshare, and only relevant when the share was performed with
    the share-safe method (where the source's config is shared).
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to carry over from the source
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        # source config first, so local settings keep precedence
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
404
410
405
411
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for debugging, but disarm it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
452
458
453
459
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # point the share's default path at the share source's default
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow repos must also propagate the narrowspec to the new
        # working copy
        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
            b"narrow-share"
        ):
            narrowspec.copytoworkingcopy(destrepo)
472
478
473
479
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested
        checkout = update
    # fall back through sensible defaults until one resolves
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
494
500
495
501
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # phase data is meaningless in a publishing source
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
534
540
535
541
536 def clonewithshare(
542 def clonewithshare(
537 ui,
543 ui,
538 peeropts,
544 peeropts,
539 sharepath,
545 sharepath,
540 source,
546 source,
541 srcpeer,
547 srcpeer,
542 dest,
548 dest,
543 pull=False,
549 pull=False,
544 rev=None,
550 rev=None,
545 update=True,
551 update=True,
546 stream=False,
552 stream=False,
547 ):
553 ):
548 """Perform a clone using a shared repo.
554 """Perform a clone using a shared repo.
549
555
550 The store for the repository will be located at <sharepath>/.hg. The
556 The store for the repository will be located at <sharepath>/.hg. The
551 specified revisions will be cloned or pulled from "source". A shared repo
557 specified revisions will be cloned or pulled from "source". A shared repo
552 will be created at "dest" and a working copy will be created if "update" is
558 will be created at "dest" and a working copy will be created if "update" is
553 True.
559 True.
554 """
560 """
555 revs = None
561 revs = None
556 if rev:
562 if rev:
557 if not srcpeer.capable(b'lookup'):
563 if not srcpeer.capable(b'lookup'):
558 raise error.Abort(
564 raise error.Abort(
559 _(
565 _(
560 b"src repository does not support "
566 b"src repository does not support "
561 b"revision lookup and so doesn't "
567 b"revision lookup and so doesn't "
562 b"support clone by revision"
568 b"support clone by revision"
563 )
569 )
564 )
570 )
565
571
566 # TODO this is batchable.
572 # TODO this is batchable.
567 remoterevs = []
573 remoterevs = []
568 for r in rev:
574 for r in rev:
569 with srcpeer.commandexecutor() as e:
575 with srcpeer.commandexecutor() as e:
570 remoterevs.append(
576 remoterevs.append(
571 e.callcommand(
577 e.callcommand(
572 b'lookup',
578 b'lookup',
573 {
579 {
574 b'key': r,
580 b'key': r,
575 },
581 },
576 ).result()
582 ).result()
577 )
583 )
578 revs = remoterevs
584 revs = remoterevs
579
585
580 # Obtain a lock before checking for or cloning the pooled repo otherwise
586 # Obtain a lock before checking for or cloning the pooled repo otherwise
581 # 2 clients may race creating or populating it.
587 # 2 clients may race creating or populating it.
582 pooldir = os.path.dirname(sharepath)
588 pooldir = os.path.dirname(sharepath)
583 # lock class requires the directory to exist.
589 # lock class requires the directory to exist.
584 try:
590 try:
585 util.makedir(pooldir, False)
591 util.makedir(pooldir, False)
586 except FileExistsError:
592 except FileExistsError:
587 pass
593 pass
588
594
589 poolvfs = vfsmod.vfs(pooldir)
595 poolvfs = vfsmod.vfs(pooldir)
590 basename = os.path.basename(sharepath)
596 basename = os.path.basename(sharepath)
591
597
592 with lock.lock(poolvfs, b'%s.lock' % basename):
598 with lock.lock(poolvfs, b'%s.lock' % basename):
593 if os.path.exists(sharepath):
599 if os.path.exists(sharepath):
594 ui.status(
600 ui.status(
595 _(b'(sharing from existing pooled repository %s)\n') % basename
601 _(b'(sharing from existing pooled repository %s)\n') % basename
596 )
602 )
597 else:
603 else:
598 ui.status(
604 ui.status(
599 _(b'(sharing from new pooled repository %s)\n') % basename
605 _(b'(sharing from new pooled repository %s)\n') % basename
600 )
606 )
601 # Always use pull mode because hardlinks in share mode don't work
607 # Always use pull mode because hardlinks in share mode don't work
602 # well. Never update because working copies aren't necessary in
608 # well. Never update because working copies aren't necessary in
603 # share mode.
609 # share mode.
604 clone(
610 clone(
605 ui,
611 ui,
606 peeropts,
612 peeropts,
607 source,
613 source,
608 dest=sharepath,
614 dest=sharepath,
609 pull=True,
615 pull=True,
610 revs=rev,
616 revs=rev,
611 update=False,
617 update=False,
612 stream=stream,
618 stream=stream,
613 )
619 )
614
620
615 # Resolve the value to put in [paths] section for the source.
621 # Resolve the value to put in [paths] section for the source.
616 if islocal(source):
622 if islocal(source):
617 defaultpath = util.abspath(urlutil.urllocalpath(source))
623 defaultpath = util.abspath(urlutil.urllocalpath(source))
618 else:
624 else:
619 defaultpath = source
625 defaultpath = source
620
626
621 sharerepo = repository(ui, path=sharepath)
627 sharerepo = repository(ui, path=sharepath)
622 destrepo = share(
628 destrepo = share(
623 ui,
629 ui,
624 sharerepo,
630 sharerepo,
625 dest=dest,
631 dest=dest,
626 update=False,
632 update=False,
627 bookmarks=False,
633 bookmarks=False,
628 defaultpath=defaultpath,
634 defaultpath=defaultpath,
629 )
635 )
630
636
631 # We need to perform a pull against the dest repo to fetch bookmarks
637 # We need to perform a pull against the dest repo to fetch bookmarks
632 # and other non-store data that isn't shared by default. In the case of
638 # and other non-store data that isn't shared by default. In the case of
633 # non-existing shared repo, this means we pull from the remote twice. This
639 # non-existing shared repo, this means we pull from the remote twice. This
634 # is a bit weird. But at the time it was implemented, there wasn't an easy
640 # is a bit weird. But at the time it was implemented, there wasn't an easy
635 # way to pull just non-changegroup data.
641 # way to pull just non-changegroup data.
636 exchange.pull(destrepo, srcpeer, heads=revs)
642 exchange.pull(destrepo, srcpeer, heads=revs)
637
643
638 _postshareupdate(destrepo, update)
644 _postshareupdate(destrepo, update)
639
645
640 return srcpeer, peer(ui, peeropts, dest)
646 return srcpeer, peer(ui, peeropts, dest)
641
647
642
648
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy the cache file ``fname`` from ``srcrepo`` into ``dstcachedir``.

    Silently does nothing when the source cache file is absent; the
    destination cache directory is created on demand.
    """
    source = srcrepo.cachevfs.join(fname)
    if not os.path.exists(source):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(source, os.path.join(dstcachedir, fname))
652
658
653
659
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.

    depth: if set, the destination repository is created with a shallow
    file store and the depth is forwarded to the pull.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        if hasattr(source, 'peer'):
            srcpeer = source.peer()  # in case we were called with a localrepo
        else:
            srcpeer = source
        branches = (None, branch or [])
        # XXX path: simply use the peer `path` object when this become available
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest_path = urlutil.get_clone_path_obj(ui, dest)
            if dest_path is not None:
                dest = dest_path.rawloc
            else:
                dest = b''

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if b'lfs' in extensions.disabled():
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock(), local.transaction(
                        b'narrow-clone'
                    ):
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # fix: translate the message before interpolating
                            # the bookmark name, so the gettext catalog lookup
                            # can succeed (matches the branch below).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fix: this previously released destlock a second time,
                # leaving the working-copy lock held until the finally
                # block below. Release the wlock that was actually taken.
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        # release() ignores None, so this is safe whichever locks were taken.
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1134
1140
1135
1141
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of merge/update ``stats`` on the repo's ui.

    When ``quietempty`` is true and the stats record no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1151
1157
1152
1158
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    With ``overwrite`` set, local changes are discarded; otherwise they are
    merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1171
1177
1172
1178
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain after the update.
    """
    target = repo[node]
    stats = mergemod.update(target, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved
1180
1186
1181
1187
# naming conflict in clone(): `update` is shadowed there by a local name, so
# keep a module-level alias that clone() can call.
_update = update
1184
1190
1185
1191
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, discarding any
    local changes.

    Always returns False: a clean (forced) update can never leave
    unresolved conflicts behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a forced update must not produce merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1193
1199
1194
1200
# naming conflict in updatetotally(): `clean` is shadowed there by a
# parameter, so keep a module-level alias that updatetotally() can call.
_clean = clean
1197
1203
# The set of values accepted for the `commands.update.check` config option
# and for the `updatecheck` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1204
1210
1205
1211
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit argument that is not a known constant is a programming
    # error, not a config problem
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        # movemarkfrom: node the active bookmark should be moved from, if any
        # warndest: whether to hint about other interesting destinations
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly a
            # bookmark to move and a name to (de)activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up front; the actual update then needs no check
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # a non-bookmark name was requested: drop the active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1284
1290
1285
1291
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with ``ctx``, resolving changes.

    Return True if any conflicts remain unresolved.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1307
1313
1308
1314
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating the working copy back to the
    pre-merge revision."""
    ms = mergestatemod.mergestate.read(repo)
    # with recorded conflicts the local side of the merge is the revision to
    # restore; without conflicts no mergestate was stored and '.' is it
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1322
1328
1323
1329
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the return value
    of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo path: join it onto the parent's URL, using the
            # path flavor matching the URL scheme
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may wrap `other` in a bundlerepo and hands back a
        # cleanup callable that supersedes the plain close() above
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1390
1396
1391
1397
def incoming(ui, repo, source, opts, subpath=None):
    """Show new changesets found in ``source``.

    Returns 0 when incoming changes were found, 1 otherwise (possibly after
    recursing into subrepositories when --subrepos is set).
    """

    def subreporecurse():
        # recurse into subrepos when requested; the best (lowest) exit
        # code wins
        best = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sr_path in sorted(wctx.substate):
                subrepo = wctx.sub(sr_path)
                best = min(best, subrepo.incoming(ui, source, opts))
        return best

    def display(other, chlist, displayer):
        # honor --limit, --newest-first and --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1421
1427
1422
1428
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from each of ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes sorted by local revision number and ``others`` is
    the list of still-open peer objects; the caller is responsible for
    closing them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # NOTE(review): ``subpath`` is rebound to a url object on the
            # first iteration, so later iterations see the converted value —
            # confirm this is intended when multiple push paths are given.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # relative subrepo path: join it onto the destination URL,
                # using the path flavor matching the URL scheme
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # only successfully-queried peers are handed to the caller;
            # on failure the peer is closed here before re-raising
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1465
1471
1466
1472
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepo when --subrepos is set.

    Returns the best (lowest) exit code seen, defaulting to 1.
    """
    best = 1
    if not opts.get(b'subrepos'):
        return best
    wctx = repo[None]
    for sub_path in sorted(wctx.substate):
        sub = wctx.sub(sub_path)
        best = min(best, sub.outgoing(ui, dests, opts))
    return best
1475
1481
1476
1482
def _outgoing_filter(repo, revs, opts):
    """Apply revision filtering/ordering options for outgoing.

    Honors --newest-first (reverses ``revs`` in place), --limit and
    --no-merges, yielding the surviving nodes lazily.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1498
1504
1499
1505
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (possibly
    improved by recursing into subrepositories).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run the outgoing hooks even when nothing was found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # the peers opened by _outgoing() are owned by this function
        for oth in others:
            oth.close()
1531
1537
1532
1538
def verify(repo, level=None):
    """verify the consistency of a repository

    Also checks that the subrepo references recorded in .hgsubstate resolve.
    Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a single broken subrepo does not stop the check of
                        # the remaining ones
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself failed: the .hgsubstate file
                # in this revision cannot be parsed
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1565
1571
1566
1572
def remoteui(src, opts):
    """Build a remote ui from a ui or a repo, plus command line opts.

    ``src`` may be a repository (its ``baseui`` is copied so repo-specific
    configuration is dropped, while the options below are still read from
    the repo's ui) or a plain ui object (copied wholesale). Selected local
    settings relevant to remote access are then carried over.
    """
    if hasattr(src, 'baseui'):
        # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:
        # assume it's a global ui object: keep all global options
        dst = src.copy()

    # ssh-specific options; explicit command line opts win over config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1595
1601
1596
1602
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding a directory path,
# file name relative to that directory); see cachedlocalrepo._repostate().
foi: "List[Tuple[str, bytes]]" = [
    ('spath', b'00changelog.i'),
    ('spath', b'phaseroots'),  # ! phase can change content at the same size
    ('spath', b'obsstore'),
    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1606
1612
1607
1613
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # _state: fingerprint of the files of interest; mtime: newest of them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the filter level of the cached instance to the new one
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Return (fingerprint tuple, newest mtime) over the module-level
        # list of files of interest (``foi``).
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file missing: fall back to the containing directory so the
                # fingerprint still changes when the file appears
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the already-computed fingerprint so the copy does not
        # immediately consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now