##// END OF EJS Templates
peer-or-repo: split the scheme between repo and peer...
marmoute -
r50584:f73f02ef default
parent child Browse files
Show More
@@ -1,150 +1,158
1 1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """extend schemes with shortcuts to repository swarms
7 7
8 8 This extension allows you to specify shortcuts for parent URLs with a
9 9 lot of repositories to act like a scheme, for example::
10 10
11 11 [schemes]
12 12 py = http://code.python.org/hg/
13 13
14 14 After that you can use it like::
15 15
16 16 hg clone py://trunk/
17 17
18 18 Additionally there is support for some more complex schemas, for
19 19 example used by Google Code::
20 20
21 21 [schemes]
22 22 gcode = http://{1}.googlecode.com/hg/
23 23
24 24 The syntax is taken from Mercurial templates, and you have unlimited
25 25 number of variables, starting with ``{1}`` and continuing with
``{2}``, ``{3}`` and so on. These variables will receive parts of the URL
supplied, split by ``/``. Anything not specified as ``{part}`` will simply
be appended to the URL.
29 29
30 30 For convenience, the extension adds these schemes by default::
31 31
32 32 [schemes]
33 33 py = http://hg.python.org/
34 34 bb = https://bitbucket.org/
35 35 bb+ssh = ssh://hg@bitbucket.org/
36 36 gcode = https://{1}.googlecode.com/hg/
37 37 kiln = https://{1}.kilnhg.com/Repo/
38 38
39 39 You can override a predefined scheme by defining a new scheme with the
40 40 same name.
41 41 """
42 42
43 43 import os
44 44 import re
45 45
46 46 from mercurial.i18n import _
47 47 from mercurial import (
48 48 error,
49 49 extensions,
50 50 hg,
51 51 pycompat,
52 52 registrar,
53 53 templater,
54 54 )
55 55 from mercurial.utils import (
56 56 urlutil,
57 57 )
58 58
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# matches the positional placeholders ({1}, {2}, ...) in a scheme template
_partre = re.compile(br'{(\d+)\}')
69 69
class ShortRepository:
    """A repository/peer factory that expands a shortcut scheme into a URL.

    Instances are registered in Mercurial's scheme tables so that a path
    such as ``py://trunk`` is rewritten through the configured URL template
    before the real repository/peer class is instantiated.
    """

    def __init__(self, url, scheme, templater):
        self.scheme = scheme
        self.templater = templater
        self.url = url
        # number of positional {N} placeholders in the template (0 if none)
        try:
            self.parts = max(map(int, _partre.findall(self.url)))
        except ValueError:
            # max() raises ValueError on an empty sequence of matches
            self.parts = 0

    def __repr__(self):
        return b'<ShortRepository: %s>' % self.scheme

    def instance(self, ui, url, create, intents=None, createopts=None):
        """Expand the shortcut and delegate to the real scheme handler."""
        expanded = self.resolve(url)
        handler = hg._peerlookup(expanded)
        return handler.instance(
            ui, expanded, create, intents=intents, createopts=createopts
        )

    def resolve(self, url):
        """Rewrite a shortcut ``url`` into its expanded form.

        The text after ``scheme://`` is split on ``/`` to fill the ``{N}``
        template placeholders; whatever remains is appended verbatim.
        """
        # Should this use the urlutil.url class, or is manual parsing better?
        try:
            url = url.split(b'://', 1)[1]
        except IndexError:
            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
        parts = url.split(b'/', self.parts)
        if len(parts) > self.parts:
            tail = parts.pop()
        else:
            tail = b''
        context = {b'%d' % (i + 1): v for i, v in enumerate(parts)}
        return b''.join(self.templater.process(self.url, context)) + tail
103 103
104 104
def hasdriveletter(orig, path):
    """Wrapped urlutil.hasdriveletter: a path starting with a registered
    shortcut scheme is never mistaken for a Windows drive letter."""
    if path and any(path.startswith(s + b':') for s in schemes):
        return False
    return orig(path)
111 111
112 112
# Default shortcut schemes shipped with the extension; entries from the
# user's [schemes] config section are merged on top of these.
schemes = {
    b'py': b'http://hg.python.org/',
    b'bb': b'https://bitbucket.org/',
    b'bb+ssh': b'ssh://hg@bitbucket.org/',
    b'gcode': b'https://{1}.googlecode.com/hg/',
    b'kiln': b'https://{1}.kilnhg.com/Repo/',
}
120 120
121 121
def extsetup(ui):
    """Register the configured shortcut schemes with Mercurial.

    User entries from the [schemes] config section override the defaults.
    Each shortcut is registered as a peer scheme or a repo scheme depending
    on the scheme of the URL it expands to.  On Windows, single-letter
    schemes that collide with an existing drive letter are rejected.
    """
    # NOTE(review): the pre-split registration line ``hg.schemes[scheme] =
    # ShortRepository(...)`` was left in alongside the new peer/repo-split
    # registration; only the split registration below is kept.
    schemes.update(dict(ui.configitems(b'schemes')))
    t = templater.engine(templater.parse)
    for scheme, url in schemes.items():
        if (
            pycompat.iswindows
            and len(scheme) == 1
            and scheme.isalpha()
            and os.path.exists(b'%s:\\' % scheme)
        ):
            raise error.Abort(
                _(
                    b'custom scheme %s:// conflicts with drive '
                    b'letter %s:\\\n'
                )
                % (scheme, scheme.upper())
            )
        url_scheme = urlutil.url(url).scheme
        if url_scheme in hg.peer_schemes:
            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
        else:
            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
142 146
143 147
@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path"""
    # NOTE(review): stale pre-change lines using hg._peerlookup() were left
    # in alongside the new scheme-table lookup; only the new lookup is kept.
    # Peer schemes take precedence, matching hg._peerlookup's order.
    scheme = urlutil.url(url).scheme
    if scheme in hg.peer_schemes:
        cls = hg.peer_schemes[scheme]
    else:
        cls = hg.repo_schemes.get(scheme)
    if cls is not None and isinstance(cls, ShortRepository):
        url = cls.resolve(url)
    ui.write(url + b'\n')
@@ -1,1618 +1,1624
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a URL's ``#branch`` fragment into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns ``(revs, checkout)``: the (possibly extended) list of
    revisions to operate on (or None) and the revision to check out (or
    None).  Branch names are resolved against the remote's branchmap;
    aborts if branch names were requested but the remote cannot look
    them up.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the revisions through untouched
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # fall back to treating hashbranch as a plain revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve a branch name to its heads; returns False when unknown
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a known branch name: assume it is a revision hash
            revs.append(hashbranch)
    return revs, revs[0]
110 110
111 111
112 112 def _isfile(path):
113 113 try:
114 114 # we use os.stat() directly here instead of os.path.isfile()
115 115 # because the latter started returning `False` on invalid path
116 116 # exceptions starting in 3.8 and we care about handling
117 117 # invalid paths specially here.
118 118 st = os.stat(path)
119 119 except ValueError as e:
120 120 msg = stringutil.forcebytestr(e)
121 121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 122 except OSError:
123 123 return False
124 124 else:
125 125 return stat.S_ISREG(st.st_mode)
126 126
127 127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the path means a bundle, which is not "local"
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        backend = bundlerepo if _isfile(expanded) else localrepo
        return backend.instance(ui, expanded, *args, **kwargs)
144 144
145 145
# NOTE(review): the diff residue (the pre-split ``schemes = {`` header and
# the old placement of ``static-http``) is removed; only the post-split
# tables remain.

# repository classes indexed by the URL scheme they serve; these produce
# local (non-wire) repository objects.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
    b'static-http': statichttprepo,
}

# peer classes for wire protocols, indexed by URL scheme.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
}
155 158
156 159
def _peerlookup(path):
    """Return the peer or repository class handling ``path``'s URL scheme.

    Peer schemes take precedence over repo schemes; any unknown scheme is
    assumed to be a local path and handled by LocalFactory.
    """
    # NOTE(review): the stale pre-change ``schemes.get(...)`` lines are
    # removed; only the split-table lookup remains.
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        return peer_schemes[scheme]
    if scheme in repo_schemes:
        return repo_schemes[scheme]
    return LocalFactory
162 168
163 169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo object: delegate (deprecated entry point)
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    cls = _peerlookup(repo)
    cls.instance  # make sure we load the module
    if not util.safehasattr(cls, 'islocal'):
        return False
    return cls.islocal(repo)  # pytype: disable=module-attr
174 180
175 181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
183 189
184 190
185 191 # a list of (ui, repo) functions called for wire peer initialization
186 192 wirepeersetupfuncs = []
187 193
188 194
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    factory = _peerlookup(path)
    repo_or_peer = factory.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # run pre-setup callbacks and extension reposetup hooks
    _setup_repo_or_peer(ui, repo_or_peer, presetupfuncs)
    return repo_or_peer
197 203
198 204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension reposetup hooks on ``obj``.

    ``obj`` may be a repository or a peer; for non-local objects the
    registered wire peer setup functions are invoked as well.  Each hook
    invocation is timed and logged under the 'extension' log channel.
    """
    ui = getattr(obj, "ui", ui)  # prefer the object's own ui when present
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get additional setup (e.g. protocol tweaks)
        for f in wirepeersetupfuncs:
            f(ui, obj)
217 223
218 224
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    local = obj.local()
    if not local:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    # hide changesets that regular repository users should not see
    return local.filtered(b'visible')
242 248
243 249
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
250 256
251 257
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
272 278
273 279
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # return the cached source repository when we already resolved it
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    base = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(base)
    repo.srcrepo = repository(repo.ui, srcurl)
    return repo.srcrepo
291 297
292 298
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """create a shared repository

    ``source`` may be a local repository object or a path/URL to one;
    ``dest`` defaults to the basename of the source.  Returns the newly
    created shared repository object.
    """

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open to pick up the configuration written by postshare()
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
344 350
345 351
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    source_vfs = vfsmod.vfs(repo.sharedpath)
    dest_vfs = vfsmod.vfs(repo.path)

    # nothing to copy if the share source has no config
    if not source_vfs.exists(b'hgrc'):
        return

    existing = dest_vfs.read(b'hgrc') if dest_vfs.exists(b'hgrc') else b''

    with dest_vfs(b'hgrc', b'wb') as fp:
        shared_config = source_vfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(shared_config)
        fp.write(b'\n')
        fp.write(existing)
366 372
367 373
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around for manual recovery
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
414 420
415 421
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from so pull/push have a default
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
432 438
433 439
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested revision first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
454 460
455 461
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # phase data is meaningless on a publishing source
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
494 500
495 501
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
601 607
602 608
603 609 # Recomputing caches is often slow on big repos, so copy them.
604 610 def _copycache(srcrepo, dstcachedir, fname):
605 611 """copy a cache from srcrepo to destcachedir (if it exists)"""
606 612 srcfname = srcrepo.cachevfs.join(fname)
607 613 dstfname = os.path.join(dstcachedir, fname)
608 614 if os.path.exists(srcfname):
609 615 if not os.path.exists(dstcachedir):
610 616 os.mkdir(dstcachedir)
611 617 util.copyfile(srcfname, dstfname)
612 618
613 619
614 620 def clone(
615 621 ui,
616 622 peeropts,
617 623 source,
618 624 dest=None,
619 625 pull=False,
620 626 revs=None,
621 627 update=True,
622 628 stream=False,
623 629 branch=None,
624 630 shareopts=None,
625 631 storeincludepats=None,
626 632 storeexcludepats=None,
627 633 depth=None,
628 634 ):
629 635 """Make a copy of an existing repository.
630 636
631 637 Create a copy of an existing repository in a new directory. The
632 638 source and destination are URLs, as passed to the repository
633 639 function. Returns a pair of repository peers, the source and
634 640 newly created destination.
635 641
636 642 The location of the source is added to the new repository's
637 643 .hg/hgrc file, as the default to be used for future pulls and
638 644 pushes.
639 645
640 646 If an exception is raised, the partly cloned/updated destination
641 647 repository will be deleted.
642 648
643 649 Arguments:
644 650
645 651 source: repository object or URL
646 652
647 653 dest: URL of destination repository to create (defaults to base
648 654 name of source repository)
649 655
650 656 pull: always pull from source repository, even in local case or if the
651 657 server prefers streaming
652 658
653 659 stream: stream raw data uncompressed from repository (fast over
654 660 LAN, slow over WAN)
655 661
656 662 revs: revision to clone up to (implies pull=True)
657 663
658 664 update: update working directory after clone completes, if
659 665 destination is local repository (True means update to default rev,
660 666 anything else is treated as a revision)
661 667
662 668 branch: branches to clone
663 669
664 670 shareopts: dict of options to control auto sharing behavior. The "pool" key
665 671 activates auto sharing mode and defines the directory for stores. The
666 672 "mode" key determines how to construct the directory name of the shared
667 673 repository. "identity" means the name is derived from the node of the first
668 674 changeset in the repository. "remote" means the name is derived from the
669 675 remote's path/URL. Defaults to "identity."
670 676
671 677 storeincludepats and storeexcludepats: sets of file patterns to include and
672 678 exclude in the repository copy, respectively. If not defined, all files
673 679 will be included (a "full" clone). Otherwise a "narrow" clone containing
674 680 only the requested files will be performed. If ``storeincludepats`` is not
675 681 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
676 682 ``path:.``. If both are empty sets, no files will be cloned.
677 683 """
678 684
679 685 if isinstance(source, bytes):
680 686 src = urlutil.get_clone_path(ui, source, branch)
681 687 origsource, source, branches = src
682 688 srcpeer = peer(ui, peeropts, source)
683 689 else:
684 690 srcpeer = source.peer() # in case we were called with a localrepo
685 691 branches = (None, branch or [])
686 692 origsource = source = srcpeer.url()
687 693 srclock = destlock = destwlock = cleandir = None
688 694 destpeer = None
689 695 try:
690 696 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
691 697
692 698 if dest is None:
693 699 dest = defaultdest(source)
694 700 if dest:
695 701 ui.status(_(b"destination directory: %s\n") % dest)
696 702 else:
697 703 dest = urlutil.get_clone_path(ui, dest)[0]
698 704
699 705 dest = urlutil.urllocalpath(dest)
700 706 source = urlutil.urllocalpath(source)
701 707
702 708 if not dest:
703 709 raise error.InputError(_(b"empty destination path is not valid"))
704 710
705 711 destvfs = vfsmod.vfs(dest, expandpath=True)
706 712 if destvfs.lexists():
707 713 if not destvfs.isdir():
708 714 raise error.InputError(
709 715 _(b"destination '%s' already exists") % dest
710 716 )
711 717 elif destvfs.listdir():
712 718 raise error.InputError(
713 719 _(b"destination '%s' is not empty") % dest
714 720 )
715 721
716 722 createopts = {}
717 723 narrow = False
718 724
719 725 if storeincludepats is not None:
720 726 narrowspec.validatepatterns(storeincludepats)
721 727 narrow = True
722 728
723 729 if storeexcludepats is not None:
724 730 narrowspec.validatepatterns(storeexcludepats)
725 731 narrow = True
726 732
727 733 if narrow:
728 734 # Include everything by default if only exclusion patterns defined.
729 735 if storeexcludepats and not storeincludepats:
730 736 storeincludepats = {b'path:.'}
731 737
732 738 createopts[b'narrowfiles'] = True
733 739
734 740 if depth:
735 741 createopts[b'shallowfilestore'] = True
736 742
737 743 if srcpeer.capable(b'lfs-serve'):
738 744 # Repository creation honors the config if it disabled the extension, so
739 745 # we can't just announce that lfs will be enabled. This check avoids
740 746 # saying that lfs will be enabled, and then saying it's an unknown
741 747 # feature. The lfs creation option is set in either case so that a
742 748 # requirement is added. If the extension is explicitly disabled but the
743 749 # requirement is set, the clone aborts early, before transferring any
744 750 # data.
745 751 createopts[b'lfs'] = True
746 752
747 753 if extensions.disabled_help(b'lfs'):
748 754 ui.status(
749 755 _(
750 756 b'(remote is using large file support (lfs), but it is '
751 757 b'explicitly disabled in the local configuration)\n'
752 758 )
753 759 )
754 760 else:
755 761 ui.status(
756 762 _(
757 763 b'(remote is using large file support (lfs); lfs will '
758 764 b'be enabled for this repository)\n'
759 765 )
760 766 )
761 767
762 768 shareopts = shareopts or {}
763 769 sharepool = shareopts.get(b'pool')
764 770 sharenamemode = shareopts.get(b'mode')
765 771 if sharepool and islocal(dest):
766 772 sharepath = None
767 773 if sharenamemode == b'identity':
768 774 # Resolve the name from the initial changeset in the remote
769 775 # repository. This returns nullid when the remote is empty. It
770 776 # raises RepoLookupError if revision 0 is filtered or otherwise
771 777 # not available. If we fail to resolve, sharing is not enabled.
772 778 try:
773 779 with srcpeer.commandexecutor() as e:
774 780 rootnode = e.callcommand(
775 781 b'lookup',
776 782 {
777 783 b'key': b'0',
778 784 },
779 785 ).result()
780 786
781 787 if rootnode != sha1nodeconstants.nullid:
782 788 sharepath = os.path.join(sharepool, hex(rootnode))
783 789 else:
784 790 ui.status(
785 791 _(
786 792 b'(not using pooled storage: '
787 793 b'remote appears to be empty)\n'
788 794 )
789 795 )
790 796 except error.RepoLookupError:
791 797 ui.status(
792 798 _(
793 799 b'(not using pooled storage: '
794 800 b'unable to resolve identity of remote)\n'
795 801 )
796 802 )
797 803 elif sharenamemode == b'remote':
798 804 sharepath = os.path.join(
799 805 sharepool, hex(hashutil.sha1(source).digest())
800 806 )
801 807 else:
802 808 raise error.Abort(
803 809 _(b'unknown share naming mode: %s') % sharenamemode
804 810 )
805 811
806 812 # TODO this is a somewhat arbitrary restriction.
807 813 if narrow:
808 814 ui.status(
809 815 _(b'(pooled storage not supported for narrow clones)\n')
810 816 )
811 817 sharepath = None
812 818
813 819 if sharepath:
814 820 return clonewithshare(
815 821 ui,
816 822 peeropts,
817 823 sharepath,
818 824 source,
819 825 srcpeer,
820 826 dest,
821 827 pull=pull,
822 828 rev=revs,
823 829 update=update,
824 830 stream=stream,
825 831 )
826 832
827 833 srcrepo = srcpeer.local()
828 834
829 835 abspath = origsource
830 836 if islocal(origsource):
831 837 abspath = util.abspath(urlutil.urllocalpath(origsource))
832 838
833 839 if islocal(dest):
834 840 if os.path.exists(dest):
835 841 # only clean up directories we create ourselves
836 842 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
837 843 cleandir = hgdir
838 844 else:
839 845 cleandir = dest
840 846
841 847 copy = False
842 848 if (
843 849 srcrepo
844 850 and srcrepo.cancopy()
845 851 and islocal(dest)
846 852 and not phases.hassecret(srcrepo)
847 853 ):
848 854 copy = not pull and not revs
849 855
850 856 # TODO this is a somewhat arbitrary restriction.
851 857 if narrow:
852 858 copy = False
853 859
854 860 if copy:
855 861 try:
856 862 # we use a lock here because if we race with commit, we
857 863 # can end up with extra data in the cloned revlogs that's
858 864 # not pointed to by changesets, thus causing verify to
859 865 # fail
860 866 srclock = srcrepo.lock(wait=False)
861 867 except error.LockError:
862 868 copy = False
863 869
864 870 if copy:
865 871 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
866 872
867 873 destrootpath = urlutil.urllocalpath(dest)
868 874 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
869 875 localrepo.createrepository(
870 876 ui,
871 877 destrootpath,
872 878 requirements=dest_reqs,
873 879 )
874 880 destrepo = localrepo.makelocalrepository(ui, destrootpath)
875 881
876 882 destwlock = destrepo.wlock()
877 883 destlock = destrepo.lock()
878 884 from . import streamclone # avoid cycle
879 885
880 886 streamclone.local_copy(srcrepo, destrepo)
881 887
882 888 # we need to re-init the repo after manually copying the data
883 889 # into it
884 890 destpeer = peer(srcrepo, peeropts, dest)
885 891
886 892 # make the peer aware that is it already locked
887 893 #
888 894 # important:
889 895 #
890 896 # We still need to release that lock at the end of the function
891 897 destpeer.local()._lockref = weakref.ref(destlock)
892 898 destpeer.local()._wlockref = weakref.ref(destwlock)
893 899 # dirstate also needs to be copied because `_wlockref` has a reference
894 900 # to it: this dirstate is saved to disk when the wlock is released
895 901 destpeer.local().dirstate = destrepo.dirstate
896 902
897 903 srcrepo.hook(
898 904 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
899 905 )
900 906 else:
901 907 try:
902 908 # only pass ui when no srcrepo
903 909 destpeer = peer(
904 910 srcrepo or ui,
905 911 peeropts,
906 912 dest,
907 913 create=True,
908 914 createopts=createopts,
909 915 )
910 916 except FileExistsError:
911 917 cleandir = None
912 918 raise error.Abort(_(b"destination '%s' already exists") % dest)
913 919
914 920 if revs:
915 921 if not srcpeer.capable(b'lookup'):
916 922 raise error.Abort(
917 923 _(
918 924 b"src repository does not support "
919 925 b"revision lookup and so doesn't "
920 926 b"support clone by revision"
921 927 )
922 928 )
923 929
924 930 # TODO this is batchable.
925 931 remoterevs = []
926 932 for rev in revs:
927 933 with srcpeer.commandexecutor() as e:
928 934 remoterevs.append(
929 935 e.callcommand(
930 936 b'lookup',
931 937 {
932 938 b'key': rev,
933 939 },
934 940 ).result()
935 941 )
936 942 revs = remoterevs
937 943
938 944 checkout = revs[0]
939 945 else:
940 946 revs = None
941 947 local = destpeer.local()
942 948 if local:
943 949 if narrow:
944 950 with local.wlock(), local.lock():
945 951 local.setnarrowpats(storeincludepats, storeexcludepats)
946 952 narrowspec.copytoworkingcopy(local)
947 953
948 954 u = urlutil.url(abspath)
949 955 defaulturl = bytes(u)
950 956 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
951 957 if not stream:
952 958 if pull:
953 959 stream = False
954 960 else:
955 961 stream = None
956 962 # internal config: ui.quietbookmarkmove
957 963 overrides = {(b'ui', b'quietbookmarkmove'): True}
958 964 with local.ui.configoverride(overrides, b'clone'):
959 965 exchange.pull(
960 966 local,
961 967 srcpeer,
962 968 heads=revs,
963 969 streamclonerequested=stream,
964 970 includepats=storeincludepats,
965 971 excludepats=storeexcludepats,
966 972 depth=depth,
967 973 )
968 974 elif srcrepo:
969 975 # TODO lift restriction once exchange.push() accepts narrow
970 976 # push.
971 977 if narrow:
972 978 raise error.Abort(
973 979 _(
974 980 b'narrow clone not available for '
975 981 b'remote destinations'
976 982 )
977 983 )
978 984
979 985 exchange.push(
980 986 srcrepo,
981 987 destpeer,
982 988 revs=revs,
983 989 bookmarks=srcrepo._bookmarks.keys(),
984 990 )
985 991 else:
986 992 raise error.Abort(
987 993 _(b"clone from remote to remote not supported")
988 994 )
989 995
990 996 cleandir = None
991 997
992 998 destrepo = destpeer.local()
993 999 if destrepo:
994 1000 template = uimod.samplehgrcs[b'cloned']
995 1001 u = urlutil.url(abspath)
996 1002 u.passwd = None
997 1003 defaulturl = bytes(u)
998 1004 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
999 1005 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1000 1006
1001 1007 if ui.configbool(b'experimental', b'remotenames'):
1002 1008 logexchange.pullremotenames(destrepo, srcpeer)
1003 1009
1004 1010 if update:
1005 1011 if update is not True:
1006 1012 with srcpeer.commandexecutor() as e:
1007 1013 checkout = e.callcommand(
1008 1014 b'lookup',
1009 1015 {
1010 1016 b'key': update,
1011 1017 },
1012 1018 ).result()
1013 1019
1014 1020 uprev = None
1015 1021 status = None
1016 1022 if checkout is not None:
1017 1023 # Some extensions (at least hg-git and hg-subversion) have
1018 1024 # a peer.lookup() implementation that returns a name instead
1019 1025 # of a nodeid. We work around it here until we've figured
1020 1026 # out a better solution.
1021 1027 if len(checkout) == 20 and checkout in destrepo:
1022 1028 uprev = checkout
1023 1029 elif scmutil.isrevsymbol(destrepo, checkout):
1024 1030 uprev = scmutil.revsymbol(destrepo, checkout).node()
1025 1031 else:
1026 1032 if update is not True:
1027 1033 try:
1028 1034 uprev = destrepo.lookup(update)
1029 1035 except error.RepoLookupError:
1030 1036 pass
1031 1037 if uprev is None:
1032 1038 try:
1033 1039 if destrepo._activebookmark:
1034 1040 uprev = destrepo.lookup(destrepo._activebookmark)
1035 1041 update = destrepo._activebookmark
1036 1042 else:
1037 1043 uprev = destrepo._bookmarks[b'@']
1038 1044 update = b'@'
1039 1045 bn = destrepo[uprev].branch()
1040 1046 if bn == b'default':
1041 1047 status = _(b"updating to bookmark %s\n" % update)
1042 1048 else:
1043 1049 status = (
1044 1050 _(b"updating to bookmark %s on branch %s\n")
1045 1051 ) % (update, bn)
1046 1052 except KeyError:
1047 1053 try:
1048 1054 uprev = destrepo.branchtip(b'default')
1049 1055 except error.RepoLookupError:
1050 1056 uprev = destrepo.lookup(b'tip')
1051 1057 if not status:
1052 1058 bn = destrepo[uprev].branch()
1053 1059 status = _(b"updating to branch %s\n") % bn
1054 1060 destrepo.ui.status(status)
1055 1061 _update(destrepo, uprev)
1056 1062 if update in destrepo._bookmarks:
1057 1063 bookmarks.activate(destrepo, update)
1058 1064 if destlock is not None:
1059 1065 release(destlock)
1060 1066 if destwlock is not None:
1061 1067 release(destlock)
1062 1068 # here is a tiny windows were someone could end up writing the
1063 1069 # repository before the cache are sure to be warm. This is "fine"
1064 1070 # as the only "bad" outcome would be some slowness. That potential
1065 1071 # slowness already affect reader.
1066 1072 with destrepo.lock():
1067 1073 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1068 1074 finally:
1069 1075 release(srclock, destlock, destwlock)
1070 1076 if cleandir is not None:
1071 1077 shutil.rmtree(cleandir, True)
1072 1078 if srcpeer is not None:
1073 1079 srcpeer.close()
1074 1080 if destpeer and destpeer.local() is None:
1075 1081 destpeer.close()
1076 1082 return srcpeer, destpeer
1077 1083
1078 1084
def _showstats(repo, stats, quietempty=False):
    """Report update/merge result counters on the repository ui.

    When ``quietempty`` is true and the stats are empty, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1094 1100
1095 1101
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point, kept as a thin shim over merge._update().
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1114 1120
1115 1121
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    unresolved = result.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when the caller still has conflicts to resolve
    return unresolved > 0
1123 1129
1124 1130
# naming conflict in clone()
# (clone() uses a local variable named ``update``, so keep a module-level
# alias to the function for use there)
_update = update
1127 1133
1128 1134
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    result = mergemod.clean_update(repo[node])
    # a forced (clean) update can never leave unresolved files behind
    assert result.unresolvedcount == 0
    if show_stats:
        _showstats(repo, result, quietempty)
    # mirror update()'s "has unresolved conflicts" return; always False here
    return False
1136 1142
1137 1143
# naming conflict in updatetotally()
# (updatetotally() takes a ``clean`` argument, so keep a module-level alias
# to the function for use there)
_clean = clean
1140 1146
# The set of ``updatecheck`` values accepted by updatetotally(); an
# unconfigured/invalid config value falls back to LINEAR, while an invalid
# caller-supplied value raises ValueError.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1147 1153
1148 1154
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # a caller-supplied value, by contrast, must be valid
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one, possibly with
            # an active bookmark to move along with the update
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out on a dirty working directory up front; the actual
                # update below then runs unchecked
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1227 1233
1228 1234
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1250 1256
1251 1257
def abortmerge(ui, repo):
    """Abandon an in-progress merge and restore the pre-merge working copy."""
    state = mergestatemod.mergestate.read(repo)
    if state.active():
        # there were conflicts: go back to the local side of the merge
        node = state.localctx.hex()
    else:
        # no conflicts, so no mergestate was stored; use the working parent
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    result = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files
    assert result.unresolvedcount == 0
    _showstats(repo, result)
1265 1271
1266 1272
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise the
    value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepo path onto the parent source URL
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` (e.g. with a bundle-backed
        # repo) and hands back the cleanup callback matching the new object
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always run the (possibly replaced) cleanup callback
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1328 1334
1329 1335
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes exist, otherwise the subrepo
    recursion result (1 when nothing was found anywhere).
    """

    def recurse_subrepos():
        code = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for path in sorted(wctx.substate):
                code = min(code, wctx.sub(path).incoming(ui, source, opts))
        return code

    def show_changesets(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        show_changesets, recurse_subrepos, ui, repo, source, opts, subpath=subpath
    )
1359 1365
1360 1366
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the revisions missing from each push destination.

    Returns ``(outgoing_revs, others)``: the union of missing nodes across
    all destinations sorted by local revision number, and the list of
    still-open peers — the caller is responsible for closing them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # graft the subrepo path onto the parent destination URL
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # close the peer we just opened before propagating the failure;
            # previously-opened peers stay open for the caller's cleanup
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1403 1409
1404 1410
def _outgoing_recurse(ui, repo, dests, opts):
    """Recurse `hg outgoing` into subrepos; return the minimum exit code."""
    code = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for path in sorted(wctx.substate):
            code = min(code, wctx.sub(path).outgoing(ui, dests, opts))
    return code
1413 1419
1414 1420
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()

    if limit is None and not skip_merges:
        # fast path: nothing to filter, stream the (possibly reversed) list
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skip_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1436 1442
1437 1443
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not present in the destination(s).

    Returns 0 when outgoing changesets exist, possibly lowered further by
    the subrepo recursion; otherwise 1.
    """
    use_graph = opts.get(b'graph')
    if use_graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    missing, peers = _outgoing(ui, repo, dests, opts, subpath=subpath)
    code = 1
    try:
        if missing:
            code = 0

            if use_graph:
                revdag = logcmdutil.graphrevs(repo, missing, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, missing, opts):
                    displayer.show(repo[node])
                displayer.close()
        # run outgoing hooks against every destination, even when empty
        for remote in peers:
            cmdutil.outgoinghooks(ui, repo, remote, opts, missing)
        code = min(code, _outgoing_recurse(ui, repo, dests, opts))
        return code
    finally:
        for remote in peers:
            remote.close()
1469 1475
1470 1476
def verify(repo, level=None):
    """verify the consistency of a repository

    After the core verify pass, also checks that every revision touching
    ``.hgsubstate`` has resolvable subrepo references. Returns the combined
    (truthy on failure) verify result.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo marks the whole run as failed,
                        # but keep checking the remaining subrepos
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # an unparsable .hgsubstate is reported but does not abort
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1503 1509
1504 1510
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: command-line opts win over source config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, value in src.configitems(section):
            dst.setconfig(section, key, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1533 1539
1534 1540
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (localrepository attribute naming a directory, file name
# inside it); consumed via getattr() + os.path.join() in
# cachedlocalrepo._repostate() below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1544 1550
1545 1551
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest plus their newest mtime;
        # fetch() compares against this to detect on-disk changes
        self._state, self.mtime = self._repostate()
        # remember the filter so a refreshed repo presents the same view
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # on-disk state changed: build a fresh repository object and
        # re-apply the original filter (if any)
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, newest mtime seen)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist (e.g. no obsstore yet); stat the
                # containing directory so we still get a stable fingerprint
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the cached fingerprint over so the copy does not consider
        # itself stale immediately
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now