##// END OF EJS Templates
peer: dissolve `_peerlookup` into its last two callers
marmoute -
r50644:be3fcd9e default
parent child Browse files
Show More
@@ -1,159 +1,167
1 1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """extend schemes with shortcuts to repository swarms
7 7
8 8 This extension allows you to specify shortcuts for parent URLs with a
9 9 lot of repositories to act like a scheme, for example::
10 10
11 11 [schemes]
12 12 py = http://code.python.org/hg/
13 13
14 14 After that you can use it like::
15 15
16 16 hg clone py://trunk/
17 17
18 18 Additionally there is support for some more complex schemas, for
19 19 example used by Google Code::
20 20
21 21 [schemes]
22 22 gcode = http://{1}.googlecode.com/hg/
23 23
24 24 The syntax is taken from Mercurial templates, and you have unlimited
25 25 number of variables, starting with ``{1}`` and continuing with
26 26 ``{2}``, ``{3}`` and so on. This variables will receive parts of URL
27 27 supplied, split by ``/``. Anything not specified as ``{part}`` will be
28 28 just appended to an URL.
29 29
30 30 For convenience, the extension adds these schemes by default::
31 31
32 32 [schemes]
33 33 py = http://hg.python.org/
34 34 bb = https://bitbucket.org/
35 35 bb+ssh = ssh://hg@bitbucket.org/
36 36 gcode = https://{1}.googlecode.com/hg/
37 37 kiln = https://{1}.kilnhg.com/Repo/
38 38
39 39 You can override a predefined scheme by defining a new scheme with the
40 40 same name.
41 41 """
42 42
43 43 import os
44 44 import re
45 45
46 46 from mercurial.i18n import _
47 47 from mercurial import (
48 48 error,
49 49 extensions,
50 50 hg,
51 51 pycompat,
52 52 registrar,
53 53 templater,
54 54 )
55 55 from mercurial.utils import (
56 56 urlutil,
57 57 )
58 58
59 59 cmdtable = {}
60 60 command = registrar.command(cmdtable)
61 61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 63 # be specifying the version(s) of Mercurial they are tested with, or
64 64 # leave the attribute unspecified.
65 65 testedwith = b'ships-with-hg-core'
66 66
67 67 _partre = re.compile(br'{(\d+)\}')
68 68
69 69
class ShortRepository:
    """Repository factory for one configured URL-scheme shortcut.

    ``url`` is the expansion template, ``scheme`` the shortcut name, and
    ``templater`` a mercurial templater engine used to substitute the
    ``{1}``, ``{2}``, ... placeholders.
    """

    def __init__(self, url, scheme, templater):
        self.scheme = scheme
        self.templater = templater
        self.url = url
        # highest placeholder index used by the template (0 when none)
        indices = [int(idx) for idx in _partre.findall(self.url)]
        self.parts = max(indices) if indices else 0

    def __repr__(self):
        return b'<ShortRepository: %s>' % self.scheme

    def instance(self, ui, url, create, intents=None, createopts=None):
        """Expand ``url`` and delegate to the factory of the real scheme."""
        url = self.resolve(url)
        scheme = urlutil.url(url).scheme or b'file'
        # modules/classes are always truthy, so ``or``-chaining is safe here
        cls = (
            hg.peer_schemes.get(scheme)
            or hg.repo_schemes.get(scheme)
            or hg.LocalFactory
        )
        return cls.instance(
            ui, url, create, intents=intents, createopts=createopts
        )

    def resolve(self, url):
        """Substitute the ``{n}`` placeholders and return the expanded URL."""
        # Should this use the urlutil.url class, or is manual parsing better?
        try:
            rest = url.split(b'://', 1)[1]
        except IndexError:
            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
        segments = rest.split(b'/', self.parts)
        if len(segments) > self.parts:
            tail = segments.pop()
        else:
            tail = b''
        context = {b'%d' % (i + 1): seg for i, seg in enumerate(segments)}
        return b''.join(self.templater.process(self.url, context)) + tail
103 111
104 112
def hasdriveletter(orig, path):
    """Wrapped ``urlutil.hasdriveletter``: a configured scheme prefix wins.

    Without this, a one-letter scheme on Windows would be mistaken for a
    drive letter.
    """
    if path:
        if any(path.startswith(s + b':') for s in schemes):
            return False
    return orig(path)
111 119
112 120
# Built-in shortcuts; entries from the [schemes] config section override
# these in extsetup().
schemes = {
    b'py': b'http://hg.python.org/',
    b'bb': b'https://bitbucket.org/',
    b'bb+ssh': b'ssh://hg@bitbucket.org/',
    b'gcode': b'https://{1}.googlecode.com/hg/',
    b'kiln': b'https://{1}.kilnhg.com/Repo/',
}
120 128
121 129
def _check_drive_letter(scheme):
    """Abort if ``scheme`` would shadow an existing Windows drive letter."""
    looks_like_drive = (
        pycompat.iswindows
        and len(scheme) == 1
        and scheme.isalpha()
        and os.path.exists(b'%s:\\' % scheme)
    )
    if looks_like_drive:
        msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
        msg %= (scheme, scheme.upper())
        raise error.Abort(msg)
133 141
134 142
def extsetup(ui):
    """Register configured schemes and wrap drive-letter detection.

    Merges the ``[schemes]`` config section over the built-in defaults,
    then registers a ShortRepository factory for every shortcut, placing
    it in either the peer or the repo scheme table depending on what the
    expanded URL's own scheme maps to.
    """
    schemes.update(dict(ui.configitems(b'schemes')))
    t = templater.engine(templater.parse)
    for scheme, url in schemes.items():
        # Validate each individual scheme name, not the whole mapping:
        # passing the ``schemes`` dict made the Windows drive-letter
        # check a silent no-op (and would raise AttributeError on a
        # one-entry dict, since dicts have no ``isalpha``).
        _check_drive_letter(scheme)
        url_scheme = urlutil.url(url).scheme
        if url_scheme in hg.peer_schemes:
            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
        else:
            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
147 155
148 156
@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path"""
    scheme = urlutil.url(url).scheme
    cls = hg.peer_schemes.get(scheme)
    if cls is None:
        cls = hg.repo_schemes.get(scheme)
    # isinstance(None, ...) is False, so a missing scheme falls through
    if isinstance(cls, ShortRepository):
        url = cls.resolve(url)
    ui.write(url + b'\n')
@@ -1,1667 +1,1664
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names from ``branches`` into concrete revisions.

    ``branches`` is a ``(hashbranch, branchlist)`` pair as produced by
    urlutil.parseurl(). Returns a ``(revs, checkoutrev)`` pair.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass revs through unchanged
        x = revs or None
        y = revs[0] if revs else None
        return x, y

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve '.' against the local dirstate branch
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
114 114
115 115
116 116 def _isfile(path):
117 117 try:
118 118 # we use os.stat() directly here instead of os.path.isfile()
119 119 # because the latter started returning `False` on invalid path
120 120 # exceptions starting in 3.8 and we care about handling
121 121 # invalid paths specially here.
122 122 st = os.stat(path)
123 123 except ValueError as e:
124 124 msg = stringutil.forcebytestr(e)
125 125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 126 except OSError:
127 127 return False
128 128 else:
129 129 return stat.S_ISREG(st.st_mode)
130 130
131 131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the path is a bundle, which does not count as local
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        mod = bundlerepo if _isfile(expanded) else localrepo
        return mod.instance(ui, expanded, *args, **kwargs)
148 148
149 149
# URL schemes served by repository classes (local-style access).
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL schemes served by remote peer classes.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162 162
163 163
def _peerlookup(path):
    """Return the factory module/class handling ``path``'s scheme.

    Peer schemes take precedence over repo schemes; anything unknown
    falls back to LocalFactory.
    """
    scheme = urlutil.url(path).scheme or b'file'
    for table in (peer_schemes, repo_schemes):
        if scheme in table:
            return table[scheme]
    return LocalFactory
172
173
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        scheme = urlutil.url(repo).scheme or b'file'
        # modules/classes are truthy, so ``or``-chaining the lookups is safe
        cls = (
            peer_schemes.get(scheme)
            or repo_schemes.get(scheme)
            or LocalFactory
        )
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
184 181
185 182
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')


# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
198 195
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension reposetup hooks on ``obj``.

    ``obj`` may be a repository or a peer; wire peers additionally get
    the registered wirepeersetupfuncs applied.
    """
    ui = getattr(obj, "ui", ui)
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            msg = b'  > reposetup for %s took %s\n'
            ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
217 214
218 215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        # a known peer scheme cannot be opened as a local repository
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    obj = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, obj, presetupfuncs=presetupfuncs)
    return obj.filtered(b'visible')
245 242
246 243
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        new_peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, new_peer)
        return new_peer
    # this is a repository: open it locally and wrap it in a peer
    repo = repository(
        rui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    return repo.peer()
278 275
279 276
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last meaningful path component, or empty when there is no path
    return os.path.basename(os.path.normpath(path)) if path else b''
300 297
301 298
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # reuse a previously cached source repository, if any
    if util.safehasattr(repo, b'srcrepo'):
        if repo.srcrepo:
            return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
319 316
320 317
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if dest:
        dest = urlutil.get_clone_path_obj(ui, dest).loc
    else:
        dest = defaultdest(source)

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = {sharedbookmarks} if bookmarks else set()

    shared_repo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, shared_repo, defaultpath=defaultpath)
    # re-open the destination so it picks up the shared configuration
    shared_repo = repository(ui, dest)
    _postshareupdate(shared_repo, update, checkout=checkout)
    return shared_repo
373 370
374 371
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to copy when the share source has no config
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        # read before writing so a failed read leaves the file untouched
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
395 392
396 393
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    ctx = newrepo[b'.']
    for subpath in sorted(ctx.substate):
        ctx.sub(subpath).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
443 440
444 441
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
461 458
462 459
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # pick the first candidate revision that the repo can resolve
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
483 480
484 481
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            copied = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for relpath in srcrepo.store.copylist():
                if srcpublishing and relpath.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(relpath)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(relpath):
                    if relpath.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(relpath),
                        dstvfs.join(relpath),
                        hardlink,
                        progress,
                    )
                    copied += n
            if hardlink:
                ui.debug(b"linked %d files\n" % copied)
            else:
                ui.debug(b"copied %d files\n" % copied)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
523 520
524 521
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for symbol in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': symbol,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
630 627
631 628
632 629 # Recomputing caches is often slow on big repos, so copy them.
633 630 def _copycache(srcrepo, dstcachedir, fname):
634 631 """copy a cache from srcrepo to destcachedir (if it exists)"""
635 632 srcfname = srcrepo.cachevfs.join(fname)
636 633 dstfname = os.path.join(dstcachedir, fname)
637 634 if os.path.exists(srcfname):
638 635 if not os.path.exists(dstcachedir):
639 636 os.mkdir(dstcachedir)
640 637 util.copyfile(srcfname, dstfname)
641 638
642 639
643 640 def clone(
644 641 ui,
645 642 peeropts,
646 643 source,
647 644 dest=None,
648 645 pull=False,
649 646 revs=None,
650 647 update=True,
651 648 stream=False,
652 649 branch=None,
653 650 shareopts=None,
654 651 storeincludepats=None,
655 652 storeexcludepats=None,
656 653 depth=None,
657 654 ):
658 655 """Make a copy of an existing repository.
659 656
660 657 Create a copy of an existing repository in a new directory. The
661 658 source and destination are URLs, as passed to the repository
662 659 function. Returns a pair of repository peers, the source and
663 660 newly created destination.
664 661
665 662 The location of the source is added to the new repository's
666 663 .hg/hgrc file, as the default to be used for future pulls and
667 664 pushes.
668 665
669 666 If an exception is raised, the partly cloned/updated destination
670 667 repository will be deleted.
671 668
672 669 Arguments:
673 670
674 671 source: repository object or URL
675 672
676 673 dest: URL of destination repository to create (defaults to base
677 674 name of source repository)
678 675
679 676 pull: always pull from source repository, even in local case or if the
680 677 server prefers streaming
681 678
682 679 stream: stream raw data uncompressed from repository (fast over
683 680 LAN, slow over WAN)
684 681
685 682 revs: revision to clone up to (implies pull=True)
686 683
687 684 update: update working directory after clone completes, if
688 685 destination is local repository (True means update to default rev,
689 686 anything else is treated as a revision)
690 687
691 688 branch: branches to clone
692 689
693 690 shareopts: dict of options to control auto sharing behavior. The "pool" key
694 691 activates auto sharing mode and defines the directory for stores. The
695 692 "mode" key determines how to construct the directory name of the shared
696 693 repository. "identity" means the name is derived from the node of the first
697 694 changeset in the repository. "remote" means the name is derived from the
698 695 remote's path/URL. Defaults to "identity."
699 696
700 697 storeincludepats and storeexcludepats: sets of file patterns to include and
701 698 exclude in the repository copy, respectively. If not defined, all files
702 699 will be included (a "full" clone). Otherwise a "narrow" clone containing
703 700 only the requested files will be performed. If ``storeincludepats`` is not
704 701 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
705 702 ``path:.``. If both are empty sets, no files will be cloned.
706 703 """
707 704
708 705 if isinstance(source, bytes):
709 706 src_path = urlutil.get_clone_path_obj(ui, source)
710 707 if src_path is None:
711 708 srcpeer = peer(ui, peeropts, b'')
712 709 origsource = source = b''
713 710 branches = (None, branch or [])
714 711 else:
715 712 srcpeer = peer(ui, peeropts, src_path)
716 713 origsource = src_path.rawloc
717 714 branches = (src_path.branch, branch or [])
718 715 source = src_path.loc
719 716 else:
720 717 if util.safehasattr(source, 'peer'):
721 718 srcpeer = source.peer() # in case we were called with a localrepo
722 719 else:
723 720 srcpeer = source
724 721 branches = (None, branch or [])
725 722 # XXX path: simply use the peer `path` object when this become available
726 723 origsource = source = srcpeer.url()
727 724 srclock = destlock = destwlock = cleandir = None
728 725 destpeer = None
729 726 try:
730 727 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
731 728
732 729 if dest is None:
733 730 dest = defaultdest(source)
734 731 if dest:
735 732 ui.status(_(b"destination directory: %s\n") % dest)
736 733 else:
737 734 dest_path = urlutil.get_clone_path_obj(ui, dest)
738 735 if dest_path is not None:
739 736 dest = dest_path.rawloc
740 737 else:
741 738 dest = b''
742 739
743 740 dest = urlutil.urllocalpath(dest)
744 741 source = urlutil.urllocalpath(source)
745 742
746 743 if not dest:
747 744 raise error.InputError(_(b"empty destination path is not valid"))
748 745
749 746 destvfs = vfsmod.vfs(dest, expandpath=True)
750 747 if destvfs.lexists():
751 748 if not destvfs.isdir():
752 749 raise error.InputError(
753 750 _(b"destination '%s' already exists") % dest
754 751 )
755 752 elif destvfs.listdir():
756 753 raise error.InputError(
757 754 _(b"destination '%s' is not empty") % dest
758 755 )
759 756
760 757 createopts = {}
761 758 narrow = False
762 759
763 760 if storeincludepats is not None:
764 761 narrowspec.validatepatterns(storeincludepats)
765 762 narrow = True
766 763
767 764 if storeexcludepats is not None:
768 765 narrowspec.validatepatterns(storeexcludepats)
769 766 narrow = True
770 767
771 768 if narrow:
772 769 # Include everything by default if only exclusion patterns defined.
773 770 if storeexcludepats and not storeincludepats:
774 771 storeincludepats = {b'path:.'}
775 772
776 773 createopts[b'narrowfiles'] = True
777 774
778 775 if depth:
779 776 createopts[b'shallowfilestore'] = True
780 777
781 778 if srcpeer.capable(b'lfs-serve'):
782 779 # Repository creation honors the config if it disabled the extension, so
783 780 # we can't just announce that lfs will be enabled. This check avoids
784 781 # saying that lfs will be enabled, and then saying it's an unknown
785 782 # feature. The lfs creation option is set in either case so that a
786 783 # requirement is added. If the extension is explicitly disabled but the
787 784 # requirement is set, the clone aborts early, before transferring any
788 785 # data.
789 786 createopts[b'lfs'] = True
790 787
791 788 if extensions.disabled_help(b'lfs'):
792 789 ui.status(
793 790 _(
794 791 b'(remote is using large file support (lfs), but it is '
795 792 b'explicitly disabled in the local configuration)\n'
796 793 )
797 794 )
798 795 else:
799 796 ui.status(
800 797 _(
801 798 b'(remote is using large file support (lfs); lfs will '
802 799 b'be enabled for this repository)\n'
803 800 )
804 801 )
805 802
806 803 shareopts = shareopts or {}
807 804 sharepool = shareopts.get(b'pool')
808 805 sharenamemode = shareopts.get(b'mode')
809 806 if sharepool and islocal(dest):
810 807 sharepath = None
811 808 if sharenamemode == b'identity':
812 809 # Resolve the name from the initial changeset in the remote
813 810 # repository. This returns nullid when the remote is empty. It
814 811 # raises RepoLookupError if revision 0 is filtered or otherwise
815 812 # not available. If we fail to resolve, sharing is not enabled.
816 813 try:
817 814 with srcpeer.commandexecutor() as e:
818 815 rootnode = e.callcommand(
819 816 b'lookup',
820 817 {
821 818 b'key': b'0',
822 819 },
823 820 ).result()
824 821
825 822 if rootnode != sha1nodeconstants.nullid:
826 823 sharepath = os.path.join(sharepool, hex(rootnode))
827 824 else:
828 825 ui.status(
829 826 _(
830 827 b'(not using pooled storage: '
831 828 b'remote appears to be empty)\n'
832 829 )
833 830 )
834 831 except error.RepoLookupError:
835 832 ui.status(
836 833 _(
837 834 b'(not using pooled storage: '
838 835 b'unable to resolve identity of remote)\n'
839 836 )
840 837 )
841 838 elif sharenamemode == b'remote':
842 839 sharepath = os.path.join(
843 840 sharepool, hex(hashutil.sha1(source).digest())
844 841 )
845 842 else:
846 843 raise error.Abort(
847 844 _(b'unknown share naming mode: %s') % sharenamemode
848 845 )
849 846
850 847 # TODO this is a somewhat arbitrary restriction.
851 848 if narrow:
852 849 ui.status(
853 850 _(b'(pooled storage not supported for narrow clones)\n')
854 851 )
855 852 sharepath = None
856 853
857 854 if sharepath:
858 855 return clonewithshare(
859 856 ui,
860 857 peeropts,
861 858 sharepath,
862 859 source,
863 860 srcpeer,
864 861 dest,
865 862 pull=pull,
866 863 rev=revs,
867 864 update=update,
868 865 stream=stream,
869 866 )
870 867
871 868 srcrepo = srcpeer.local()
872 869
873 870 abspath = origsource
874 871 if islocal(origsource):
875 872 abspath = util.abspath(urlutil.urllocalpath(origsource))
876 873
877 874 if islocal(dest):
878 875 if os.path.exists(dest):
879 876 # only clean up directories we create ourselves
880 877 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
881 878 cleandir = hgdir
882 879 else:
883 880 cleandir = dest
884 881
885 882 copy = False
886 883 if (
887 884 srcrepo
888 885 and srcrepo.cancopy()
889 886 and islocal(dest)
890 887 and not phases.hassecret(srcrepo)
891 888 ):
892 889 copy = not pull and not revs
893 890
894 891 # TODO this is a somewhat arbitrary restriction.
895 892 if narrow:
896 893 copy = False
897 894
898 895 if copy:
899 896 try:
900 897 # we use a lock here because if we race with commit, we
901 898 # can end up with extra data in the cloned revlogs that's
902 899 # not pointed to by changesets, thus causing verify to
903 900 # fail
904 901 srclock = srcrepo.lock(wait=False)
905 902 except error.LockError:
906 903 copy = False
907 904
908 905 if copy:
909 906 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
910 907
911 908 destrootpath = urlutil.urllocalpath(dest)
912 909 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
913 910 localrepo.createrepository(
914 911 ui,
915 912 destrootpath,
916 913 requirements=dest_reqs,
917 914 )
918 915 destrepo = localrepo.makelocalrepository(ui, destrootpath)
919 916
920 917 destwlock = destrepo.wlock()
921 918 destlock = destrepo.lock()
922 919 from . import streamclone # avoid cycle
923 920
924 921 streamclone.local_copy(srcrepo, destrepo)
925 922
926 923 # we need to re-init the repo after manually copying the data
927 924 # into it
928 925 destpeer = peer(srcrepo, peeropts, dest)
929 926
930 927 # make the peer aware that is it already locked
931 928 #
932 929 # important:
933 930 #
934 931 # We still need to release that lock at the end of the function
935 932 destpeer.local()._lockref = weakref.ref(destlock)
936 933 destpeer.local()._wlockref = weakref.ref(destwlock)
937 934 # dirstate also needs to be copied because `_wlockref` has a reference
938 935 # to it: this dirstate is saved to disk when the wlock is released
939 936 destpeer.local().dirstate = destrepo.dirstate
940 937
941 938 srcrepo.hook(
942 939 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
943 940 )
944 941 else:
945 942 try:
946 943 # only pass ui when no srcrepo
947 944 destpeer = peer(
948 945 srcrepo or ui,
949 946 peeropts,
950 947 dest,
951 948 create=True,
952 949 createopts=createopts,
953 950 )
954 951 except FileExistsError:
955 952 cleandir = None
956 953 raise error.Abort(_(b"destination '%s' already exists") % dest)
957 954
958 955 if revs:
959 956 if not srcpeer.capable(b'lookup'):
960 957 raise error.Abort(
961 958 _(
962 959 b"src repository does not support "
963 960 b"revision lookup and so doesn't "
964 961 b"support clone by revision"
965 962 )
966 963 )
967 964
968 965 # TODO this is batchable.
969 966 remoterevs = []
970 967 for rev in revs:
971 968 with srcpeer.commandexecutor() as e:
972 969 remoterevs.append(
973 970 e.callcommand(
974 971 b'lookup',
975 972 {
976 973 b'key': rev,
977 974 },
978 975 ).result()
979 976 )
980 977 revs = remoterevs
981 978
982 979 checkout = revs[0]
983 980 else:
984 981 revs = None
985 982 local = destpeer.local()
986 983 if local:
987 984 if narrow:
988 985 with local.wlock(), local.lock():
989 986 local.setnarrowpats(storeincludepats, storeexcludepats)
990 987 narrowspec.copytoworkingcopy(local)
991 988
992 989 u = urlutil.url(abspath)
993 990 defaulturl = bytes(u)
994 991 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
995 992 if not stream:
996 993 if pull:
997 994 stream = False
998 995 else:
999 996 stream = None
1000 997 # internal config: ui.quietbookmarkmove
1001 998 overrides = {(b'ui', b'quietbookmarkmove'): True}
1002 999 with local.ui.configoverride(overrides, b'clone'):
1003 1000 exchange.pull(
1004 1001 local,
1005 1002 srcpeer,
1006 1003 heads=revs,
1007 1004 streamclonerequested=stream,
1008 1005 includepats=storeincludepats,
1009 1006 excludepats=storeexcludepats,
1010 1007 depth=depth,
1011 1008 )
1012 1009 elif srcrepo:
1013 1010 # TODO lift restriction once exchange.push() accepts narrow
1014 1011 # push.
1015 1012 if narrow:
1016 1013 raise error.Abort(
1017 1014 _(
1018 1015 b'narrow clone not available for '
1019 1016 b'remote destinations'
1020 1017 )
1021 1018 )
1022 1019
1023 1020 exchange.push(
1024 1021 srcrepo,
1025 1022 destpeer,
1026 1023 revs=revs,
1027 1024 bookmarks=srcrepo._bookmarks.keys(),
1028 1025 )
1029 1026 else:
1030 1027 raise error.Abort(
1031 1028 _(b"clone from remote to remote not supported")
1032 1029 )
1033 1030
1034 1031 cleandir = None
1035 1032
1036 1033 destrepo = destpeer.local()
1037 1034 if destrepo:
1038 1035 template = uimod.samplehgrcs[b'cloned']
1039 1036 u = urlutil.url(abspath)
1040 1037 u.passwd = None
1041 1038 defaulturl = bytes(u)
1042 1039 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1043 1040 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1044 1041
1045 1042 if ui.configbool(b'experimental', b'remotenames'):
1046 1043 logexchange.pullremotenames(destrepo, srcpeer)
1047 1044
1048 1045 if update:
1049 1046 if update is not True:
1050 1047 with srcpeer.commandexecutor() as e:
1051 1048 checkout = e.callcommand(
1052 1049 b'lookup',
1053 1050 {
1054 1051 b'key': update,
1055 1052 },
1056 1053 ).result()
1057 1054
1058 1055 uprev = None
1059 1056 status = None
1060 1057 if checkout is not None:
1061 1058 # Some extensions (at least hg-git and hg-subversion) have
1062 1059 # a peer.lookup() implementation that returns a name instead
1063 1060 # of a nodeid. We work around it here until we've figured
1064 1061 # out a better solution.
1065 1062 if len(checkout) == 20 and checkout in destrepo:
1066 1063 uprev = checkout
1067 1064 elif scmutil.isrevsymbol(destrepo, checkout):
1068 1065 uprev = scmutil.revsymbol(destrepo, checkout).node()
1069 1066 else:
1070 1067 if update is not True:
1071 1068 try:
1072 1069 uprev = destrepo.lookup(update)
1073 1070 except error.RepoLookupError:
1074 1071 pass
1075 1072 if uprev is None:
1076 1073 try:
1077 1074 if destrepo._activebookmark:
1078 1075 uprev = destrepo.lookup(destrepo._activebookmark)
1079 1076 update = destrepo._activebookmark
1080 1077 else:
1081 1078 uprev = destrepo._bookmarks[b'@']
1082 1079 update = b'@'
1083 1080 bn = destrepo[uprev].branch()
1084 1081 if bn == b'default':
1085 1082 status = _(b"updating to bookmark %s\n" % update)
1086 1083 else:
1087 1084 status = (
1088 1085 _(b"updating to bookmark %s on branch %s\n")
1089 1086 ) % (update, bn)
1090 1087 except KeyError:
1091 1088 try:
1092 1089 uprev = destrepo.branchtip(b'default')
1093 1090 except error.RepoLookupError:
1094 1091 uprev = destrepo.lookup(b'tip')
1095 1092 if not status:
1096 1093 bn = destrepo[uprev].branch()
1097 1094 status = _(b"updating to branch %s\n") % bn
1098 1095 destrepo.ui.status(status)
1099 1096 _update(destrepo, uprev)
1100 1097 if update in destrepo._bookmarks:
1101 1098 bookmarks.activate(destrepo, update)
1102 1099 if destlock is not None:
1103 1100 release(destlock)
1104 1101 if destwlock is not None:
1105 1102 release(destlock)
1106 1103 # here is a tiny windows were someone could end up writing the
1107 1104 # repository before the cache are sure to be warm. This is "fine"
1108 1105 # as the only "bad" outcome would be some slowness. That potential
1109 1106 # slowness already affect reader.
1110 1107 with destrepo.lock():
1111 1108 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1112 1109 finally:
1113 1110 release(srclock, destlock, destwlock)
1114 1111 if cleandir is not None:
1115 1112 shutil.rmtree(cleandir, True)
1116 1113 if srcpeer is not None:
1117 1114 srcpeer.close()
1118 1115 if destpeer and destpeer.local() is None:
1119 1116 destpeer.close()
1120 1117 return srcpeer, destpeer
1121 1118
1122 1119
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge statistics to the ui.

    ``stats`` is a merge result object (updated/merged/removed/unresolved
    counters). When ``quietempty`` is true and nothing was touched, print
    nothing at all.
    """
    if quietempty and stats.isempty():
        return
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(msg % counts)
1138 1135
1139 1136
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Changes are merged into the working directory unless ``overwrite`` is
    set, in which case they are clobbered.

    Returns stats (see pydoc mercurial.merge.applyupdates).

    Deprecated since 5.7: prefer merge.update() / merge.clean_update().
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # Delegate to the merge module; the labels mirror historical behavior.
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1158 1155
1159 1156
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # Truthy (1) exit status when conflicts remain.
    return unresolved > 0
1167 1164
1168 1165
# naming conflict in clone()
# (a local name shadows ``update`` there, so clone() goes through this
# private alias instead)
_update = update
1171 1168
1172 1169
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    target = repo[node]
    stats = mergemod.clean_update(target)
    # A clean update discards local changes, so nothing can be unresolved.
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # Never reports conflicts (see assertion above).
    return False
1180 1177
1181 1178
# naming conflict in updatetotally()
# (its ``clean`` parameter shadows the function, hence this private alias)
_clean = clean

# Accepted values for the ``commands.update.check`` config option and for
# the ``updatecheck`` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1191 1188
1192 1189
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # An explicit (caller-supplied) value must be valid.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit destination: let destutil pick one (possibly with
            # a bookmark to move); warn about other candidates at the end.
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # Fail early on a dirty working directory, then proceed
                # unchecked since cleanliness is now established.
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # Destination is a non-bookmark name: drop any active bookmark.
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1271 1268
1272 1269
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        # Successful merge: nudge the user towards committing it.
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1294 1291
1295 1292
def abortmerge(ui, repo):
    """Abort an in-progress merge, resetting the working directory."""
    ms = mergestatemod.mergestate.read(repo)
    # An active merge state means there were conflicts and it recorded the
    # local ("destination") changeset; otherwise no state was stored and the
    # working directory parent is the revision to return to.
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # A clean update cannot leave unresolved files behind.
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1309 1306
1310 1307
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise the
    value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # Join the subrepo path onto the parent location, normalizing
            # with the convention matching the target (local fs vs URL).
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # ``cleanupfn`` may be replaced below once a bundle repo takes over the
    # peer; whichever is current runs in the ``finally`` block.
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # Fetch the incoming changesets into a (possibly bundle-backed)
        # repository so they can be displayed locally; this may swap both
        # ``other`` and the cleanup function.
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1377 1374
1378 1375
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, otherwise 1 (possibly
    lowered by subrepo results when --subrepos is set).
    """

    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sub_path in sorted(wctx.substate):
                sub = wctx.sub(sub_path)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            # Merge changesets are skipped when --no-merges is given.
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1408 1405
1409 1406
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from each push destination.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the list
    of missing nodes sorted by local revision number, and ``others`` is the
    list of still-open peers, which the caller is responsible for closing.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # Graft the subrepo path onto the destination, unless the
            # subpath is an absolute URL/path of its own.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # Keep the peer open: callers (e.g. outgoing()) run hooks against
            # it and close it afterwards.
            others.append(other)
        except:  # re-raises
            # Close only the peer we failed on; previously opened peers stay
            # owned by the caller via ``others``.
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1452 1449
1453 1450
def _outgoing_recurse(ui, repo, dests, opts):
    """Run ``outgoing`` for every subrepo when --subrepos is set.

    Returns the minimum of the subrepo exit codes, starting from 1.
    """
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for sub_path in sorted(wctx.substate):
        sub = wctx.sub(sub_path)
        ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1462 1459
1463 1460
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    # Fast path: nothing to filter or limit, stream the list through.
    if limit is None and not skip_merges:
        yield from revs
        return

    emitted = 0
    changelog = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in changelog.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1485 1482
1486 1483
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes were found, otherwise 1 (possibly
    lowered by subrepo results).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    missing, remotes = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if missing:
            ret = 0

            ui.pager(b'outgoing')
            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, missing, opts)
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, missing, opts):
                    displayer.show(repo[node])
                displayer.close()
        # Hooks run against every destination, even when nothing is missing.
        for remote in remotes:
            cmdutil.outgoinghooks(ui, repo, remote, opts, missing)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() leaves its peers open; close them all here.
        for remote in remotes:
            remote.close()
1518 1515
1519 1516
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the core verification (at ``level``), then additionally verifies
    every subrepo referenced from ``.hgsubstate`` in any revision. Returns
    the combined result (core result, possibly overridden by a truthy
    subrepo verify result).
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # ``or ret`` keeps a previous failure sticky while
                        # still running every remaining subrepo check.
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # Deliberately broad: a corrupt .hgsubstate can fail in many
                # ways while being parsed; warn and keep verifying.
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1552 1549
1553 1550
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    # A repository exposes ``baseui``; start from that to shed repo-specific
    # configuration. Otherwise treat ``src`` as a global ui and copy it.
    if util.safehasattr(src, b'baseui'):
        dst = src.baseui.copy()
        src = src.ui
    else:
        dst = src.copy()

    # copy ssh-specific options, letting command-line opts win over config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1582 1579
1583 1580
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute naming a directory, file name inside it);
# consumed by cachedlocalrepo._repostate() below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1593 1590
1594 1591
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # Compare mtimes/sizes of a few well-known files to decide whether
        # the repo changed. This is imprecise: mtimes suffer from clock skew
        # and coarse filesystems, and content can change at constant size.
        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, newest mtime seen)."""
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # The file may be absent; fall back to its directory.
                st = os.stat(prefix)
            entries.append((st[stat.ST_MTIME], st.st_size))
            newest = max(newest, st[stat.ST_MTIME])

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should
        be completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        other = cachedlocalrepo(repo)
        other._state = self._state
        other.mtime = self.mtime
        return other
General Comments 0
You need to be logged in to leave comments. Login now