##// END OF EJS Templates
static-http: have `statichttprepo.instance` return a peer object...
marmoute -
r50586:c3728734 default
parent child Browse files
Show More
@@ -1,1624 +1,1624 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
# Convenience alias: release one or more locks (see `lock.release`).
release = lock.release

# Names of repository features that may be shared between repositories;
# used by share() below when building `shareditems`.
sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve requested branch names into revisions using a peer.

    ``lrepo`` is a local repository, used only to resolve the b'.'
    pseudo-branch from the dirstate (may be falsy otherwise).
    ``other`` is a peer or a local repository (a repo is converted via
    ``.peer()``).
    ``branches`` is a ``(hashbranch, branches)`` pair: an optional branch
    name taken from a URL fragment plus a list of additional branch names.
    ``revs`` is the explicit list of revisions requested by the caller.

    Returns a ``(revs, checkout)`` pair: the (possibly extended) list of
    revisions to operate on (or None) and the revision to check out
    (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the caller's revisions straight through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without the branchmap capability we cannot resolve branch names;
        # the URL-fragment branch is appended as-is for later resolution
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend ``revs`` with the heads of ``branch``; return whether the
        # branch exists in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # an unknown URL-fragment "branch" may still be a raw revision
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110 110
111 111
def _isfile(path):
    """Return True if ``path`` names a regular file.

    os.stat() is used directly rather than os.path.isfile() because the
    latter started returning False on invalid-path exceptions in
    Python 3.8, while invalid paths must be reported to the user here.
    """
    try:
        mode = os.stat(path).st_mode
    except ValueError as exc:
        reason = stringutil.forcebytestr(exc)
        raise error.Abort(_(b'invalid path %s: %s') % (path, reason))
    except OSError:
        return False
    return stat.S_ISREG(mode)
126 126
127 127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo

    A path pointing at a regular file is assumed to be a bundle; any
    other path is handled as a local repository.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
144 144
145 145
# URL scheme -> module (or class) providing repository behavior.  Each
# entry exposes an ``instance(ui, path, create, ...)`` factory and may
# expose ``islocal(path)``.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing peer behavior.  b'static-http' belongs
# here (not in repo_schemes): statichttprepo.instance returns a peer
# object.  A stale duplicate static-http entry in repo_schemes is removed.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158 158
159 159
def _peerlookup(path):
    """Return the module/class handling the URL scheme of ``path``.

    Peer schemes take precedence over repository schemes; anything
    unknown falls back to LocalFactory.
    """
    scheme = urlutil.url(path).scheme or b'file'
    handler = peer_schemes.get(scheme)
    if handler is None:
        handler = repo_schemes.get(scheme, LocalFactory)
    return handler
168 168
169 169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # passing a repo object is deprecated; callers should use .local()
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    cls = _peerlookup(repo)
    cls.instance  # make sure we load the module
    if util.safehasattr(cls, 'islocal'):
        return cls.islocal(repo)  # pytype: disable=module-attr
    return False
180 180
181 181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
189 189
190 190
# a list of (ui, peer) functions run on every newly constructed wire
# (non-local) peer; see _setup_repo_or_peer
wirepeersetupfuncs = []
193 193
194 194
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    factory = _peerlookup(path)
    repo_or_peer = factory.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    _setup_repo_or_peer(ui, repo_or_peer, presetupfuncs)
    return repo_or_peer
203 203
204 204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repository or peer.

    Runs the caller-supplied ``presetupfuncs``, then every loaded
    extension's ``reposetup`` hook (timed and logged), and finally the
    registered wire-peer setup functions when ``obj`` is not local.
    """
    # prefer the object's own ui (it may carry repo-level configuration)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
223 223
224 224
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to a non-local peer; the returned
    repository is filtered to the b'visible' view.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
248 248
249 249
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
256 256
257 257
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
278 278
279 279
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    # a non-shared repository has sharedpath == path
    if repo.sharedpath == repo.path:
        return None

    # cached by a previous call
    # NOTE(review): the attribute name is passed as bytes; presumably the
    # pycompat getattr wrapper handles this — confirm before relying on it.
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache for the next lookup
    repo.srcrepo = srcrepo
    return srcrepo
297 297
298 298
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a local repository object or a path (bytes).
    ``dest`` defaults to the basename of the source.
    ``update`` may be a boolean or a revision to check out.
    ``bookmarks`` controls whether the bookmark store is shared.
    ``defaultpath`` overrides the [paths] default written to the new repo.
    ``relative`` stores the share pointer relative to the new repository.

    Returns the newly created repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo is constructed with its final requirements
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
350 350
351 351
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to do when the share source has no config
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # the source config is written first and the current repo's config
    # last, so the current repo's entries take precedence
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
372 372
373 373
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around (renamed) instead of
            # deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure stale references to the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
420 420
421 421
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # point the new repo's [paths] default at the share source (or the
    # explicitly supplied defaultpath)
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    # narrow shares also need the narrow spec copied into the working copy
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
438 438
439 439
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    ``checkout`` is the preferred revision when ``update`` is simply True.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # fall back to b'default' and then b'tip' when the preferred
    # revision cannot be looked up
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
460 460
461 461
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts as None so util.copyfiles decides on the first
        # call whether hardlinking is possible; the result is threaded
        # through subsequent calls
        hardlink = None
        # NOTE(review): hardlink is always None at this point, so topic is
        # always b'copying'; presumably intentional since the real mode is
        # only known after the first copy — confirm
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo does not need its phase data copied
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # the caller never saw destlock, so it must be released here
        release(destlock)
        raise
500 500
501 501
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
607 607
608 608
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
618 618
619 619
620 620 def clone(
621 621 ui,
622 622 peeropts,
623 623 source,
624 624 dest=None,
625 625 pull=False,
626 626 revs=None,
627 627 update=True,
628 628 stream=False,
629 629 branch=None,
630 630 shareopts=None,
631 631 storeincludepats=None,
632 632 storeexcludepats=None,
633 633 depth=None,
634 634 ):
635 635 """Make a copy of an existing repository.
636 636
637 637 Create a copy of an existing repository in a new directory. The
638 638 source and destination are URLs, as passed to the repository
639 639 function. Returns a pair of repository peers, the source and
640 640 newly created destination.
641 641
642 642 The location of the source is added to the new repository's
643 643 .hg/hgrc file, as the default to be used for future pulls and
644 644 pushes.
645 645
646 646 If an exception is raised, the partly cloned/updated destination
647 647 repository will be deleted.
648 648
649 649 Arguments:
650 650
651 651 source: repository object or URL
652 652
653 653 dest: URL of destination repository to create (defaults to base
654 654 name of source repository)
655 655
656 656 pull: always pull from source repository, even in local case or if the
657 657 server prefers streaming
658 658
659 659 stream: stream raw data uncompressed from repository (fast over
660 660 LAN, slow over WAN)
661 661
662 662 revs: revision to clone up to (implies pull=True)
663 663
664 664 update: update working directory after clone completes, if
665 665 destination is local repository (True means update to default rev,
666 666 anything else is treated as a revision)
667 667
668 668 branch: branches to clone
669 669
670 670 shareopts: dict of options to control auto sharing behavior. The "pool" key
671 671 activates auto sharing mode and defines the directory for stores. The
672 672 "mode" key determines how to construct the directory name of the shared
673 673 repository. "identity" means the name is derived from the node of the first
674 674 changeset in the repository. "remote" means the name is derived from the
675 675 remote's path/URL. Defaults to "identity."
676 676
677 677 storeincludepats and storeexcludepats: sets of file patterns to include and
678 678 exclude in the repository copy, respectively. If not defined, all files
679 679 will be included (a "full" clone). Otherwise a "narrow" clone containing
680 680 only the requested files will be performed. If ``storeincludepats`` is not
681 681 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
682 682 ``path:.``. If both are empty sets, no files will be cloned.
683 683 """
684 684
685 685 if isinstance(source, bytes):
686 686 src = urlutil.get_clone_path(ui, source, branch)
687 687 origsource, source, branches = src
688 688 srcpeer = peer(ui, peeropts, source)
689 689 else:
690 690 srcpeer = source.peer() # in case we were called with a localrepo
691 691 branches = (None, branch or [])
692 692 origsource = source = srcpeer.url()
693 693 srclock = destlock = destwlock = cleandir = None
694 694 destpeer = None
695 695 try:
696 696 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
697 697
698 698 if dest is None:
699 699 dest = defaultdest(source)
700 700 if dest:
701 701 ui.status(_(b"destination directory: %s\n") % dest)
702 702 else:
703 703 dest = urlutil.get_clone_path(ui, dest)[0]
704 704
705 705 dest = urlutil.urllocalpath(dest)
706 706 source = urlutil.urllocalpath(source)
707 707
708 708 if not dest:
709 709 raise error.InputError(_(b"empty destination path is not valid"))
710 710
711 711 destvfs = vfsmod.vfs(dest, expandpath=True)
712 712 if destvfs.lexists():
713 713 if not destvfs.isdir():
714 714 raise error.InputError(
715 715 _(b"destination '%s' already exists") % dest
716 716 )
717 717 elif destvfs.listdir():
718 718 raise error.InputError(
719 719 _(b"destination '%s' is not empty") % dest
720 720 )
721 721
722 722 createopts = {}
723 723 narrow = False
724 724
725 725 if storeincludepats is not None:
726 726 narrowspec.validatepatterns(storeincludepats)
727 727 narrow = True
728 728
729 729 if storeexcludepats is not None:
730 730 narrowspec.validatepatterns(storeexcludepats)
731 731 narrow = True
732 732
733 733 if narrow:
734 734 # Include everything by default if only exclusion patterns defined.
735 735 if storeexcludepats and not storeincludepats:
736 736 storeincludepats = {b'path:.'}
737 737
738 738 createopts[b'narrowfiles'] = True
739 739
740 740 if depth:
741 741 createopts[b'shallowfilestore'] = True
742 742
743 743 if srcpeer.capable(b'lfs-serve'):
744 744 # Repository creation honors the config if it disabled the extension, so
745 745 # we can't just announce that lfs will be enabled. This check avoids
746 746 # saying that lfs will be enabled, and then saying it's an unknown
747 747 # feature. The lfs creation option is set in either case so that a
748 748 # requirement is added. If the extension is explicitly disabled but the
749 749 # requirement is set, the clone aborts early, before transferring any
750 750 # data.
751 751 createopts[b'lfs'] = True
752 752
753 753 if extensions.disabled_help(b'lfs'):
754 754 ui.status(
755 755 _(
756 756 b'(remote is using large file support (lfs), but it is '
757 757 b'explicitly disabled in the local configuration)\n'
758 758 )
759 759 )
760 760 else:
761 761 ui.status(
762 762 _(
763 763 b'(remote is using large file support (lfs); lfs will '
764 764 b'be enabled for this repository)\n'
765 765 )
766 766 )
767 767
768 768 shareopts = shareopts or {}
769 769 sharepool = shareopts.get(b'pool')
770 770 sharenamemode = shareopts.get(b'mode')
771 771 if sharepool and islocal(dest):
772 772 sharepath = None
773 773 if sharenamemode == b'identity':
774 774 # Resolve the name from the initial changeset in the remote
775 775 # repository. This returns nullid when the remote is empty. It
776 776 # raises RepoLookupError if revision 0 is filtered or otherwise
777 777 # not available. If we fail to resolve, sharing is not enabled.
778 778 try:
779 779 with srcpeer.commandexecutor() as e:
780 780 rootnode = e.callcommand(
781 781 b'lookup',
782 782 {
783 783 b'key': b'0',
784 784 },
785 785 ).result()
786 786
787 787 if rootnode != sha1nodeconstants.nullid:
788 788 sharepath = os.path.join(sharepool, hex(rootnode))
789 789 else:
790 790 ui.status(
791 791 _(
792 792 b'(not using pooled storage: '
793 793 b'remote appears to be empty)\n'
794 794 )
795 795 )
796 796 except error.RepoLookupError:
797 797 ui.status(
798 798 _(
799 799 b'(not using pooled storage: '
800 800 b'unable to resolve identity of remote)\n'
801 801 )
802 802 )
803 803 elif sharenamemode == b'remote':
804 804 sharepath = os.path.join(
805 805 sharepool, hex(hashutil.sha1(source).digest())
806 806 )
807 807 else:
808 808 raise error.Abort(
809 809 _(b'unknown share naming mode: %s') % sharenamemode
810 810 )
811 811
812 812 # TODO this is a somewhat arbitrary restriction.
813 813 if narrow:
814 814 ui.status(
815 815 _(b'(pooled storage not supported for narrow clones)\n')
816 816 )
817 817 sharepath = None
818 818
819 819 if sharepath:
820 820 return clonewithshare(
821 821 ui,
822 822 peeropts,
823 823 sharepath,
824 824 source,
825 825 srcpeer,
826 826 dest,
827 827 pull=pull,
828 828 rev=revs,
829 829 update=update,
830 830 stream=stream,
831 831 )
832 832
833 833 srcrepo = srcpeer.local()
834 834
835 835 abspath = origsource
836 836 if islocal(origsource):
837 837 abspath = util.abspath(urlutil.urllocalpath(origsource))
838 838
839 839 if islocal(dest):
840 840 if os.path.exists(dest):
841 841 # only clean up directories we create ourselves
842 842 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
843 843 cleandir = hgdir
844 844 else:
845 845 cleandir = dest
846 846
847 847 copy = False
848 848 if (
849 849 srcrepo
850 850 and srcrepo.cancopy()
851 851 and islocal(dest)
852 852 and not phases.hassecret(srcrepo)
853 853 ):
854 854 copy = not pull and not revs
855 855
856 856 # TODO this is a somewhat arbitrary restriction.
857 857 if narrow:
858 858 copy = False
859 859
860 860 if copy:
861 861 try:
862 862 # we use a lock here because if we race with commit, we
863 863 # can end up with extra data in the cloned revlogs that's
864 864 # not pointed to by changesets, thus causing verify to
865 865 # fail
866 866 srclock = srcrepo.lock(wait=False)
867 867 except error.LockError:
868 868 copy = False
869 869
870 870 if copy:
871 871 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
872 872
873 873 destrootpath = urlutil.urllocalpath(dest)
874 874 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
875 875 localrepo.createrepository(
876 876 ui,
877 877 destrootpath,
878 878 requirements=dest_reqs,
879 879 )
880 880 destrepo = localrepo.makelocalrepository(ui, destrootpath)
881 881
882 882 destwlock = destrepo.wlock()
883 883 destlock = destrepo.lock()
884 884 from . import streamclone # avoid cycle
885 885
886 886 streamclone.local_copy(srcrepo, destrepo)
887 887
888 888 # we need to re-init the repo after manually copying the data
889 889 # into it
890 890 destpeer = peer(srcrepo, peeropts, dest)
891 891
892 892 # make the peer aware that is it already locked
893 893 #
894 894 # important:
895 895 #
896 896 # We still need to release that lock at the end of the function
897 897 destpeer.local()._lockref = weakref.ref(destlock)
898 898 destpeer.local()._wlockref = weakref.ref(destwlock)
899 899 # dirstate also needs to be copied because `_wlockref` has a reference
900 900 # to it: this dirstate is saved to disk when the wlock is released
901 901 destpeer.local().dirstate = destrepo.dirstate
902 902
903 903 srcrepo.hook(
904 904 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
905 905 )
906 906 else:
907 907 try:
908 908 # only pass ui when no srcrepo
909 909 destpeer = peer(
910 910 srcrepo or ui,
911 911 peeropts,
912 912 dest,
913 913 create=True,
914 914 createopts=createopts,
915 915 )
916 916 except FileExistsError:
917 917 cleandir = None
918 918 raise error.Abort(_(b"destination '%s' already exists") % dest)
919 919
920 920 if revs:
921 921 if not srcpeer.capable(b'lookup'):
922 922 raise error.Abort(
923 923 _(
924 924 b"src repository does not support "
925 925 b"revision lookup and so doesn't "
926 926 b"support clone by revision"
927 927 )
928 928 )
929 929
930 930 # TODO this is batchable.
931 931 remoterevs = []
932 932 for rev in revs:
933 933 with srcpeer.commandexecutor() as e:
934 934 remoterevs.append(
935 935 e.callcommand(
936 936 b'lookup',
937 937 {
938 938 b'key': rev,
939 939 },
940 940 ).result()
941 941 )
942 942 revs = remoterevs
943 943
944 944 checkout = revs[0]
945 945 else:
946 946 revs = None
947 947 local = destpeer.local()
948 948 if local:
949 949 if narrow:
950 950 with local.wlock(), local.lock():
951 951 local.setnarrowpats(storeincludepats, storeexcludepats)
952 952 narrowspec.copytoworkingcopy(local)
953 953
954 954 u = urlutil.url(abspath)
955 955 defaulturl = bytes(u)
956 956 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
957 957 if not stream:
958 958 if pull:
959 959 stream = False
960 960 else:
961 961 stream = None
962 962 # internal config: ui.quietbookmarkmove
963 963 overrides = {(b'ui', b'quietbookmarkmove'): True}
964 964 with local.ui.configoverride(overrides, b'clone'):
965 965 exchange.pull(
966 966 local,
967 967 srcpeer,
968 968 heads=revs,
969 969 streamclonerequested=stream,
970 970 includepats=storeincludepats,
971 971 excludepats=storeexcludepats,
972 972 depth=depth,
973 973 )
974 974 elif srcrepo:
975 975 # TODO lift restriction once exchange.push() accepts narrow
976 976 # push.
977 977 if narrow:
978 978 raise error.Abort(
979 979 _(
980 980 b'narrow clone not available for '
981 981 b'remote destinations'
982 982 )
983 983 )
984 984
985 985 exchange.push(
986 986 srcrepo,
987 987 destpeer,
988 988 revs=revs,
989 989 bookmarks=srcrepo._bookmarks.keys(),
990 990 )
991 991 else:
992 992 raise error.Abort(
993 993 _(b"clone from remote to remote not supported")
994 994 )
995 995
996 996 cleandir = None
997 997
998 998 destrepo = destpeer.local()
999 999 if destrepo:
1000 1000 template = uimod.samplehgrcs[b'cloned']
1001 1001 u = urlutil.url(abspath)
1002 1002 u.passwd = None
1003 1003 defaulturl = bytes(u)
1004 1004 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1005 1005 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1006 1006
1007 1007 if ui.configbool(b'experimental', b'remotenames'):
1008 1008 logexchange.pullremotenames(destrepo, srcpeer)
1009 1009
1010 1010 if update:
1011 1011 if update is not True:
1012 1012 with srcpeer.commandexecutor() as e:
1013 1013 checkout = e.callcommand(
1014 1014 b'lookup',
1015 1015 {
1016 1016 b'key': update,
1017 1017 },
1018 1018 ).result()
1019 1019
1020 1020 uprev = None
1021 1021 status = None
1022 1022 if checkout is not None:
1023 1023 # Some extensions (at least hg-git and hg-subversion) have
1024 1024 # a peer.lookup() implementation that returns a name instead
1025 1025 # of a nodeid. We work around it here until we've figured
1026 1026 # out a better solution.
1027 1027 if len(checkout) == 20 and checkout in destrepo:
1028 1028 uprev = checkout
1029 1029 elif scmutil.isrevsymbol(destrepo, checkout):
1030 1030 uprev = scmutil.revsymbol(destrepo, checkout).node()
1031 1031 else:
1032 1032 if update is not True:
1033 1033 try:
1034 1034 uprev = destrepo.lookup(update)
1035 1035 except error.RepoLookupError:
1036 1036 pass
1037 1037 if uprev is None:
1038 1038 try:
1039 1039 if destrepo._activebookmark:
1040 1040 uprev = destrepo.lookup(destrepo._activebookmark)
1041 1041 update = destrepo._activebookmark
1042 1042 else:
1043 1043 uprev = destrepo._bookmarks[b'@']
1044 1044 update = b'@'
1045 1045 bn = destrepo[uprev].branch()
1046 1046 if bn == b'default':
1047 1047 status = _(b"updating to bookmark %s\n" % update)
1048 1048 else:
1049 1049 status = (
1050 1050 _(b"updating to bookmark %s on branch %s\n")
1051 1051 ) % (update, bn)
1052 1052 except KeyError:
1053 1053 try:
1054 1054 uprev = destrepo.branchtip(b'default')
1055 1055 except error.RepoLookupError:
1056 1056 uprev = destrepo.lookup(b'tip')
1057 1057 if not status:
1058 1058 bn = destrepo[uprev].branch()
1059 1059 status = _(b"updating to branch %s\n") % bn
1060 1060 destrepo.ui.status(status)
1061 1061 _update(destrepo, uprev)
1062 1062 if update in destrepo._bookmarks:
1063 1063 bookmarks.activate(destrepo, update)
1064 1064 if destlock is not None:
1065 1065 release(destlock)
1066 1066 if destwlock is not None:
1067 1067 release(destlock)
1068 1068 # here is a tiny windows were someone could end up writing the
1069 1069 # repository before the cache are sure to be warm. This is "fine"
1070 1070 # as the only "bad" outcome would be some slowness. That potential
1071 1071 # slowness already affect reader.
1072 1072 with destrepo.lock():
1073 1073 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1074 1074 finally:
1075 1075 release(srclock, destlock, destwlock)
1076 1076 if cleandir is not None:
1077 1077 shutil.rmtree(cleandir, True)
1078 1078 if srcpeer is not None:
1079 1079 srcpeer.close()
1080 1080 if destpeer and destpeer.local() is None:
1081 1081 destpeer.close()
1082 1082 return srcpeer, destpeer
1083 1083
1084 1084
def _showstats(repo, stats, quietempty=False):
    """Print the file-count summary of an update/merge result.

    ``stats`` exposes ``updatedcount``, ``mergedcount``, ``removedcount``
    and ``unresolvedcount``.  When ``quietempty`` is true and ``stats``
    reports nothing done, no message is emitted.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1100 1100
1101 1101
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated shim: warn once, then delegate to the merge module.
    msg = b'prefer merge.update() or merge.clean_update() over hg.updaterepo()'
    repo.ui.deprecwarn(msg, b'5.7')
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1120 1120
1121 1121
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved file merges remain after the update.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# naming conflict in clone()
_update = update
1133 1133
1134 1134
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False (no unresolved conflicts are possible)."""
    stats = mergemod.clean_update(repo[node])
    # a forced/clean update discards local changes, so it can never leave
    # unresolved file merges behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1146 1146
# Accepted values for the ``updatecheck`` argument of updatetotally() and
# for the commands.update.check config option; see the merge module for
# the semantics of each constant.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1153 1153
1154 1154
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one and possibly
            # carry the active bookmark along (movemarkfrom)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then fall
                # through with no further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # update succeeded: try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something else: drop bookmark activation
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1233 1233
1234 1234
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1256 1256
1257 1257
def abortmerge(ui, repo):
    """Abandon an in-progress merge and update back to the local side."""
    mergestate = mergestatemod.mergestate.read(repo)
    # with conflicts the mergestate records the local context; without
    # conflicts no mergestate was stored, so use the dirstate parent
    if mergestate.active():
        node = mergestate.localctx.hex()
    else:
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved file merges behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1271 1271
1272 1272
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the value of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepository path onto (or replace) the source URL
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn is rebound below once getremotechanges() takes ownership of
    # the peer; until then closing the peer directly is the right cleanup
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # may replace ``other`` with a (bundle-backed) local repo and hands
        # back the cleanup callable that must be used from here on
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1334 1334
1335 1335
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present locally; see `hg incoming`."""

    def subreporecurse():
        # with --subrepos, fold in the result of each subrepository
        ret = 1
        if not opts.get(b'subrepos'):
            return ret
        ctx = repo[None]
        for sub_path in sorted(ctx.substate):
            sub = ctx.sub(sub_path)
            ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1365 1365
1366 1366
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from every push destination.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes sorted by local revision number and ``others``
    is the list of successfully opened peers, which the caller must close.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # graft the subrepository path onto (or replace) the destination
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # ownership of the peer passes to the caller via ``others``
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1409 1409
1410 1410
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing in each subrepository when --subrepos is set.

    Returns 0 if any subrepository reports outgoing changes, else 1.
    """
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            ret = min(ret, wctx.sub(sub_path).outgoing(ui, dests, opts))
    return ret
1419 1419
1420 1420
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --newest-first, --no-merges and
    the configured limit.  Note: ``revs`` is reversed in place when
    --newest-first is requested.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # fast path: nothing to filter, stream everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skip_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1442 1442
1443 1443
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes exist (here or, with --subrepos, in
    any subrepository), 1 otherwise.
    """
    use_graph = bool(opts.get(b'graph'))
    if use_graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    missing, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if missing:
            ret = 0

            if use_graph:
                revdag = logcmdutil.graphrevs(repo, missing, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, missing, opts):
                    displayer.show(repo[node])
            displayer.close()
        # hooks run even when nothing is outgoing
        for other in others:
            cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for other in others:
            other.close()
1475 1475
1476 1476
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies visible subrepositories referenced from .hgsubstate.
    Returns the combined verify status (non-zero/truthy on problems).
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep the first failure (ret) sticky across subrepos
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up; report and keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1509 1509
1510 1510
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for sect in sections:
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1539 1539
1540 1540
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute holding a base path, file name under it);
# see cachedlocalrepo._repostate() for how they are stat()ed.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1550 1550
1551 1551
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files-of-interest plus
        # the newest mtime seen; used by fetch() to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # ``repository`` is a module-level factory defined elsewhere in
        # this file
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (tuple of (mtime, size) per file of interest, max mtime)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file absent: fall back to the containing directory so a
                # later appearance of the file still changes the state
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,265 +1,265 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10
11 11 import errno
12 12
13 13 from .i18n import _
14 14 from .node import sha1nodeconstants
15 15 from . import (
16 16 branchmap,
17 17 changelog,
18 18 error,
19 19 localrepo,
20 20 manifest,
21 21 namespaces,
22 22 pathutil,
23 23 pycompat,
24 24 requirements as requirementsmod,
25 25 url,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29 from .utils import (
30 30 urlutil,
31 31 )
32 32
33 33 urlerr = util.urlerr
34 34 urlreq = util.urlreq
35 35
36 36
class httprangereader:
    """A read-only, file-like view of a remote file fetched over HTTP.

    Reads are issued as HTTP Range requests so only the requested byte
    span is downloaded; the opener is expected to have an
    HTTPRangeHandler installed (see ``build_opener``).
    """

    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        # absolute positioning only; no ``whence`` argument is supported
        self.pos = pos

    def read(self, bytes=None):
        """Read up to ``bytes`` octets starting at the current position.

        Reads to end-of-file when ``bytes`` is None.  HTTP errors are
        reported as IOError; a 404 is mapped to ENOENT so a missing
        remote file looks like a missing local one to callers.
        """
        req = urlreq.request(pycompat.strurl(self.url))
        end = b''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            # conditional expression instead of the fragile `and/or` idiom
            num = errno.ENOENT if inst.code == 404 else None
            # Explicitly convert the exception to str as Py3 will try
            # convert it to local encoding and with as the HTTPResponse
            # instance doesn't support encode.
            raise IOError(num, str(inst))
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason)

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos : self.pos + bytes]
            else:
                data = data[self.pos :]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data

    def readlines(self):
        return self.read().splitlines(True)

    def __iter__(self):
        return iter(self.readlines())

    def close(self):
        pass
95 95
96 96
# _RangeError and _HTTPRangeHandler were originally in byterange.py,
# which was itself extracted from urlgrabber. See the last version of
# byterange.py from history if you need more information.
class _RangeError(IOError):
    """Error raised when an unsatisfiable range is requested."""
102 102
103 103
class _HTTPRangeHandler(urlreq.basehandler):
    """Handler that enables HTTP Range headers.

    This was extremely simple. The Range header is a HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response: treat it as a success, wrapping
        # the payload like a regular response object
        r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise _RangeError(b'Requested Range Not Satisfiable')
123 123
124 124
def build_opener(ui, authinfo):
    """Return a vfs *class* whose instances read through a shared
    range-capable URL opener.

    The opener (with credentials from ``authinfo``) is captured in the
    returned class's closure, so every vfs created from it reuses it.
    """
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(_HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base
            self.options = {}

        def __call__(self, path, mode=b'r', *args, **kw):
            # static-http is read-only: refuse any write mode
            if mode not in (b'r', b'rb'):
                raise IOError(b'Permission denied')
            f = b"/".join((self.base, urlreq.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return pathutil.join(self.base, path)
            else:
                return self.base

    return statichttpvfs
148 148
149 149
class statichttppeer(localrepo.localpeer):
    """Peer for a static-http repository: remote-like and read-only."""

    def local(self):
        # present as a remote: there is no writable local repo object
        return None

    def canpush(self):
        # static-http repositories can never be pushed to
        return False
156 156
157 157
class statichttprepository(
    localrepo.localrepository, localrepo.revlogfilestorage
):
    """Read-only repository accessed over plain HTTP range requests.

    Reimplements enough of localrepository.__init__ by hand to work
    against an http-range vfs instead of the filesystem.
    """

    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        # ``path`` is the repository URL without the static-http scheme
        # prefix (see ``instance`` below)
        self._url = path
        self.ui = ui

        self.root = path
        u = urlutil.url(path.rstrip(b'/') + b"/.hg")
        self.path, authinfo = u.authinfo()

        # all vfs instances share one authenticated, range-capable opener
        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join(b'cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None
        self._extrafilterid = None
        self._wanted_sidedata = set()
        self.features = set()

        try:
            requirements = set(self.vfs.read(b'requires').splitlines())
        except FileNotFoundError:
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.vfs(b"00changelog.i")
            fp.read(1)
            fp.close()
        except FileNotFoundError:
            # we do not care about empty old-style repositories here
            msg = _(b"'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)
        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            # share-safe repos keep part of their requirements in the store
            storevfs = vfsclass(self.vfs.join(b'store'))
            requirements |= set(storevfs.read(b'requires').splitlines())

        supportedrequirements = localrepo.gathersupportedrequirements(ui)
        localrepo.ensurerequirementsrecognized(
            requirements, supportedrequirements
        )
        localrepo.ensurerequirementscompatible(ui, requirements)
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # setup store
        self.store = localrepo.makestore(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        rootmanifest = manifest.manifestrevlog(self.nodeconstants, self.svfs)
        self.manifestlog = manifest.manifestlog(
            self.svfs, self, rootmanifest, self.narrowmatch()
        )
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        # pushing over static-http is impossible; drop the capability
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference([b"pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def wlock(self, wait=True):
        # read-only repository: locking is never available
        raise error.LockUnavailable(
            0,
            _(b'lock not available'),
            b'lock',
            _(b'cannot lock static-http repository'),
        )

    def lock(self, wait=True):
        # read-only repository: locking is never available
        raise error.LockUnavailable(
            0,
            _(b'lock not available'),
            b'lock',
            _(b'cannot lock static-http repository'),
        )

    def _writecaches(self):
        pass  # statichttprepository are read only
260 260
261 261
def instance(ui, path, create, intents=None, createopts=None):
    """Open a static-http repository at ``path`` and return its peer.

    ``path`` still carries the 7-byte scheme prefix, which is stripped
    before constructing the repository.  Repository creation is not
    possible over static-http, so ``create`` must be false.
    """
    if create:
        raise error.Abort(_(b'cannot create new static-http repository'))
    # return a peer, not the raw repository object (the diff's intent:
    # statichttprepo.instance now hands out a peer)
    return statichttprepository(ui, path[7:]).peer()
General Comments 0
You need to be logged in to leave comments. Login now