##// END OF EJS Templates
narrow: write the narrow spec in a transaction during clone...
marmoute -
r51084:6065a893 default
parent child Browse files
Show More
@@ -1,1668 +1,1670 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve requested branch names against ``other`` into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair (the URL fragment,
    if any, plus a list of branch names). ``revs`` is the explicit list of
    requested revisions (possibly empty/None).

    Returns a ``(revs, checkout)`` pair: the (possibly extended) revision
    list and the revision the working copy should be checked out at, or
    ``None`` when nothing specific was requested.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested at all: pass the explicit revs through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without branchmap we cannot resolve branch names remotely; the
        # hashbranch (URL fragment) is handed over as an opaque revision
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append the heads of ``branch`` to ``revs``; returns False when
        # the branch is unknown to the remote. b'.' means the local
        # dirstate branch.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the URL fragment may be a branch name or a raw revision; fall
        # back to treating it as a revision when branch lookup fails
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
115 115
116 116 def _isfile(path):
117 117 try:
118 118 # we use os.stat() directly here instead of os.path.isfile()
119 119 # because the latter started returning `False` on invalid path
120 120 # exceptions starting in 3.8 and we care about handling
121 121 # invalid paths specially here.
122 122 st = os.stat(path)
123 123 except ValueError as e:
124 124 msg = stringutil.forcebytestr(e)
125 125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 126 except OSError:
127 127 return False
128 128 else:
129 129 return stat.S_ISREG(st.st_mode)
130 130
131 131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at ``path`` is a bundle; anything else is treated
        # as a local repository directory
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # pick bundlerepo for a regular file, localrepo otherwise
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
149 149
# URL scheme -> module providing a repository factory (``instance``);
# consulted by repository() and islocal().
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing a peer factory (``make_peer``);
# consulted by peer() and islocal().
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
163 163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # handing in a repo/peer object is deprecated; keep honoring it
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()

    u = urlutil.url(repo)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        factory = peer_schemes[scheme]
        factory.make_peer  # make sure we load the module
    elif scheme in repo_schemes:
        factory = repo_schemes[scheme]
        factory.instance  # make sure we load the module
    else:
        factory = LocalFactory
    if util.safehasattr(factory, 'islocal'):
        return factory.islocal(repo)  # pytype: disable=module-attr
    return False
183 183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(u.localpath(), b'rb')
192 192
# a list of (ui, repo) functions called for wire peer initialization;
# applied by _setup_repo_or_peer() to any non-local peer it configures
wirepeersetupfuncs = []
196 196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks over a freshly created repo or peer ``obj``.

    ``presetupfuncs`` (caller-supplied callables) run first, then every
    loaded extension's ``reposetup`` hook, and finally — when ``obj`` is
    a wire peer (``obj.local()`` is falsy) — the module-level
    ``wirepeersetupfuncs``.
    """
    # prefer the object's own ui if it has one (it may carry extra config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        # a known peer scheme means the target is remote, hence no repo
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the "visible" filtered view (hides hidden changesets)
    return repo.filtered(b'visible')
244 244
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path

    ``uiorrepo`` may be a ui or a repo object (its ui is used when
    available); ``opts`` carries remote options applied via remoteui().
    ``path`` may be raw bytes or an already-built ``urlutil.path``.
    '''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository: open it locally and wrap it in a peer
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path)
    return peer
280 280
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last path component after normalization, or empty when no path
    return os.path.basename(os.path.normpath(path)) if path else b''
302 302
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: store lives inside the repo itself
        return None

    # pass the attribute name as str, consistent with every other
    # safehasattr() call in this module (the bytes form only works
    # through the pycompat getattr shim)
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache the source repo on the instance for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
321 321
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a local repo object or a bytes path/URL. ``dest``
    defaults to the base name of the source. ``bookmarks`` controls
    whether bookmarks are shared; ``relative`` stores the share pointer
    as a relative path. Returns the new (re-opened) shared repository.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        # resolve a branch given in the source URL fragment, if any
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open the destination so it picks up everything postshare wrote
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
375 375
def _prependsourcehgrc(repo):
    """Prepend the share source's config to this repo's .hg/hgrc.

    Run on unshare, and only when the share was created with the
    share-safe method (where the source's config is shared): without
    copying it, the unshared repository could silently lose hooks and
    other checks.
    """
    source_vfs = vfsmod.vfs(repo.sharedpath)
    local_vfs = vfsmod.vfs(repo.path)

    if not source_vfs.exists(b'hgrc'):
        # nothing to carry over
        return

    if local_vfs.exists(b'hgrc'):
        existing = local_vfs.read(b'hgrc')
    else:
        existing = b''

    with local_vfs(b'hgrc', b'wb') as fp:
        shared_config = source_vfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(shared_config)
        fp.write(b'\n')
        fp.write(existing)
397 397
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # rename (rather than delete) the share pointer so the old
            # value stays available on disk
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make further use of the old (now inconsistent) repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
445 445
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from as the default pull/push path
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrow spec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
463 463
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision was requested; it overrides any
        # previously computed checkout
        checkout = update
    # try the explicit checkout first, then the default branch, then tip
    # NOTE(review): assumes at least one candidate resolves (b'tip'
    # presumably always does), otherwise ``uprev`` below is unbound —
    # confirm for degenerate repositories
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
485 485
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # ``hardlink`` stays None until util.copyfiles() reports whether
        # hardlinking worked, so the initial progress topic is 'copying'
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # skip phase data when the source repo is publishing
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except: # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
525 525
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
632 632
633 633 # Recomputing caches is often slow on big repos, so copy them.
634 634 def _copycache(srcrepo, dstcachedir, fname):
635 635 """copy a cache from srcrepo to destcachedir (if it exists)"""
636 636 srcfname = srcrepo.cachevfs.join(fname)
637 637 dstfname = os.path.join(dstcachedir, fname)
638 638 if os.path.exists(srcfname):
639 639 if not os.path.exists(dstcachedir):
640 640 os.mkdir(dstcachedir)
641 641 util.copyfile(srcfname, dstfname)
642 642
643 643
644 644 def clone(
645 645 ui,
646 646 peeropts,
647 647 source,
648 648 dest=None,
649 649 pull=False,
650 650 revs=None,
651 651 update=True,
652 652 stream=False,
653 653 branch=None,
654 654 shareopts=None,
655 655 storeincludepats=None,
656 656 storeexcludepats=None,
657 657 depth=None,
658 658 ):
659 659 """Make a copy of an existing repository.
660 660
661 661 Create a copy of an existing repository in a new directory. The
662 662 source and destination are URLs, as passed to the repository
663 663 function. Returns a pair of repository peers, the source and
664 664 newly created destination.
665 665
666 666 The location of the source is added to the new repository's
667 667 .hg/hgrc file, as the default to be used for future pulls and
668 668 pushes.
669 669
670 670 If an exception is raised, the partly cloned/updated destination
671 671 repository will be deleted.
672 672
673 673 Arguments:
674 674
675 675 source: repository object or URL
676 676
677 677 dest: URL of destination repository to create (defaults to base
678 678 name of source repository)
679 679
680 680 pull: always pull from source repository, even in local case or if the
681 681 server prefers streaming
682 682
683 683 stream: stream raw data uncompressed from repository (fast over
684 684 LAN, slow over WAN)
685 685
686 686 revs: revision to clone up to (implies pull=True)
687 687
688 688 update: update working directory after clone completes, if
689 689 destination is local repository (True means update to default rev,
690 690 anything else is treated as a revision)
691 691
692 692 branch: branches to clone
693 693
694 694 shareopts: dict of options to control auto sharing behavior. The "pool" key
695 695 activates auto sharing mode and defines the directory for stores. The
696 696 "mode" key determines how to construct the directory name of the shared
697 697 repository. "identity" means the name is derived from the node of the first
698 698 changeset in the repository. "remote" means the name is derived from the
699 699 remote's path/URL. Defaults to "identity."
700 700
701 701 storeincludepats and storeexcludepats: sets of file patterns to include and
702 702 exclude in the repository copy, respectively. If not defined, all files
703 703 will be included (a "full" clone). Otherwise a "narrow" clone containing
704 704 only the requested files will be performed. If ``storeincludepats`` is not
705 705 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
706 706 ``path:.``. If both are empty sets, no files will be cloned.
707 707 """
708 708
709 709 if isinstance(source, bytes):
710 710 src_path = urlutil.get_clone_path_obj(ui, source)
711 711 if src_path is None:
712 712 srcpeer = peer(ui, peeropts, b'')
713 713 origsource = source = b''
714 714 branches = (None, branch or [])
715 715 else:
716 716 srcpeer = peer(ui, peeropts, src_path)
717 717 origsource = src_path.rawloc
718 718 branches = (src_path.branch, branch or [])
719 719 source = src_path.loc
720 720 else:
721 721 if util.safehasattr(source, 'peer'):
722 722 srcpeer = source.peer() # in case we were called with a localrepo
723 723 else:
724 724 srcpeer = source
725 725 branches = (None, branch or [])
726 726 # XXX path: simply use the peer `path` object when this become available
727 727 origsource = source = srcpeer.url()
728 728 srclock = destlock = destwlock = cleandir = None
729 729 destpeer = None
730 730 try:
731 731 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
732 732
733 733 if dest is None:
734 734 dest = defaultdest(source)
735 735 if dest:
736 736 ui.status(_(b"destination directory: %s\n") % dest)
737 737 else:
738 738 dest_path = urlutil.get_clone_path_obj(ui, dest)
739 739 if dest_path is not None:
740 740 dest = dest_path.rawloc
741 741 else:
742 742 dest = b''
743 743
744 744 dest = urlutil.urllocalpath(dest)
745 745 source = urlutil.urllocalpath(source)
746 746
747 747 if not dest:
748 748 raise error.InputError(_(b"empty destination path is not valid"))
749 749
750 750 destvfs = vfsmod.vfs(dest, expandpath=True)
751 751 if destvfs.lexists():
752 752 if not destvfs.isdir():
753 753 raise error.InputError(
754 754 _(b"destination '%s' already exists") % dest
755 755 )
756 756 elif destvfs.listdir():
757 757 raise error.InputError(
758 758 _(b"destination '%s' is not empty") % dest
759 759 )
760 760
761 761 createopts = {}
762 762 narrow = False
763 763
764 764 if storeincludepats is not None:
765 765 narrowspec.validatepatterns(storeincludepats)
766 766 narrow = True
767 767
768 768 if storeexcludepats is not None:
769 769 narrowspec.validatepatterns(storeexcludepats)
770 770 narrow = True
771 771
772 772 if narrow:
773 773 # Include everything by default if only exclusion patterns defined.
774 774 if storeexcludepats and not storeincludepats:
775 775 storeincludepats = {b'path:.'}
776 776
777 777 createopts[b'narrowfiles'] = True
778 778
779 779 if depth:
780 780 createopts[b'shallowfilestore'] = True
781 781
782 782 if srcpeer.capable(b'lfs-serve'):
783 783 # Repository creation honors the config if it disabled the extension, so
784 784 # we can't just announce that lfs will be enabled. This check avoids
785 785 # saying that lfs will be enabled, and then saying it's an unknown
786 786 # feature. The lfs creation option is set in either case so that a
787 787 # requirement is added. If the extension is explicitly disabled but the
788 788 # requirement is set, the clone aborts early, before transferring any
789 789 # data.
790 790 createopts[b'lfs'] = True
791 791
792 792 if b'lfs' in extensions.disabled():
793 793 ui.status(
794 794 _(
795 795 b'(remote is using large file support (lfs), but it is '
796 796 b'explicitly disabled in the local configuration)\n'
797 797 )
798 798 )
799 799 else:
800 800 ui.status(
801 801 _(
802 802 b'(remote is using large file support (lfs); lfs will '
803 803 b'be enabled for this repository)\n'
804 804 )
805 805 )
806 806
807 807 shareopts = shareopts or {}
808 808 sharepool = shareopts.get(b'pool')
809 809 sharenamemode = shareopts.get(b'mode')
810 810 if sharepool and islocal(dest):
811 811 sharepath = None
812 812 if sharenamemode == b'identity':
813 813 # Resolve the name from the initial changeset in the remote
814 814 # repository. This returns nullid when the remote is empty. It
815 815 # raises RepoLookupError if revision 0 is filtered or otherwise
816 816 # not available. If we fail to resolve, sharing is not enabled.
817 817 try:
818 818 with srcpeer.commandexecutor() as e:
819 819 rootnode = e.callcommand(
820 820 b'lookup',
821 821 {
822 822 b'key': b'0',
823 823 },
824 824 ).result()
825 825
826 826 if rootnode != sha1nodeconstants.nullid:
827 827 sharepath = os.path.join(sharepool, hex(rootnode))
828 828 else:
829 829 ui.status(
830 830 _(
831 831 b'(not using pooled storage: '
832 832 b'remote appears to be empty)\n'
833 833 )
834 834 )
835 835 except error.RepoLookupError:
836 836 ui.status(
837 837 _(
838 838 b'(not using pooled storage: '
839 839 b'unable to resolve identity of remote)\n'
840 840 )
841 841 )
842 842 elif sharenamemode == b'remote':
843 843 sharepath = os.path.join(
844 844 sharepool, hex(hashutil.sha1(source).digest())
845 845 )
846 846 else:
847 847 raise error.Abort(
848 848 _(b'unknown share naming mode: %s') % sharenamemode
849 849 )
850 850
851 851 # TODO this is a somewhat arbitrary restriction.
852 852 if narrow:
853 853 ui.status(
854 854 _(b'(pooled storage not supported for narrow clones)\n')
855 855 )
856 856 sharepath = None
857 857
858 858 if sharepath:
859 859 return clonewithshare(
860 860 ui,
861 861 peeropts,
862 862 sharepath,
863 863 source,
864 864 srcpeer,
865 865 dest,
866 866 pull=pull,
867 867 rev=revs,
868 868 update=update,
869 869 stream=stream,
870 870 )
871 871
872 872 srcrepo = srcpeer.local()
873 873
874 874 abspath = origsource
875 875 if islocal(origsource):
876 876 abspath = util.abspath(urlutil.urllocalpath(origsource))
877 877
878 878 if islocal(dest):
879 879 if os.path.exists(dest):
880 880 # only clean up directories we create ourselves
881 881 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
882 882 cleandir = hgdir
883 883 else:
884 884 cleandir = dest
885 885
886 886 copy = False
887 887 if (
888 888 srcrepo
889 889 and srcrepo.cancopy()
890 890 and islocal(dest)
891 891 and not phases.hassecret(srcrepo)
892 892 ):
893 893 copy = not pull and not revs
894 894
895 895 # TODO this is a somewhat arbitrary restriction.
896 896 if narrow:
897 897 copy = False
898 898
899 899 if copy:
900 900 try:
901 901 # we use a lock here because if we race with commit, we
902 902 # can end up with extra data in the cloned revlogs that's
903 903 # not pointed to by changesets, thus causing verify to
904 904 # fail
905 905 srclock = srcrepo.lock(wait=False)
906 906 except error.LockError:
907 907 copy = False
908 908
909 909 if copy:
910 910 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
911 911
912 912 destrootpath = urlutil.urllocalpath(dest)
913 913 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
914 914 localrepo.createrepository(
915 915 ui,
916 916 destrootpath,
917 917 requirements=dest_reqs,
918 918 )
919 919 destrepo = localrepo.makelocalrepository(ui, destrootpath)
920 920
921 921 destwlock = destrepo.wlock()
922 922 destlock = destrepo.lock()
923 923 from . import streamclone # avoid cycle
924 924
925 925 streamclone.local_copy(srcrepo, destrepo)
926 926
927 927 # we need to re-init the repo after manually copying the data
928 928 # into it
929 929 destpeer = peer(srcrepo, peeropts, dest)
930 930
931 931 # make the peer aware that is it already locked
932 932 #
933 933 # important:
934 934 #
935 935 # We still need to release that lock at the end of the function
936 936 destpeer.local()._lockref = weakref.ref(destlock)
937 937 destpeer.local()._wlockref = weakref.ref(destwlock)
938 938 # dirstate also needs to be copied because `_wlockref` has a reference
939 939 # to it: this dirstate is saved to disk when the wlock is released
940 940 destpeer.local().dirstate = destrepo.dirstate
941 941
942 942 srcrepo.hook(
943 943 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
944 944 )
945 945 else:
946 946 try:
947 947 # only pass ui when no srcrepo
948 948 destpeer = peer(
949 949 srcrepo or ui,
950 950 peeropts,
951 951 dest,
952 952 create=True,
953 953 createopts=createopts,
954 954 )
955 955 except FileExistsError:
956 956 cleandir = None
957 957 raise error.Abort(_(b"destination '%s' already exists") % dest)
958 958
959 959 if revs:
960 960 if not srcpeer.capable(b'lookup'):
961 961 raise error.Abort(
962 962 _(
963 963 b"src repository does not support "
964 964 b"revision lookup and so doesn't "
965 965 b"support clone by revision"
966 966 )
967 967 )
968 968
969 969 # TODO this is batchable.
970 970 remoterevs = []
971 971 for rev in revs:
972 972 with srcpeer.commandexecutor() as e:
973 973 remoterevs.append(
974 974 e.callcommand(
975 975 b'lookup',
976 976 {
977 977 b'key': rev,
978 978 },
979 979 ).result()
980 980 )
981 981 revs = remoterevs
982 982
983 983 checkout = revs[0]
984 984 else:
985 985 revs = None
986 986 local = destpeer.local()
987 987 if local:
988 988 if narrow:
989 with local.wlock(), local.lock():
989 with local.wlock(), local.lock(), local.transaction(
990 b'narrow-clone'
991 ):
990 992 local.setnarrowpats(storeincludepats, storeexcludepats)
991 993 narrowspec.copytoworkingcopy(local)
992 994
993 995 u = urlutil.url(abspath)
994 996 defaulturl = bytes(u)
995 997 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
996 998 if not stream:
997 999 if pull:
998 1000 stream = False
999 1001 else:
1000 1002 stream = None
1001 1003 # internal config: ui.quietbookmarkmove
1002 1004 overrides = {(b'ui', b'quietbookmarkmove'): True}
1003 1005 with local.ui.configoverride(overrides, b'clone'):
1004 1006 exchange.pull(
1005 1007 local,
1006 1008 srcpeer,
1007 1009 heads=revs,
1008 1010 streamclonerequested=stream,
1009 1011 includepats=storeincludepats,
1010 1012 excludepats=storeexcludepats,
1011 1013 depth=depth,
1012 1014 )
1013 1015 elif srcrepo:
1014 1016 # TODO lift restriction once exchange.push() accepts narrow
1015 1017 # push.
1016 1018 if narrow:
1017 1019 raise error.Abort(
1018 1020 _(
1019 1021 b'narrow clone not available for '
1020 1022 b'remote destinations'
1021 1023 )
1022 1024 )
1023 1025
1024 1026 exchange.push(
1025 1027 srcrepo,
1026 1028 destpeer,
1027 1029 revs=revs,
1028 1030 bookmarks=srcrepo._bookmarks.keys(),
1029 1031 )
1030 1032 else:
1031 1033 raise error.Abort(
1032 1034 _(b"clone from remote to remote not supported")
1033 1035 )
1034 1036
1035 1037 cleandir = None
1036 1038
1037 1039 destrepo = destpeer.local()
1038 1040 if destrepo:
1039 1041 template = uimod.samplehgrcs[b'cloned']
1040 1042 u = urlutil.url(abspath)
1041 1043 u.passwd = None
1042 1044 defaulturl = bytes(u)
1043 1045 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1044 1046 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1045 1047
1046 1048 if ui.configbool(b'experimental', b'remotenames'):
1047 1049 logexchange.pullremotenames(destrepo, srcpeer)
1048 1050
1049 1051 if update:
1050 1052 if update is not True:
1051 1053 with srcpeer.commandexecutor() as e:
1052 1054 checkout = e.callcommand(
1053 1055 b'lookup',
1054 1056 {
1055 1057 b'key': update,
1056 1058 },
1057 1059 ).result()
1058 1060
1059 1061 uprev = None
1060 1062 status = None
1061 1063 if checkout is not None:
1062 1064 # Some extensions (at least hg-git and hg-subversion) have
1063 1065 # a peer.lookup() implementation that returns a name instead
1064 1066 # of a nodeid. We work around it here until we've figured
1065 1067 # out a better solution.
1066 1068 if len(checkout) == 20 and checkout in destrepo:
1067 1069 uprev = checkout
1068 1070 elif scmutil.isrevsymbol(destrepo, checkout):
1069 1071 uprev = scmutil.revsymbol(destrepo, checkout).node()
1070 1072 else:
1071 1073 if update is not True:
1072 1074 try:
1073 1075 uprev = destrepo.lookup(update)
1074 1076 except error.RepoLookupError:
1075 1077 pass
1076 1078 if uprev is None:
1077 1079 try:
1078 1080 if destrepo._activebookmark:
1079 1081 uprev = destrepo.lookup(destrepo._activebookmark)
1080 1082 update = destrepo._activebookmark
1081 1083 else:
1082 1084 uprev = destrepo._bookmarks[b'@']
1083 1085 update = b'@'
1084 1086 bn = destrepo[uprev].branch()
1085 1087 if bn == b'default':
1086 1088 status = _(b"updating to bookmark %s\n" % update)
1087 1089 else:
1088 1090 status = (
1089 1091 _(b"updating to bookmark %s on branch %s\n")
1090 1092 ) % (update, bn)
1091 1093 except KeyError:
1092 1094 try:
1093 1095 uprev = destrepo.branchtip(b'default')
1094 1096 except error.RepoLookupError:
1095 1097 uprev = destrepo.lookup(b'tip')
1096 1098 if not status:
1097 1099 bn = destrepo[uprev].branch()
1098 1100 status = _(b"updating to branch %s\n") % bn
1099 1101 destrepo.ui.status(status)
1100 1102 _update(destrepo, uprev)
1101 1103 if update in destrepo._bookmarks:
1102 1104 bookmarks.activate(destrepo, update)
1103 1105 if destlock is not None:
1104 1106 release(destlock)
1105 1107 if destwlock is not None:
1106 1108 release(destlock)
1107 1109 # here is a tiny windows were someone could end up writing the
1108 1110 # repository before the cache are sure to be warm. This is "fine"
1109 1111 # as the only "bad" outcome would be some slowness. That potential
1110 1112 # slowness already affect reader.
1111 1113 with destrepo.lock():
1112 1114 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1113 1115 finally:
1114 1116 release(srclock, destlock, destwlock)
1115 1117 if cleandir is not None:
1116 1118 shutil.rmtree(cleandir, True)
1117 1119 if srcpeer is not None:
1118 1120 srcpeer.close()
1119 1121 if destpeer and destpeer.local() is None:
1120 1122 destpeer.close()
1121 1123 return srcpeer, destpeer
1122 1124
1123 1125
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* to the repo ui.

    When ``quietempty`` is true and the stats carry no changes at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1139 1141
1140 1142
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node`` (deprecated).

    With ``overwrite`` set, local changes are clobbered; otherwise they
    are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1159 1161
1160 1162
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1168 1170
1169 1171
# clone() binds a local name `update`, so keep a module-level alias to
# the function above for use there
_update = update
1172 1174
1173 1175
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``.

    Local changes are clobbered.  Always returns False, mirroring
    update()'s "has conflicts" convention: an overwriting update cannot
    leave unresolved files behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update can never produce merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1181 1183
1182 1184
# updatetotally() binds a local name `clean`; alias the function above
_clean = clean

# the set of updatecheck values updatetotally() accepts
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1192 1194
1193 1195
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (caller-supplied) value must be one of the known
        # constants; this is a programming error, hence ValueError
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    # the whole operation (destination resolution, update, bookmark
    # adjustment) happens under the wlock so the working copy state
    # stays consistent
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one; it may also
            # tell us to move the active bookmark (movemarkfrom)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # dirty-state check already done here, so disable it below
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # synchronize bookmark state with the new checkout
        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # updated to something that is not a bookmark: deactivate any
            # previously active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1272 1274
1273 1275
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with changeset ``ctx``, resolving changes.

    Returns True when any file merges are left unresolved.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount
    if unresolved:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1295 1297
1296 1298
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # conflicts were recorded: go back to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored; the
        # working directory parent is the place to return to
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1310 1312
1311 1313
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    # only a single source is supported for now
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # relative subpath: join it onto the source location, using
            # the path-separator convention matching the target
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # cleanupfn may be replaced below by the bundle-repo cleanup
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # fetch the incoming changesets into a temporary bundle repo;
        # `other` and `cleanupfn` are rebound to the bundle variants
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # close the peer and remove any temporary bundle
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1378 1380
1379 1381
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` that are not in ``repo``.

    Returns 0 when incoming changes were found, 1 otherwise.
    """

    def subreporecurse():
        # with --subrepos, recurse into each subrepository; the overall
        # result is 0 as soon as any subrepo has incoming changes
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1409 1411
1410 1412
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from every destination in ``dests``.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes sorted by local revision number and ``others``
    is the list of still-open peers (callers must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # relative subpath: join onto the destination, using the
                # path-separator convention matching the target
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peer is kept open for the caller (hooks, final close)
            others.append(other)
        except: # re-raises
            # deliberate bare except: make sure the peer is closed on any
            # failure (including KeyboardInterrupt), then re-raise
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1453 1455
1454 1456
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing for each subrepository when --subrepos is set.

    Returns 0 as soon as any subrepo reports outgoing changes, else 1.
    """
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1463 1465
1464 1466
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream the (possibly reversed) list as-is
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            # a merge has two non-null parents; skip it when asked to
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1486 1488
1487 1489
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets in ``repo`` not found in any of ``dests``.

    Returns 0 when outgoing changes were found, 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # --graph: render the outgoing set as an ascii DAG
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run outgoing hooks against each destination peer, then recurse
        # into subrepos; any subrepo with outgoing changes makes ret 0
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret # exit code is zero since we found outgoing changes
    finally:
        # the peers opened by _outgoing() are closed here, whatever happens
        for oth in others:
            oth.close()
1519 1521
1520 1522
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from .hgsubstate.
    Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # `or ret` keeps an earlier failure sticky while
                        # still verifying every subrepo
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # broad catch: a corrupt .hgsubstate can raise many things
                # while being parsed; report and continue with other revs
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1553 1555
1554 1556
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # a repository: start from its base ui (drops repo-specific
        # config) and read the options to copy from its repo-level ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # a plain (global) ui object: keep all of its options
        dst = src.copy()

    # ssh-specific options, command-line values taking precedence
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings that must follow to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1583 1585
1584 1586
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute holding a base directory, file name under it);
# see cachedlocalrepo._repostate() for the consumer.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'), # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
1594 1596
1595 1597
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest + latest mtime seen
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # state changed: open a fresh repo and re-apply the view filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(fingerprint, maxmtime)`` for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry
        in the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file missing (e.g. no bookmarks yet): fall back to the
                # containing directory so the entry still contributes
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the already-computed fingerprint with the copy
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now