##// END OF EJS Templates
hg: let extensions call the func without populating opts keys...
Sushil khanchi -
r48987:1d70fb83 default
parent child Browse files
Show More
@@ -1,1590 +1,1590 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
# convenience alias: callers release one or more locks via hg.release()
release = lock.release

# shared features
# key written to .hg/shared to indicate bookmarks are shared with the source
sharedbookmarks = b'bookmarks'
66 66
67 67
def _local(path):
    """Return the repository module appropriate for a local *path*:
    ``bundlerepo`` when the path is a regular file, ``localrepo`` otherwise.
    """
    localpath = util.expandpath(urlutil.urllocalpath(path))

    is_regular_file = False
    try:
        # os.stat() rather than os.path.isfile(): the latter started
        # returning False on invalid-path exceptions in Python 3.8, and we
        # want to report invalid paths specially here.
        is_regular_file = stat.S_ISREG(os.stat(localpath).st_mode)
    except OSError:
        pass
    except (TypeError, ValueError) as exc:
        # Python 2 raises TypeError, Python 3 ValueError for bad paths.
        raise error.Abort(
            _(b'invalid path %s: %s')
            % (localpath, stringutil.forcebytestr(exc))
        )

    return bundlerepo if is_regular_file else localrepo
87 87
88 88
def addbranchrevs(lrepo, other, branches, revs):
    """Expand requested branch names into concrete revisions.

    ``lrepo`` is a local repo used only to resolve the b'.' branch (may be
    falsy).  ``other`` is a repo or peer supplying the branchmap.
    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    urlutil.parseurl().  ``revs`` is an optional list of revisions the
    caller already selected.

    Returns ``(revs, checkout)`` where ``checkout`` is the revision a
    clone should update to (may be None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass the caller's revs straight through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old server without branchmap support: treat the URL fragment as
        # a plain revision name instead
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append all heads of ``branch`` to ``revs``; False if unknown
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may name either a branch or a plain revision
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
131 131
132 132
def parseurl(path, branches=None):
    """parse url#branch, returning (url, (branch, branches))

    Deprecated shim: the implementation lives in
    mercurial.utils.urlutil.parseurl().
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
138 138
139 139
# map of URL scheme -> module (or callable returning one) implementing
# repository access for that scheme; _peerlookup() falls back to b'file'
# when no scheme is present.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149 149
150 150
def _peerlookup(path):
    """Return the scheme handler for *path*: either the result of calling
    the registered entry (for callable entries such as ``_local``) or the
    module itself."""
    scheme = urlutil.url(path).scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # callable(handler) is not a reliable test here: 'handler' can be
        # an unloaded (demand-imported) module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
163 163
164 164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # a bytes path: ask the scheme handler, which may not support islocal()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
173 173
174 174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
182 182
183 183
# a list of (ui, repo) functions called for wire peer initialization;
# extensions append to this to customize every non-local peer built by
# _peerorrepo()
wirepeersetupfuncs = []
186 186
187 187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Runs ``presetupfuncs`` callbacks, then every loaded extension's
    ``reposetup`` hook, and finally the wire-peer setup functions when the
    object is not a local repository.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the instance's own ui (repo-level config) when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # time each extension's hook individually for ui.log
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get additional per-extension initialization
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214 214
215 215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to a non-local peer; the returned
    repository is filtered to the b'visible' view.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    local = obj.local()
    if not local:
        msg = _(b"repository '%s' is not local") % (path or obj.url())
        raise error.Abort(msg)
    return local.filtered(b'visible')
239 239
240 240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # remoteui() derives a ui configured for remote interaction from opts
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247 247
248 248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    urlpath = urlutil.url(source).path
    if urlpath:
        return os.path.basename(os.path.normpath(urlpath))
    return b''
269 269
270 270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.  The resolved source
    is cached on ``repo.srcrepo`` for later calls.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    cached = util.safehasattr(repo, b'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
288 288
289 289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a local path/URL or a repo object.  ``update`` may be a
    boolean or a revision to check out.  ``relative`` stores the share
    pointer as a relative path.  Returns the new shared repository.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        # strip fragment / resolve [paths] aliases for the destination
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # NOTE(review): the repo is re-opened here, presumably to pick up the
    # state written by postshare() — confirm before relying on it
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
336 336
337 337
def _prependsourcehgrc(repo):
    """Prepend the share source's .hg/hgrc to this repo's .hg/hgrc.

    Called on unshare; only meaningful when the share was performed with
    the share-safe method, where the source's config is shared.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')
    else:
        currentconfig = b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
358 358
359 359
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            # keep the old share pointer around as a backup instead of
            # deleting it outright
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
406 406
407 407
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        destrepo.vfs.write(
            b'hgrc',
            util.tonativeeol(b'[paths]\ndefault = %s\n' % default),
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
424 424
425 425
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
446 446
447 447
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink stays None until util.copyfiles() reports whether
        # hardlinking worked; note that this means the progress topic
        # below is always b'copying'
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # skip phaseroots for publishing repos — presumably
                    # regenerated on the destination; confirm
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # don't leak the destination lock on failure
        release(destlock)
        raise
486 486
487 487
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
594 594
595 595
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    if not os.path.exists(srcfname):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, os.path.join(dstcachedir, fname))
605 605
606 606
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the
            # extension, so we can't just announce that lfs will be enabled.
            # This check avoids saying that lfs will be enabled, and then
            # saying it's an unknown feature. The lfs creation option is set
            # in either case so that a requirement is added. If the extension
            # is explicitly disabled but the requirement is set, the clone
            # aborts early, before transferring any data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # fix: _() must receive the untranslated format
                            # string so it can match the translation catalog;
                            # interpolate *after* translating (matches the
                            # sibling branch below)
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1058 1058
1059 1059
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of an update/merge result.

    ``stats`` is the updateresult returned by the merge machinery. When
    ``quietempty`` is true and nothing at all happened, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1076 1076
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # deprecated entry point, kept for compatibility; emit the standard
    # deprecation warning before delegating to the merge module
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    merge_labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=merge_labels,
        updatecheck=updatecheck,
    )
1095 1095
1096 1096
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved


# naming conflict in clone()
_update = update
1108 1108
1109 1109
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a forced (clean) update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # never any conflicts, so always report "no unresolved files"
    return False


# naming conflict in updatetotally()
_clean = clean
1121 1121
# the closed set of accepted values for the ``commands.update.check`` knob
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}


def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit revision: pick the configured update destination
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # the dirty check already happened; no further checking needed
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1208 1208
1209 1209
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    hasunresolved = stats.unresolvedcount > 0
    if hasunresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return hasunresolved
1231 1231
1232 1232
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts; return to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1246 1246
1247 1247
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found; otherwise returns the
    value of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # use opts.get() instead of opts[...] so extensions may call this
        # function without populating the 'bundle'/'force' keys
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1304 1304
1305 1305
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present locally.

    Returns 0 when incoming changes were found, 1 otherwise.
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos was requested
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit / --newest-first / --no-merges while showing csets
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1335 1335
1336 1336
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from every destination in ``dests``.

    Returns ``(outgoing_revs, others)``: the missing nodes sorted by
    revision number, and the list of still-open peers that were contacted
    (the caller is responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            missing = outgoing.missing
            out.update(missing)
            if not missing:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = sorted(out, key=repo.changelog.rev)
    return outgoing_revs, others
1375 1375
1376 1376
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo when --subrepos was requested.

    Returns 0 if any subrepo had outgoing changes, 1 otherwise.
    """
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1385 1385
1386 1386
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --newest-first, --limit and
    --no-merges. Note that --newest-first reverses ``revs`` in place.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # fast path: nothing to filter, pass everything through
        for r in revs:
            yield r
        return

    emitted = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield n
1408 1408
1409 1409
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destinations.

    Returns 0 when outgoing changes were found, 1 otherwise.
    """
    graph = opts.get(b'graph')
    if graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if graph:
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run the outgoing hooks even when nothing was found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for oth in others:
            oth.close()
1441 1441
1442 1442
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if not revs:
        return ret

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    ret = (
                        ctx.sub(subpath, allowcreate=False).verify() or ret
                    )
                except error.RepoError as e:
                    repo.ui.warn(b'%d: %s\n' % (rev, e))
        except Exception:
            # .hgsubstate could not even be parsed for this revision
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % short(ctx.node())
            )

    return ret
1475 1475
1476 1476
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options; command-line opts win over config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1505 1505
1506 1506
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1516 1516
1517 1517
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed on disk; reuse the cached repo
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Snapshot (mtime, size) of the files of interest.

        Returns ``(state, maxmtime)`` where ``state`` is a hashable tuple
        of per-file (mtime, size) pairs and ``maxmtime`` is the newest
        mtime observed (-1 if nothing could be stat'ed -- not expected,
        since we fall back to the containing directory).
        """
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may legitimately not exist (e.g. no bookmarks yet);
                # fall back to the directory so a later creation is noticed
                st = os.stat(prefix)
            entries.append((st[stat.ST_MTIME], st.st_size))
            newest = max(newest, st[stat.ST_MTIME])

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now