##// END OF EJS Templates
outgoing: move filtering logic into its own function...
marmoute -
r47676:50b79f8b default
parent child Browse files
Show More
@@ -1,1538 +1,1551 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .pycompat import getattr
24 24
25 25 from . import (
26 26 bookmarks,
27 27 bundlerepo,
28 28 cacheutil,
29 29 cmdutil,
30 30 destutil,
31 31 discovery,
32 32 error,
33 33 exchange,
34 34 extensions,
35 35 httppeer,
36 36 localrepo,
37 37 lock,
38 38 logcmdutil,
39 39 logexchange,
40 40 merge as mergemod,
41 41 mergestate as mergestatemod,
42 42 narrowspec,
43 43 phases,
44 44 requirements,
45 45 scmutil,
46 46 sshpeer,
47 47 statichttprepo,
48 48 ui as uimod,
49 49 unionrepo,
50 50 url,
51 51 util,
52 52 verify as verifymod,
53 53 vfs as vfsmod,
54 54 )
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def _local(path):
    """Return the repo module handling *path*: bundlerepo for a regular
    file on disk, localrepo otherwise."""
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # os.stat() rather than os.path.isfile(): the latter started
        # returning False for invalid-path exceptions in Python 3.8, and
        # invalid paths need dedicated handling here.
        mode = os.stat(path).st_mode
    except (TypeError, ValueError) as e:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        return localrepo

    return bundlerepo if stat.S_ISREG(mode) else localrepo
87 87
88 88
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names into revisions against the peer ``other``.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    urlutil.parseurl().  Returns ``(revs, checkout)``: the possibly
    extended revision list and the revision a clone should check out
    (or None when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # Nothing branch-related requested: pass the revs through.
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # Remote cannot enumerate branch heads; treat the requested name
        # as a plain revision symbol instead.
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append the heads of ``branch`` (b'.' means the local dirstate
        # branch) to ``revs``; returns False when the branch is unknown.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # ``hashbranch`` may name a branch or be a bare revision; only
        # append it verbatim when it is not a known branch.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
131 131
132 132
def parseurl(path, branches=None):
    """parse url#branch, returning (url, (branch, branches))

    Deprecated shim: the implementation moved to
    mercurial.utils.urlutil; this wrapper warns and forwards.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
138 138
139 139
# Maps a URL scheme to the module (or callable returning one) that knows
# how to instantiate a repository/peer for it; b'file' is the fallback
# used by _peerlookup() for unknown schemes.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149 149
150 150
def _peerlookup(path):
    """Return the handler (module or instance factory) for *path*'s URL
    scheme, falling back to the b'file' handler."""
    scheme = urlutil.url(path).scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # 'handler' can be an unloaded module that implements __call__,
        # so callable(handler) is not a reliable test here.
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
163 163
164 164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # scheme handler has no islocal() -> treat as non-local
        return False
173 173
174 174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(u.localpath(), b'rb')
182 182
183 183
# a list of (ui, repo) functions called for wire peer initialization
# (extensions append here; _peerorrepo runs them for non-local peers)
wirepeersetupfuncs = []
186 186
187 187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Instantiates the repo/peer via the scheme handler, runs the caller's
    ``presetupfuncs``, every extension's ``reposetup`` hook, and — for
    non-local peers — the registered ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # The instance may carry its own (repo-level) ui; prefer it for setup.
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get extra initialization registered by extensions
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214 214
215 215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to a non-local peer; the returned repo
    is filtered to the b'visible' view.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
239 239
240 240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    return _peerorrepo(
        remoteui(uiorrepo, opts),
        path,
        create,
        intents=intents,
        createopts=createopts,
    ).peer()
247 247
248 248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # no path component (e.g. bare host URL) -> no sensible default
    return os.path.basename(os.path.normpath(path)) if path else b''
269 269
270 270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # return the memoized source repo when available
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    repo.srcrepo = srcrepo = repository(repo.ui, srcurl)
    return srcrepo
288 288
289 289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a path (bytes) or a repo object; ``dest`` defaults to
    the base name of ``source``.  When ``update`` is true (or names a
    revision), the working directory is updated afterwards.  Returns the
    new shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the configuration written by postshare() takes effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337 337
338 338
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to copy when the source has no config file
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
359 359
360 360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around (renamed) rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old (stale) repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407 407
408 408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425 425
426 426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default / tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        else:
            break
    _update(repo, uprev)
447 447
448 448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # NOTE(review): ``hardlink`` is always None at this point, so the
        # progress topic is always b'copying' even when hardlinks end up
        # being used — confirm whether this is intentional.
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # phase data of a publishing repo is implicit; skip it
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    # util.copyfiles reports back whether hardlinking worked,
                    # which is reused for subsequent files
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
487 487
488 488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

        # Resolve the value to put in [paths] section for the source.
        if islocal(source):
            defaultpath = os.path.abspath(urlutil.urllocalpath(source))
        else:
            defaultpath = source

        sharerepo = repository(ui, path=sharepath)
        destrepo = share(
            ui,
            sharerepo,
            dest=dest,
            update=False,
            bookmarks=False,
            defaultpath=defaultpath,
        )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595 595
596 596
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    if not os.path.exists(srcfname):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, os.path.join(dstcachedir, fname))
606 606
607 607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = ui.expandpath(dest)

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # BUGFIX: translate the literal format string,
                            # then interpolate; formatting before _() made
                            # the catalog lookup always miss.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1066 1066
1067 1067
def _showstats(repo, stats, quietempty=False):
    """Print a one-line update/merge summary for ``stats`` on ``repo.ui``.

    When ``quietempty`` is true and ``stats`` reports no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(template % counts)
1083 1083
1084 1084
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated compatibility shim: warn, then forward to the merge
    # module implementation.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=labels,
    )
1103 1103
1104 1104
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # truthy return signals "conflicts remain"
    return unresolved > 0
1112 1112
1113 1113
# `update` is shadowed by a local name inside clone(); keep a private
# module-level alias so clone() can still reach this function.
_update = update
1116 1116
1117 1117
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    update_stats = mergemod.clean_update(repo[node])
    # a forced update discards local changes, so no conflicts can remain
    assert update_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, update_stats, quietempty)
    # mirrors update(): report "conflicts remain", always False here
    return False
1125 1125
1126 1126
# `clean` is shadowed by a local name inside updatetotally(); keep a
# private alias so updatetotally() can still reach this function.
_clean = clean

# Values accepted for the `updatecheck` argument of updatetotally();
# anything else coming from configuration falls back to LINEAR, while an
# invalid value passed in code raises ValueError.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1136 1136
1137 1137
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (non-config) value must be valid: programming error
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no destination given: compute one (and possibly a bookmark
            # to move) from the repository state
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # check dirtiness up front, then let the plain update run
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # successful update: try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination named an existing bookmark: make it active
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is some other name: deactivate any bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1216 1216
1217 1217
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    merge_stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, merge_stats)
    conflicts = merge_stats.unresolvedcount > 0
    if conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1239 1239
1240 1240
def abortmerge(ui, repo):
    """Abandon an in-progress merge and update back to the pre-merge state."""
    state = mergestatemod.mergestate.read(repo)
    # with conflicts a merge state was stored and names the local side;
    # without conflicts no merge state exists, so fall back to '.'
    node = state.localctx.hex() if state.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1254 1254
1255 1255
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = urlutil.parseurl(
        ui.expandpath(source), opts.get(b'branch')
    )
    other = peer(repo, opts, source)
    # default cleanup just closes the peer; getremotechanges() below may
    # replace it with a callable that also removes a temporary bundle
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # may wrap `other` in a bundle repository holding the incoming csets
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1293 1293
1294 1294
def incoming(ui, repo, source, opts):
    """Show changesets in ``source`` not present locally; return exit code."""

    def subreporecurse():
        # recurse into subrepos when --subrepos was given; 0 wins
        status = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                subrepo = wctx.sub(subpath)
                status = min(status, subrepo.incoming(ui, source, opts))
        return status

    def display(other, chlist, displayer):
        # apply --limit / --newest-first / --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(node) if p != nullid]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1320 1320
1321 1321
def _outgoing(ui, repo, dest, opts):
    """Resolve ``dest`` and compute the changesets missing from it.

    Returns a ``(missing, otherpeer)`` pair. The caller must close
    ``otherpeer``; if this function raises, the peer is closed here.
    """
    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    try:
        outgoing = discovery.findcommonoutgoing(
            repo, other, revs, force=opts.get(b'force')
        )
        missing = outgoing.missing
        if not missing:
            scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
        return missing, other
    except:  # re-raises
        other.close()
        raise
1349 1349
1350 1350
def _outgoing_recurse(ui, repo, dest, opts):
    """Run outgoing on each subrepo when --subrepos was given.

    Returns 0 if any subrepo reported outgoing changes, 1 otherwise.
    """
    status = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            subrepo = wctx.sub(subpath)
            status = min(status, subrepo.outgoing(ui, dest, opts))
    return status
1359 1359
1360 1360
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --newest-first, --limit and
    --no-merges. NOTE: --newest-first reverses ``revs`` in place, so the
    caller's list is mutated (matching historical behavior).
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # nothing to filter: stream the (possibly reversed) nodes as-is
        for node in revs:
            yield node
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1382
1383
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination.

    Displays the filtered outgoing changesets (via ``_outgoing_filter``),
    runs the outgoing hooks, and recurses into subrepos when requested.
    Returns 0 when outgoing changesets were found, 1 otherwise.
    """
    # NOTE: this region contained interleaved pre/post-refactor diff lines;
    # resolved to the post-change code that delegates filtering to
    # _outgoing_filter().
    o, other = _outgoing(ui, repo, dest, opts)
    ret = 1
    try:
        if o:
            ret = 0

            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        # subrepo recursion can only lower (improve) the exit code
        ret = min(ret, _outgoing_recurse(ui, repo, dest, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        other.close()
1389 1402
1390 1403
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep the worst (truthy) verify status seen so far
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a broken subrepo only produces a warning
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself failed: unparsable .hgsubstate
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1423 1436
1424 1437
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src is a repository: start from its base ui so repo-specific
        # configuration is dropped, then read options from the repo's ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # src is already a (global) ui object: keep all global options
        dst = src.copy()

    # ssh-specific options; command-line values win over configuration
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for name, value in src.configitems(section):
            dst.setconfig(section, name, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1453 1466
1454 1467
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute holding a directory path, file name in it);
# see cachedlocalrepo._repostate() for how these are stat()ed.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1464 1477
1465 1478
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, and
        # the newest mtime seen; used by fetch() to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed: reuse the cached instance
            return self._repo, False

        # stale: open a fresh repository and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return a ``(state, maxmtime)`` snapshot of the files in ``foi``.

        ``state`` is a tuple of (mtime, size) pairs, ``maxmtime`` the
        newest mtime observed across those files.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist (yet); stat the containing directory
                # instead so we still record a usable timestamp
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        # share the staleness snapshot so the copy refreshes on the same
        # schedule as the original
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now