##// END OF EJS Templates
clone: update to active bookmark, if set...
Dan Villiom Podlaski Christiansen -
r46814:72007a9a default
parent child Browse files
Show More
@@ -1,1502 +1,1506
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .pycompat import getattr
24 24
25 25 from . import (
26 26 bookmarks,
27 27 bundlerepo,
28 28 cacheutil,
29 29 cmdutil,
30 30 destutil,
31 31 discovery,
32 32 error,
33 33 exchange,
34 34 extensions,
35 35 httppeer,
36 36 localrepo,
37 37 lock,
38 38 logcmdutil,
39 39 logexchange,
40 40 merge as mergemod,
41 41 mergestate as mergestatemod,
42 42 narrowspec,
43 43 phases,
44 44 pycompat,
45 45 requirements,
46 46 scmutil,
47 47 sshpeer,
48 48 statichttprepo,
49 49 ui as uimod,
50 50 unionrepo,
51 51 url,
52 52 util,
53 53 verify as verifymod,
54 54 vfs as vfsmod,
55 55 )
56 56 from .utils import hashutil
57 57
58 58 release = lock.release
59 59
60 60 # shared features
61 61 sharedbookmarks = b'bookmarks'
62 62
63 63
def _local(path):
    """Return the repository module appropriate for a local *path*.

    A path naming a regular file is assumed to be a bundle and maps to
    ``bundlerepo``; anything else maps to ``localrepo``.
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        # os.stat() is used instead of os.path.isfile() because the
        # latter started returning `False` on invalid-path exceptions
        # in Python 3.8, and invalid paths need dedicated handling here.
        isfile = stat.S_ISREG(os.stat(path).st_mode)
    except (TypeError, ValueError) as e:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
83 83
84 84
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve requested branch names against ``other`` into revisions.

    ``branches`` is the ``(hashbranch, branches)`` pair as produced by
    ``parseurl()`` (the URL fragment plus explicitly requested branches);
    ``revs`` is the caller-supplied revision list, if any.

    Returns ``(revs, checkout)``: the possibly-extended revision list and
    the revision to check out afterwards (may be None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # Nothing branch-related requested: pass revs through unchanged.
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # Old servers without branchmap support: the fragment can only be
        # treated as a plain revision symbol, resolved remotely.
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append the heads of ``branch`` (newest first) to ``revs``;
        # return False when the remote does not know the branch.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # The URL fragment may name a branch or a revision; fall back to
        # treating it as a revision when it is not a known branch.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
127 127
128 128
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    if u.fragment:
        # The fragment names a branch; strip it from the URL proper.
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
138 138
139 139
# Maps a URL scheme to the module (or callable returning one) that
# provides the ``instance()`` constructor for that kind of repository
# or peer.  ``file`` maps to the callable _local(), which distinguishes
# bundles from plain local repositories.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149 149
150 150
def _peerlookup(path):
    """Return the scheme handler for ``path``.

    Unknown or missing schemes fall back to the ``file`` handler.  The
    result is either a module providing ``instance()`` or, for bundles,
    the repository module selected by _local().
    """
    u = util.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        # The handler may itself be a callable (e.g. _local) that picks
        # the concrete module for this particular path.
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
163 163
164 164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # A live repo/peer object knows whether it is local.
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # Scheme handler has no islocal() -> treat as remote.
        return False
173 173
174 174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
182 182
183 183
# a list of (ui, repo) functions called for wire peer initialization
# (populated by extensions; applied in _peerorrepo for non-local peers)
wirepeersetupfuncs = []
186 186
187 187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # The instance may carry its own (repo-level) ui; prefer it from here on.
    ui = getattr(obj, "ui", ui)
    # presetup functions run before any extension reposetup hooks.
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # Wire peers get extra initialization registered by extensions.
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214 214
215 215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    # Only local repositories can be returned; remote peers are an error.
    localobj = obj.local()
    if not localobj:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return localobj.filtered(b'visible')
239 239
240 240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # Derive a ui configured for remote interaction from the caller's
    # ui or repo, then build the peer from the repo-or-peer object.
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247 247
248 248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = util.url(source).path
    if not path:
        return b''
    # Last path component, ignoring any trailing slash.
    return os.path.basename(os.path.normpath(path))
269 269
270 270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # Not a share: store lives inside the repo itself.
        return None

    # Cached on the repo object by a previous call.
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent lookups
    return srcrepo
288 288
289 289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path/URL (bytes) or an existing repo/peer object.
    ``update`` may be True (update to default head), a revision, or falsy
    to skip the working-copy update.  Returns the new shared repository.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        # rev is unused here; only the checkout target matters.
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # Re-open so the repo object reflects the share requirements and the
    # hgrc written by postshare().
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337 337
338 338
def _prependsourcehgrc(repo):
    """copies the source repo config and prepends it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using the
    share-safe method, where the config of the source is shared with shares."""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # Nothing to copy from the share source.
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # Source config goes first so the local (current) config keeps
    # precedence — later entries override earlier ones.
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
359 359
360 360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # Keep the old pointer around (renamed) rather than deleting it,
            # so the operation is recoverable by hand if something goes wrong.
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # Make any further use of the old repo object fail loudly.
    localrepo.poisonrepository(repo)

    return newrepo
407 407
408 408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # Point the share's default path at the source (or an explicit override).
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # Narrow clones also need the narrowspec mirrored into the
        # share's working copy.
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425 425
426 426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    ``checkout`` is a fallback revision used when ``update`` is True.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # Try candidates in order of preference; the first one that resolves
    # in the repo wins.
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
447 447
448 448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # NOTE(review): ``hardlink`` is always None at this point, so the
        # progress topic is always b'copying' even when util.copyfiles
        # ends up hardlinking — confirm whether that is intended.
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # Publishing repos do not carry phase data over.
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    # util.copyfiles returns whether hardlinking worked;
                    # feed that back in so subsequent copies reuse the mode.
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # Release the destination lock before propagating; the caller
        # never sees the lock on failure.
        release(destlock)
        raise
487 487
488 488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595 595
596 596
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
606 606
607 607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.InputError(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.InputError(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.InputError(_(b"destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabled_help(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup',
                        {
                            b'key': b'0',
                        },
                    ).result()

                if rootnode != nullid:
                    sharepath = os.path.join(sharepool, hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        # Prefer the source's active bookmark; fall back to
                        # the conventional '@' bookmark.
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Fix: apply %-formatting AFTER the gettext
                            # lookup; formatting the msgid first would
                            # never match the translation catalog.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = _(
                                b"updating to bookmark %s on branch %s\n"
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
1044 1048
1045 1049
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge statistics to the ui.

    With ``quietempty`` set, nothing is printed when the stats are empty.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1061 1065
1062 1066
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to node.

    When ``overwrite`` is set, local changes are clobbered; otherwise they
    are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).  New code
    should call merge.update() or merge.clean_update() directly.
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # `overwrite` maps onto merge's `force` flag; this is never a branch merge.
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1081 1085
1082 1086
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to node; return True on conflicts."""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# `update` is shadowed by a local name inside clone(); keep an alias
_update = update
1094 1098
1095 1099
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to node, clobbering changes."""
    stats = mergemod.clean_update(repo[node])
    # a clean update overwrites everything, so conflicts are impossible
    assert stats.unresolvedcount == 0
    if not show_stats:
        return
    _showstats(repo, stats, quietempty)


# `clean` is shadowed by a parameter name inside updatetotally(); keep an alias
_clean = clean
1106 1110
# The set of accepted values for the `updatecheck` argument (and the
# `commands.update.check` config); see updatetotally() for the meaning
# of each constant.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1113 1117
1114 1118
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument (not config) with a bad value is a caller bug
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    # hold the wlock for the whole update + bookmark adjustment so the
    # working directory and bookmark state stay consistent
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one; it may also
            # hand back a bookmark to move (movemarkfrom) and a name (brev)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then fall
                # through with no further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # only touch bookmarks when the update itself succeeded (ret falsy)
        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1193 1197
1194 1198
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with ctx, resolving changes.

    Returns True when unresolved conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1216 1220
1217 1221
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # With recorded conflicts, go back to the local side of the merge;
    # otherwise no mergestate was stored and the working parent is the target.
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update can never leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1231 1235
1232 1236
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    remote = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, remote, branches, opts.get(b'rev'))

    if revs:
        revs = [remote.lookup(rev) for rev in revs]
    # getremotechanges may replace the peer with a bundlerepo wrapper and
    # hands back a cleanup callback that must always run
    remote, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, remote, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, remote, opts, buffered=buffered
        )
        displaychlist(remote, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1266 1270
1267 1271
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from source; return exit code."""

    def subreporecurse():
        # recurse into subrepositories when requested; keep the lowest
        # (most successful) exit code seen
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(node) if p != nullid]
            # a merge has two non-null parents
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1293 1297
1294 1298
def _outgoing(ui, repo, dest, opts):
    """Compute changesets missing from dest; return (missing, otherpeer)."""
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    # prefer the push location when one is configured
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    otherpeer = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, otherpeer, revs, force=opts.get(b'force')
    )
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, otherpeer
1318 1322
1319 1323
def outgoing(ui, repo, dest, opts):
    """Show changesets that would be pushed to dest; return exit code."""

    def recurse():
        # recurse into subrepositories when requested; keep the lowest
        # (most successful) exit code seen
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        # still run the hooks (with an empty list) before recursing
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return recurse()

    if opts.get(b'newest_first'):
        missing.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get(b'no_merges')
    shown = 0
    for node in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(node) if p != nullid]
        # a merge has two non-null parents
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[node])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    recurse()
    return 0  # exit code is zero since we found outgoing changes
1353 1357
1354 1358
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the core verify first, then checks that subrepo references in
    .hgsubstate files resolve.  Returns the combined result (the core
    result, or a subrepo's verify() result when the core result is falsy).
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep the first truthy (failing) result via `or ret`
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # an individual subrepo being broken is only a warning
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # iterating substate itself blew up: the file is unparsable
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1387 1391
1388 1392
def remoteui(src, opts):
    """Build a ui suitable for remote operations from a ui or repo + opts."""
    if util.safehasattr(src, b'baseui'):
        # looks like a repository: start from its base ui so that
        # repo-specific config is dropped, then read options from repo.ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume a plain (global) ui object; keep all of its options
        dst = src.copy()

    # ssh-specific options, from the command line or the source config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # forward selected local sections (auth/security/proxy) wholesale
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1417 1421
1418 1422
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute naming a base directory,
# filename relative to it); see cachedlocalrepo._repostate() for the stat
# logic.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1428 1432
1429 1433
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        newstate, newmtime = self._repostate()
        if newstate == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = newstate
        self.mtime = newmtime

        return self._repo, True

    def _repostate(self):
        """Return (fingerprint tuple, newest mtime) for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per `foi`
        entry; a missing file falls back to stat()ing its base directory.
        """
        entries = []
        newest = -1
        for attrname, fname in foi:
            base = getattr(self._repo, attrname)
            target = os.path.join(base, fname)
            try:
                st = os.stat(target)
            except OSError:
                st = os.stat(base)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            if mtime > newest:
                newest = mtime

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        base = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            base = base.filtered(self._filtername)
        else:
            base = base.unfiltered()
        dup = cachedlocalrepo(base)
        # carry the cached fingerprint over so the copy does not refresh
        # spuriously on its first fetch()
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
General Comments 0
You need to be logged in to leave comments. Login now