##// END OF EJS Templates
branching: merge with stable
Raphaël Gomès -
r49418:f13fb742 merge default
parent child Browse files
Show More
@@ -1,1601 +1,1610 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 import posixpath
13 14 import shutil
14 15 import stat
15 16 import weakref
16 17
17 18 from .i18n import _
18 19 from .node import (
19 20 hex,
20 21 sha1nodeconstants,
21 22 short,
22 23 )
23 24 from .pycompat import getattr
24 25
25 26 from . import (
26 27 bookmarks,
27 28 bundlerepo,
28 29 cmdutil,
29 30 destutil,
30 31 discovery,
31 32 error,
32 33 exchange,
33 34 extensions,
34 35 graphmod,
35 36 httppeer,
36 37 localrepo,
37 38 lock,
38 39 logcmdutil,
39 40 logexchange,
40 41 merge as mergemod,
41 42 mergestate as mergestatemod,
42 43 narrowspec,
43 44 phases,
44 45 requirements,
45 46 scmutil,
46 47 sshpeer,
47 48 statichttprepo,
48 49 ui as uimod,
49 50 unionrepo,
50 51 url,
51 52 util,
52 53 verify as verifymod,
53 54 vfs as vfsmod,
54 55 )
55 56 from .interfaces import repository as repositorymod
56 57 from .utils import (
57 58 hashutil,
58 59 stringutil,
59 60 urlutil,
60 61 )
61 62
62 63
63 64 release = lock.release
64 65
65 66 # shared features
66 67 sharedbookmarks = b'bookmarks'
67 68
68 69
def _local(path):
    """Return the repository module appropriate for a local *path*.

    ``bundlerepo`` when the path names a regular file (a bundle),
    ``localrepo`` otherwise.  Raises ``error.Abort`` on invalid paths.
    """
    expanded = util.expandpath(urlutil.urllocalpath(path))

    try:
        # os.stat() rather than os.path.isfile(): starting with Python
        # 3.8 the latter returns False on invalid-path exceptions, but we
        # want to report invalid paths to the user explicitly.
        mode = os.stat(expanded).st_mode
    except (TypeError, ValueError) as e:
        # Python 2 raised TypeError for bad paths, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (expanded, stringutil.forcebytestr(e))
        )
    except OSError:
        # path does not exist (or is inaccessible): assume a repository
        return localrepo

    return bundlerepo if stat.S_ISREG(mode) else localrepo
88 89
89 90
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a peer into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns a ``(revs, checkout)`` pair: the revisions to
    operate on and the revision to check out (may be ``None``).

    ``lrepo`` is a local repo used only to resolve the b'.' branch
    alias; ``other`` may be a peer or a localrepo.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch information requested: pass revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # no branchmap support: treat the name as a plain revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Resolve *branch* via the remote branchmap, extending ``revs``
        # with its nodes (reversed order); returns whether it was known.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a known branch name: fall back to treating it as a rev
            revs.append(hashbranch)
    return revs, revs[0]
132 133
133 134
# Map of URL scheme -> handler.  A handler is either a callable returning
# a repository module (see _local) or a module exposing an ``instance()``
# factory; _peerlookup() copes with both forms.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
143 144
144 145
def _peerlookup(path):
    """Return the scheme handler for *path* (module or factory result).

    Unknown schemes fall back to the b'file' handler.
    """
    parsed = urlutil.url(path)
    handler = schemes.get(parsed.scheme or b'file') or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # callable(handler) is not a usable test here: 'handler' may be
        # an unloaded module object that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
157 158
158 159
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat as non-local
        return False
167 168
168 169
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
176 177
177 178
# a list of (ui, repo) functions called for wire peer initialization;
# each entry is invoked on newly created non-local peers (see _peerorrepo)
wirepeersetupfuncs = []
180 181
181 182
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the instance's own ui if it carries one (e.g. repo config)
    ui = getattr(obj, "ui", ui)
    # caller-supplied hooks run before any extension reposetup
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # each extension's reposetup is timed individually
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get an extra setup pass (see wirepeersetupfuncs)
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
208 209
209 210
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo_obj = obj.local()
    if not repo_obj:
        # a remote peer was returned: callers of repository() need a
        # genuinely local repository
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo_obj.filtered(b'visible')
233 234
234 235
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
241 242
242 243
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # an empty path (bare host, or empty source) yields no sensible name
    return os.path.basename(os.path.normpath(path)) if path else b''
263 264
264 265
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: the store lives in the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo'):
        cached = repo.srcrepo
        if cached:
            return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    src = repository(repo.ui, srcurl)
    # cache on the repo object for subsequent lookups
    repo.srcrepo = src
    return src
282 283
283 284
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """Create a repository at *dest* sharing its store with *source*.

    ``source`` may be a path (bytes) or a repo/peer object.  ``update``
    is forwarded to _postshareupdate(); ``bookmarks`` controls whether
    bookmarks are shared; ``relative`` stores the share pointer as a
    relative path.  Returns the newly created (reopened) repository.
    """

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        # normalize/strip URL decorations from the destination path
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        # resolve any #branch fragment into a checkout revision
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the new repo picks up the shared requirements/config
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
330 331
331 332
def _prependsourcehgrc(repo):
    """Copy the source repo's config ahead of this repo's .hg/hgrc.

    Used on unshare.  Only relevant when the share was performed with
    the share-safe method, where the source's config is shared.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to carry over from the source
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
352 353
353 354
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around as a backup rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make further use of the old (now inconsistent) repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
400 401
401 402
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where future pulls/pushes should go by default
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
418 419
419 420
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested; it overrides ``checkout``
        checkout = update
    # first resolvable candidate wins; b'tip' is the last resort
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
440 441
441 442
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts unknown; util.copyfiles() decides on first use
        # and returns the mode it settled on for subsequent iterations
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos need no phase data in the copy
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        # the caller is responsible for releasing destlock
        return destlock
    except:  # re-raises
        # release the partially-acquired destination lock before
        # propagating whatever went wrong
        release(destlock)
        raise
480 481
481 482
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # resolve each requested rev to a node on the remote
        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
588 589
589 590
590 591 # Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        # source cache missing: nothing to copy
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
599 600
600 601
601 602 def clone(
602 603 ui,
603 604 peeropts,
604 605 source,
605 606 dest=None,
606 607 pull=False,
607 608 revs=None,
608 609 update=True,
609 610 stream=False,
610 611 branch=None,
611 612 shareopts=None,
612 613 storeincludepats=None,
613 614 storeexcludepats=None,
614 615 depth=None,
615 616 ):
616 617 """Make a copy of an existing repository.
617 618
618 619 Create a copy of an existing repository in a new directory. The
619 620 source and destination are URLs, as passed to the repository
620 621 function. Returns a pair of repository peers, the source and
621 622 newly created destination.
622 623
623 624 The location of the source is added to the new repository's
624 625 .hg/hgrc file, as the default to be used for future pulls and
625 626 pushes.
626 627
627 628 If an exception is raised, the partly cloned/updated destination
628 629 repository will be deleted.
629 630
630 631 Arguments:
631 632
632 633 source: repository object or URL
633 634
634 635 dest: URL of destination repository to create (defaults to base
635 636 name of source repository)
636 637
637 638 pull: always pull from source repository, even in local case or if the
638 639 server prefers streaming
639 640
640 641 stream: stream raw data uncompressed from repository (fast over
641 642 LAN, slow over WAN)
642 643
643 644 revs: revision to clone up to (implies pull=True)
644 645
645 646 update: update working directory after clone completes, if
646 647 destination is local repository (True means update to default rev,
647 648 anything else is treated as a revision)
648 649
649 650 branch: branches to clone
650 651
651 652 shareopts: dict of options to control auto sharing behavior. The "pool" key
652 653 activates auto sharing mode and defines the directory for stores. The
653 654 "mode" key determines how to construct the directory name of the shared
654 655 repository. "identity" means the name is derived from the node of the first
655 656 changeset in the repository. "remote" means the name is derived from the
656 657 remote's path/URL. Defaults to "identity."
657 658
658 659 storeincludepats and storeexcludepats: sets of file patterns to include and
659 660 exclude in the repository copy, respectively. If not defined, all files
660 661 will be included (a "full" clone). Otherwise a "narrow" clone containing
661 662 only the requested files will be performed. If ``storeincludepats`` is not
662 663 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
663 664 ``path:.``. If both are empty sets, no files will be cloned.
664 665 """
665 666
666 667 if isinstance(source, bytes):
667 668 src = urlutil.get_clone_path(ui, source, branch)
668 669 origsource, source, branches = src
669 670 srcpeer = peer(ui, peeropts, source)
670 671 else:
671 672 srcpeer = source.peer() # in case we were called with a localrepo
672 673 branches = (None, branch or [])
673 674 origsource = source = srcpeer.url()
674 675 srclock = destlock = destwlock = cleandir = None
675 676 destpeer = None
676 677 try:
677 678 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
678 679
679 680 if dest is None:
680 681 dest = defaultdest(source)
681 682 if dest:
682 683 ui.status(_(b"destination directory: %s\n") % dest)
683 684 else:
684 685 dest = urlutil.get_clone_path(ui, dest)[0]
685 686
686 687 dest = urlutil.urllocalpath(dest)
687 688 source = urlutil.urllocalpath(source)
688 689
689 690 if not dest:
690 691 raise error.InputError(_(b"empty destination path is not valid"))
691 692
692 693 destvfs = vfsmod.vfs(dest, expandpath=True)
693 694 if destvfs.lexists():
694 695 if not destvfs.isdir():
695 696 raise error.InputError(
696 697 _(b"destination '%s' already exists") % dest
697 698 )
698 699 elif destvfs.listdir():
699 700 raise error.InputError(
700 701 _(b"destination '%s' is not empty") % dest
701 702 )
702 703
703 704 createopts = {}
704 705 narrow = False
705 706
706 707 if storeincludepats is not None:
707 708 narrowspec.validatepatterns(storeincludepats)
708 709 narrow = True
709 710
710 711 if storeexcludepats is not None:
711 712 narrowspec.validatepatterns(storeexcludepats)
712 713 narrow = True
713 714
714 715 if narrow:
715 716 # Include everything by default if only exclusion patterns defined.
716 717 if storeexcludepats and not storeincludepats:
717 718 storeincludepats = {b'path:.'}
718 719
719 720 createopts[b'narrowfiles'] = True
720 721
721 722 if depth:
722 723 createopts[b'shallowfilestore'] = True
723 724
724 725 if srcpeer.capable(b'lfs-serve'):
725 726 # Repository creation honors the config if it disabled the extension, so
726 727 # we can't just announce that lfs will be enabled. This check avoids
727 728 # saying that lfs will be enabled, and then saying it's an unknown
728 729 # feature. The lfs creation option is set in either case so that a
729 730 # requirement is added. If the extension is explicitly disabled but the
730 731 # requirement is set, the clone aborts early, before transferring any
731 732 # data.
732 733 createopts[b'lfs'] = True
733 734
734 735 if extensions.disabled_help(b'lfs'):
735 736 ui.status(
736 737 _(
737 738 b'(remote is using large file support (lfs), but it is '
738 739 b'explicitly disabled in the local configuration)\n'
739 740 )
740 741 )
741 742 else:
742 743 ui.status(
743 744 _(
744 745 b'(remote is using large file support (lfs); lfs will '
745 746 b'be enabled for this repository)\n'
746 747 )
747 748 )
748 749
749 750 shareopts = shareopts or {}
750 751 sharepool = shareopts.get(b'pool')
751 752 sharenamemode = shareopts.get(b'mode')
752 753 if sharepool and islocal(dest):
753 754 sharepath = None
754 755 if sharenamemode == b'identity':
755 756 # Resolve the name from the initial changeset in the remote
756 757 # repository. This returns nullid when the remote is empty. It
757 758 # raises RepoLookupError if revision 0 is filtered or otherwise
758 759 # not available. If we fail to resolve, sharing is not enabled.
759 760 try:
760 761 with srcpeer.commandexecutor() as e:
761 762 rootnode = e.callcommand(
762 763 b'lookup',
763 764 {
764 765 b'key': b'0',
765 766 },
766 767 ).result()
767 768
768 769 if rootnode != sha1nodeconstants.nullid:
769 770 sharepath = os.path.join(sharepool, hex(rootnode))
770 771 else:
771 772 ui.status(
772 773 _(
773 774 b'(not using pooled storage: '
774 775 b'remote appears to be empty)\n'
775 776 )
776 777 )
777 778 except error.RepoLookupError:
778 779 ui.status(
779 780 _(
780 781 b'(not using pooled storage: '
781 782 b'unable to resolve identity of remote)\n'
782 783 )
783 784 )
784 785 elif sharenamemode == b'remote':
785 786 sharepath = os.path.join(
786 787 sharepool, hex(hashutil.sha1(source).digest())
787 788 )
788 789 else:
789 790 raise error.Abort(
790 791 _(b'unknown share naming mode: %s') % sharenamemode
791 792 )
792 793
793 794 # TODO this is a somewhat arbitrary restriction.
794 795 if narrow:
795 796 ui.status(
796 797 _(b'(pooled storage not supported for narrow clones)\n')
797 798 )
798 799 sharepath = None
799 800
800 801 if sharepath:
801 802 return clonewithshare(
802 803 ui,
803 804 peeropts,
804 805 sharepath,
805 806 source,
806 807 srcpeer,
807 808 dest,
808 809 pull=pull,
809 810 rev=revs,
810 811 update=update,
811 812 stream=stream,
812 813 )
813 814
814 815 srcrepo = srcpeer.local()
815 816
816 817 abspath = origsource
817 818 if islocal(origsource):
818 819 abspath = util.abspath(urlutil.urllocalpath(origsource))
819 820
820 821 if islocal(dest):
821 822 if os.path.exists(dest):
822 823 # only clean up directories we create ourselves
823 824 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
824 825 cleandir = hgdir
825 826 else:
826 827 cleandir = dest
827 828
828 829 copy = False
829 830 if (
830 831 srcrepo
831 832 and srcrepo.cancopy()
832 833 and islocal(dest)
833 834 and not phases.hassecret(srcrepo)
834 835 ):
835 836 copy = not pull and not revs
836 837
837 838 # TODO this is a somewhat arbitrary restriction.
838 839 if narrow:
839 840 copy = False
840 841
841 842 if copy:
842 843 try:
843 844 # we use a lock here because if we race with commit, we
844 845 # can end up with extra data in the cloned revlogs that's
845 846 # not pointed to by changesets, thus causing verify to
846 847 # fail
847 848 srclock = srcrepo.lock(wait=False)
848 849 except error.LockError:
849 850 copy = False
850 851
851 852 if copy:
852 853 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
853 854
854 855 destrootpath = urlutil.urllocalpath(dest)
855 856 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
856 857 localrepo.createrepository(
857 858 ui,
858 859 destrootpath,
859 860 requirements=dest_reqs,
860 861 )
861 862 destrepo = localrepo.makelocalrepository(ui, destrootpath)
862 863
863 864 destwlock = destrepo.wlock()
864 865 destlock = destrepo.lock()
865 866 from . import streamclone # avoid cycle
866 867
867 868 streamclone.local_copy(srcrepo, destrepo)
868 869
869 870 # we need to re-init the repo after manually copying the data
870 871 # into it
871 872 destpeer = peer(srcrepo, peeropts, dest)
872 873
873 874 # make the peer aware that is it already locked
874 875 #
875 876 # important:
876 877 #
877 878 # We still need to release that lock at the end of the function
878 879 destpeer.local()._lockref = weakref.ref(destlock)
879 880 destpeer.local()._wlockref = weakref.ref(destwlock)
880 881 # dirstate also needs to be copied because `_wlockref` has a reference
881 882 # to it: this dirstate is saved to disk when the wlock is released
882 883 destpeer.local().dirstate = destrepo.dirstate
883 884
884 885 srcrepo.hook(
885 886 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
886 887 )
887 888 else:
888 889 try:
889 890 # only pass ui when no srcrepo
890 891 destpeer = peer(
891 892 srcrepo or ui,
892 893 peeropts,
893 894 dest,
894 895 create=True,
895 896 createopts=createopts,
896 897 )
897 898 except OSError as inst:
898 899 if inst.errno == errno.EEXIST:
899 900 cleandir = None
900 901 raise error.Abort(
901 902 _(b"destination '%s' already exists") % dest
902 903 )
903 904 raise
904 905
905 906 if revs:
906 907 if not srcpeer.capable(b'lookup'):
907 908 raise error.Abort(
908 909 _(
909 910 b"src repository does not support "
910 911 b"revision lookup and so doesn't "
911 912 b"support clone by revision"
912 913 )
913 914 )
914 915
915 916 # TODO this is batchable.
916 917 remoterevs = []
917 918 for rev in revs:
918 919 with srcpeer.commandexecutor() as e:
919 920 remoterevs.append(
920 921 e.callcommand(
921 922 b'lookup',
922 923 {
923 924 b'key': rev,
924 925 },
925 926 ).result()
926 927 )
927 928 revs = remoterevs
928 929
929 930 checkout = revs[0]
930 931 else:
931 932 revs = None
932 933 local = destpeer.local()
933 934 if local:
934 935 if narrow:
935 936 with local.wlock(), local.lock():
936 937 local.setnarrowpats(storeincludepats, storeexcludepats)
937 938 narrowspec.copytoworkingcopy(local)
938 939
939 940 u = urlutil.url(abspath)
940 941 defaulturl = bytes(u)
941 942 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
942 943 if not stream:
943 944 if pull:
944 945 stream = False
945 946 else:
946 947 stream = None
947 948 # internal config: ui.quietbookmarkmove
948 949 overrides = {(b'ui', b'quietbookmarkmove'): True}
949 950 with local.ui.configoverride(overrides, b'clone'):
950 951 exchange.pull(
951 952 local,
952 953 srcpeer,
953 954 heads=revs,
954 955 streamclonerequested=stream,
955 956 includepats=storeincludepats,
956 957 excludepats=storeexcludepats,
957 958 depth=depth,
958 959 )
959 960 elif srcrepo:
960 961 # TODO lift restriction once exchange.push() accepts narrow
961 962 # push.
962 963 if narrow:
963 964 raise error.Abort(
964 965 _(
965 966 b'narrow clone not available for '
966 967 b'remote destinations'
967 968 )
968 969 )
969 970
970 971 exchange.push(
971 972 srcrepo,
972 973 destpeer,
973 974 revs=revs,
974 975 bookmarks=srcrepo._bookmarks.keys(),
975 976 )
976 977 else:
977 978 raise error.Abort(
978 979 _(b"clone from remote to remote not supported")
979 980 )
980 981
981 982 cleandir = None
982 983
983 984 destrepo = destpeer.local()
984 985 if destrepo:
985 986 template = uimod.samplehgrcs[b'cloned']
986 987 u = urlutil.url(abspath)
987 988 u.passwd = None
988 989 defaulturl = bytes(u)
989 990 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
990 991 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
991 992
992 993 if ui.configbool(b'experimental', b'remotenames'):
993 994 logexchange.pullremotenames(destrepo, srcpeer)
994 995
995 996 if update:
996 997 if update is not True:
997 998 with srcpeer.commandexecutor() as e:
998 999 checkout = e.callcommand(
999 1000 b'lookup',
1000 1001 {
1001 1002 b'key': update,
1002 1003 },
1003 1004 ).result()
1004 1005
1005 1006 uprev = None
1006 1007 status = None
1007 1008 if checkout is not None:
1008 1009 # Some extensions (at least hg-git and hg-subversion) have
1009 1010 # a peer.lookup() implementation that returns a name instead
1010 1011 # of a nodeid. We work around it here until we've figured
1011 1012 # out a better solution.
1012 1013 if len(checkout) == 20 and checkout in destrepo:
1013 1014 uprev = checkout
1014 1015 elif scmutil.isrevsymbol(destrepo, checkout):
1015 1016 uprev = scmutil.revsymbol(destrepo, checkout).node()
1016 1017 else:
1017 1018 if update is not True:
1018 1019 try:
1019 1020 uprev = destrepo.lookup(update)
1020 1021 except error.RepoLookupError:
1021 1022 pass
1022 1023 if uprev is None:
1023 1024 try:
1024 1025 if destrepo._activebookmark:
1025 1026 uprev = destrepo.lookup(destrepo._activebookmark)
1026 1027 update = destrepo._activebookmark
1027 1028 else:
1028 1029 uprev = destrepo._bookmarks[b'@']
1029 1030 update = b'@'
1030 1031 bn = destrepo[uprev].branch()
1031 1032 if bn == b'default':
1032 1033 status = _(b"updating to bookmark %s\n" % update)
1033 1034 else:
1034 1035 status = (
1035 1036 _(b"updating to bookmark %s on branch %s\n")
1036 1037 ) % (update, bn)
1037 1038 except KeyError:
1038 1039 try:
1039 1040 uprev = destrepo.branchtip(b'default')
1040 1041 except error.RepoLookupError:
1041 1042 uprev = destrepo.lookup(b'tip')
1042 1043 if not status:
1043 1044 bn = destrepo[uprev].branch()
1044 1045 status = _(b"updating to branch %s\n") % bn
1045 1046 destrepo.ui.status(status)
1046 1047 _update(destrepo, uprev)
1047 1048 if update in destrepo._bookmarks:
1048 1049 bookmarks.activate(destrepo, update)
1049 1050 if destlock is not None:
1050 1051 release(destlock)
1051 1052 if destwlock is not None:
1052 1053 release(destlock)
1053 1054 # here is a tiny windows were someone could end up writing the
1054 1055 # repository before the cache are sure to be warm. This is "fine"
1055 1056 # as the only "bad" outcome would be some slowness. That potential
1056 1057 # slowness already affect reader.
1057 1058 with destrepo.lock():
1058 1059 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1059 1060 finally:
1060 1061 release(srclock, destlock, destwlock)
1061 1062 if cleandir is not None:
1062 1063 shutil.rmtree(cleandir, True)
1063 1064 if srcpeer is not None:
1064 1065 srcpeer.close()
1065 1066 if destpeer and destpeer.local() is None:
1066 1067 destpeer.close()
1067 1068 return srcpeer, destpeer
1068 1069
1069 1070
def _showstats(repo, stats, quietempty=False):
    """Report update/merge statistics on the repository's ui.

    ``stats`` is a merge result object carrying the four counters shown
    below. When ``quietempty`` is true and the stats are empty, nothing
    is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1085 1086
1086 1087
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory of ``repo`` to ``node``.

    Deprecated entry point: prefer merge.update() or merge.clean_update().
    When ``overwrite`` is set, local changes are clobbered; otherwise they
    are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # emit the deprecation warning before doing any work
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1105 1106
1106 1107
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``, showing statistics.

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# clone() shadows this name locally, so keep a private alias for it
_update = update
1118 1119
1119 1120
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, discarding
    any local changes.

    Always returns False: a clobbering update cannot leave conflicts.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update can never produce unresolved merges
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# updatetotally() shadows this name with a parameter, so keep an alias
_clean = clean
1131 1132
# The strategies accepted for updatetotally()'s ``updatecheck`` argument
# (and for the ``commands.update.check`` configuration value); see the
# UPDATECHECK_* constants in the merge module.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1138 1139
1139 1140
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # a caller-supplied (non-None) updatecheck must be a known strategy
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (possibly
            # moving the active bookmark along with the update)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up-front on a dirty working directory; after
                # that, a plain (unchecked) update is sufficient
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: deactivate any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1218 1219
1219 1220
def merge(ctx, force=False, remind=True, labels=None):
    """Branch-merge the working directory with ``ctx``, resolving changes.

    Returns True when unresolved conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1241 1242
1242 1243
def abortmerge(ui, repo):
    """Abort an in-progress merge, restoring the pre-merge working copy."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # the merge hit conflicts: go back to the local side recorded
        # in the merge state
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no merge state was stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update can never leave unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1256 1257
1257 1258
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # absolute subrepo source: use it verbatim
            source = bytes(subpath)
        else:
            # relative subrepo source: join it onto the parent URL, using
            # the path flavor matching the parent's locality (local paths
            # are OS-specific, remote URL paths are always POSIX)
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` (e.g. with a bundle repo)
        # and hands back a new cleanup callable covering that resource
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1315 1320
1316 1321
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, 1 otherwise (folding in
    the result of recursing into subrepositories).
    """

    def subreporecurse():
        # recurse into subrepositories when --subrepos was requested
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honour --newest-first, --limit and --no-merges while showing
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1346 1351
1347 1352
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Find the nodes missing from the given push destinations.

    Returns a ``(outgoing_revs, others)`` pair: the union of missing
    nodes across all destinations sorted by local revision number, and
    the list of peers that were opened (callers must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # absolute subrepo path: use it verbatim
                dest = bytes(subpath)
            else:
                # relative subrepo path: join it onto the parent URL, using
                # the path flavor matching the parent's locality (local
                # paths are OS-specific, remote URL paths are always POSIX)
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # keep the peer open; the caller closes it after display
            others.append(other)
        except: # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1386 1395
1387 1396
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepository when --subrepos is set.

    Returns 0 if any subrepository reported outgoing changes, 1 otherwise.
    """
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1396 1405
1397 1406
def _outgoing_filter(repo, revs, opts):
    """Yield nodes from ``revs`` honouring the outgoing display options.

    Applies --newest-first ordering (in place), then the --limit cap and
    --no-merges filtering.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # fast path: no filtering requested, pass everything through
        for node in revs:
            yield node
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1419 1428
1420 1429
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (folding in
    the subrepository recursion result).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # graph output needs a buffered displayer so ASCII edges
                # can be interleaved with the changeset text
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when no changes were found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret # exit code is zero since we found outgoing changes
    finally:
        # close every peer opened by _outgoing()
        for oth in others:
            oth.close()
1452 1461
1453 1462
def verify(repo, level=None):
    """verify the consistency of a repository

    Also checks subrepository links recorded in ``.hgsubstate``; failures
    there are folded into the returned result.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo degrades the overall result but
                        # does not stop verification of the remaining ones
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1486 1495
1487 1496
def remoteui(src, opts):
    """Build a ui suitable for remote interaction from ``src`` and ``opts``.

    ``src`` may be a repository (its base ui is copied, dropping
    repo-specific configuration) or a plain ui object (copied whole).
    Settings relevant to reaching remotes (ssh, auth, proxy, TLS) are
    carried over to the new ui.
    """
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: explicit command-line values win over config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings needed for authentication and proxying
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1516 1525
1517 1526
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is a (repo attribute naming the base directory,
# file name joined onto it) pair, consumed by cachedlocalrepo._repostate().
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'), # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
1527 1536
1528 1537
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        newstate, newmtime = self._repostate()
        if newstate == self._state:
            # nothing of interest changed; hand back the cached instance
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = newstate
        self.mtime = newmtime

        return self._repo, True

    def _repostate(self):
        """Return a ``(state, max-mtime)`` snapshot of the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per ``foi`` entry.
        """
        entries = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            path = os.path.join(prefix, fname)
            try:
                st = os.stat(path)
            except OSError:
                # the file may not exist (yet); fall back to the directory
                st = os.stat(prefix)
            entries.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(entries), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        newrepo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            newrepo = newrepo.filtered(self._filtername)
        else:
            newrepo = newrepo.unfiltered()
        cached = cachedlocalrepo(newrepo)
        cached._state = self._state
        cached.mtime = self.mtime
        return cached
General Comments 0
You need to be logged in to leave comments. Login now