##// END OF EJS Templates
incoming: use bytes for an error message...
Matt Harbison -
r47768:27602e03 stable
parent child Browse files
Show More
@@ -1,1596 +1,1596
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .pycompat import getattr
24 24
25 25 from . import (
26 26 bookmarks,
27 27 bundlerepo,
28 28 cacheutil,
29 29 cmdutil,
30 30 destutil,
31 31 discovery,
32 32 error,
33 33 exchange,
34 34 extensions,
35 35 graphmod,
36 36 httppeer,
37 37 localrepo,
38 38 lock,
39 39 logcmdutil,
40 40 logexchange,
41 41 merge as mergemod,
42 42 mergestate as mergestatemod,
43 43 narrowspec,
44 44 phases,
45 45 requirements,
46 46 scmutil,
47 47 sshpeer,
48 48 statichttprepo,
49 49 ui as uimod,
50 50 unionrepo,
51 51 url,
52 52 util,
53 53 verify as verifymod,
54 54 vfs as vfsmod,
55 55 )
56 56 from .utils import (
57 57 hashutil,
58 58 stringutil,
59 59 urlutil,
60 60 )
61 61
62 62
# convenience alias: release one or more locks (see mercurial.lock.release)
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
67 67
68 68
def _local(path):
    """Return the repo module for a local path: bundlerepo for a plain
    file (a bundle), localrepo otherwise."""
    path = util.expandpath(urlutil.urllocalpath(path))

    isfile = False
    try:
        # os.path.isfile() started returning `False` on invalid-path
        # exceptions in Python 3.8; stat() directly so those exceptions
        # can still be handled specially here.
        isfile = stat.S_ISREG(os.stat(path).st_mode)
    except (TypeError, ValueError) as e:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
88 88
89 89
def addbranchrevs(lrepo, other, branches, revs):
    """Extend ``revs`` with revisions implied by branch specifications.

    ``branches`` is the ``(hashbranch, branches)`` pair produced by
    urlutil.parseurl(): the branch named in a url#fragment (if any) plus
    explicitly requested branch names.  ``lrepo`` is the local repository
    (used only to resolve the b'.' branch); ``other`` is the repo or peer
    the branches live on.

    Returns ``(revs, checkout)``: the combined revision list and the
    revision the working copy should be updated to (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the caller's revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # remote cannot enumerate branches; only the url#fragment can be
        # honored, and it is treated as a plain revision name
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend ``revs`` with the heads of ``branch`` (in reverse order);
        # return whether the branch exists in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the url#fragment may name a branch or a plain revision
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
132 132
133 133
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    # deprecated shim kept for backward compatibility; warn and delegate
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
139 139
140 140
# map URL scheme -> module (or callable) that provides the repo/peer
# classes for that scheme; consulted by _peerlookup()
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150 150
151 151
def _peerlookup(path):
    """Return the scheme-specific handler for ``path``.

    Falls back to the b'file' handler for unknown schemes.  The handler
    may itself be callable (like _local), in which case the result of
    calling it with ``path`` is returned.
    """
    scheme = urlutil.url(path).scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because it can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
164 164
165 165
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the scheme handler has no notion of locality
        return False
174 174
175 175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(u.localpath(), b'rb')
183 183
184 184
# a list of (ui, repo) functions called for wire peer initialization
# (run by _peerorrepo() on objects whose .local() is falsy)
wirepeersetupfuncs = []
187 187
188 188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Also runs ``presetupfuncs``, every extension's ``reposetup`` hook and,
    for non-local (wire) peers, the registered ``wirepeersetupfuncs`` on
    the newly created object.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the ui bound to the new object, when it carries one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peer: run the registered wire peer initialization callbacks
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215 215
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if repo:
        # hide changesets that should not be visible (e.g. obsolete ones)
        return repo.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or obj.url())
    )
240 240
241 241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    obj = _peerorrepo(
        remoteui(uiorrepo, opts),
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    return obj.peer()
248 248
249 249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if path:
        # last meaningful path component, ignoring any trailing slash
        return os.path.basename(os.path.normpath(path))
    return b''
270 270
271 271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    cached = util.safehasattr(repo, b'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourceroot = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(sourceroot)
    srcrepo = repository(repo.ui, srcurl)
    # memoize for subsequent calls
    repo.srcrepo = srcrepo
    return srcrepo
289 289
290 290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a local path/URL (bytes) or an existing repo/peer
    object.  Returns the newly created shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    # default the destination to the basename of the source
    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        # path/URL form: open the repo and honor any url#branch fragment
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen the destination before touching the working copy
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337 337
338 338
def _prependsourcehgrc(repo):
    """Copy the share source's config in front of this repo's .hg/hgrc.

    Done on unshare, and only when the share was performed with the
    share-safe method (where the source's config is shared with shares).
    """
    sourcevfs = vfsmod.vfs(repo.sharedpath)
    localvfs = vfsmod.vfs(repo.path)

    if not sourcevfs.exists(b'hgrc'):
        return

    localconfig = b''
    if localvfs.exists(b'hgrc'):
        localconfig = localvfs.read(b'hgrc')

    with localvfs(b'hgrc', b'wb') as fp:
        # source config first so local settings keep precedence
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourcevfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(localconfig)
359 359
360 360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around (renamed) rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old (now stale) repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407 407
408 408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from so pull/push have a default path
        destrepo.vfs.write(
            b'hgrc',
            util.tonativeeol(b'[paths]\ndefault = %s\n' % default),
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425 425
426 426
427 427 def _postshareupdate(repo, update, checkout=None):
428 428 """Maybe perform a working directory update after a shared repo is created.
429 429
430 430 ``update`` can be a boolean or a revision to update to.
431 431 """
432 432 if not update:
433 433 return
434 434
435 435 repo.ui.status(_(b"updating working directory\n"))
436 436 if update is not True:
437 437 checkout = update
438 438 for test in (checkout, b'default', b'tip'):
439 439 if test is None:
440 440 continue
441 441 try:
442 442 uprev = repo.lookup(test)
443 443 break
444 444 except error.RepoLookupError:
445 445 continue
446 446 _update(repo, uprev)
447 447
448 448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    The returned lock (or None) protects the destination store while the
    copy is still in flight; the caller is responsible for releasing it.
    """
    destlock = None
    try:
        # starts as None ("undecided"); util.copyfiles() reports back
        # whether hardlinking was actually used so later files reuse the
        # same strategy
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # phase data of a publishing repo is implicit; don't copy it
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
487 487
488 488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns ``(srcpeer, destpeer)`` like clone() does.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # resolve symbolic revisions to remote node ids up front
        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595 595
596 596
597 597 # Recomputing caches is often slow on big repos, so copy them.
598 598 def _copycache(srcrepo, dstcachedir, fname):
599 599 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 600 srcfname = srcrepo.cachevfs.join(fname)
601 601 dstfname = os.path.join(dstcachedir, fname)
602 602 if os.path.exists(srcfname):
603 603 if not os.path.exists(dstcachedir):
604 604 os.mkdir(dstcachedir)
605 605 util.copyfile(srcfname, dstfname)
606 606
607 607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # resolve symbolic revisions to remote node ids
                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # interpolate AFTER the translation lookup;
                            # interpolating first would prevent the message
                            # from ever matching a translation catalog entry
                            # (and was inconsistent with the branch-qualified
                            # message below)
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1066 1066
1067 1067
def _showstats(repo, stats, quietempty=False):
    """Print the update/merge file statistics in ``stats`` via repo.ui.

    When ``quietempty`` is true and nothing changed, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1083 1083
1084 1084
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    When ``overwrite`` is set, local changes are clobbered instead of
    merged.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # This entry point is deprecated; nudge callers toward merge.update()
    # / merge.clean_update() instead.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1103 1103
1104 1104
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain.
    """
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved


# naming conflict in clone()
_update = update
1116 1116
1117 1117
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering
    any local changes.

    Always returns False: a forced update cannot leave conflicts.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never produce unresolved files
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1129 1129
# Legal values for the ``commands.update.check`` config knob and for the
# ``updatecheck`` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1136 1136
1137 1137
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        # fall back to the configured default; bogus config values are
        # silently coerced to LINEAR
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (caller-supplied) invalid value is a programming error
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    # the working copy and bookmark state must change under one wlock
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to advance)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then let the
                # actual update run unchecked
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination was a non-bookmark name: deactivate any bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1216 1216
1217 1217
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge ``ctx`` into the working directory, resolving changes.

    Returns True when any file merges remain unresolved.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1239 1239
1240 1240
def abortmerge(ui, repo):
    """Abandon an in-progress merge, restoring the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    if not ms.active():
        # there were no conflicts, so no mergestate was stored
        node = repo[b'.'].hex()
    else:
        # conflicts were recorded: return to the merge's local side
        node = ms.localctx.hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot itself leave unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1254 1254
1255 1255
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns the result of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        # NOTE: the message must be a bytes literal -- internal Mercurial
        # strings are bytes, and a str literal here would break formatting
        # and error output on Python 3.
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # rebase the requested subrepo path onto the pull source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace both the peer and the cleanup
        # callback (e.g. when an intermediate bundle repo is created)
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1312 1312
1313 1313
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets from ``source`` not present locally.

    Returns 0 when incoming changes were found, 1 otherwise; when
    --subrepos is set, a subrepo with incoming changes also yields 0.
    """

    def subreporecurse():
        # recurse into each subrepo of the working copy; the overall
        # return code is the minimum across subrepos (0 wins)
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit, --newest-first and --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1341 1341
1342 1342
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from every push destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)``: the union of missing
    nodes across all destinations sorted by local revision number, and
    the list of successfully-contacted peer objects. The caller is
    responsible for closing the returned peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # rebase the requested subrepo path onto this destination
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peers that completed discovery are handed back open; only a
            # failing peer is closed here before re-raising
            others.append(other)
        except: # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1381 1381
1382 1382
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepo when --subrepos was given.

    Returns the minimum subrepo return code, or 1 when none ran.
    """
    if not opts.get(b'subrepos'):
        return 1
    ret = 1
    wctx = repo[None]
    for path in sorted(wctx.substate):
        ret = min(ret, wctx.sub(path).outgoing(ui, dests, opts))
    return ret
1391 1391
1392 1392
def _outgoing_filter(repo, revs, opts):
    """Yield nodes from ``revs`` honoring the ordering/filtering options
    of outgoing: --newest-first, --limit and --no-merges.

    NOTE: ``revs`` is reversed in place when --newest-first is set.
    """
    limit = logcmdutil.getlimit(opts)
    skipmerges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()

    # fast path: no limit and no merge filtering, stream straight through
    if limit is None and not skipmerges:
        for node in revs:
            yield node
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skipmerges:
            parents = [p for p in cl.parents(node) if p != nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1414 1414
1415 1415
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destinations.

    Returns 0 when outgoing changes exist, 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # --graph: render an ASCII DAG of the outgoing changesets
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run against every contacted peer, even when nothing is
        # outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret # exit code is zero since we found outgoing changes
    finally:
        # peers returned by _outgoing() are left open for the hooks above
        # and must be closed here
        for oth in others:
            oth.close()
1447 1447
1448 1448
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from .hgsubstate files.
    Returns a non-zero status when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo verify makes the overall result
                        # non-zero but does not stop the remaining checks
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1481 1481
1482 1482
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts

    ``src`` may be a repository (its baseui is copied, dropping
    repo-specific configuration) or a plain ui object (copied whole).
    The ssh/bundle/auth/proxy settings relevant for contacting a remote
    are then layered onto the copy, which is returned.
    """
    if util.safehasattr(src, b'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        # command-line options take precedence over config values
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1511 1511
1512 1512
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base path, file
# name relative to it); see cachedlocalrepo._repostate() below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'), # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
1522 1522
1523 1523
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files-of-interest (see foi) at cache time
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: build a fresh repository and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Return (fingerprint tuple, newest mtime seen) for the files of
        # interest. A missing file falls back to stat() of its directory
        # so creation/removal is still detected.
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the fingerprint over so the copy doesn't refresh needlessly
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now