##// END OF EJS Templates
hg: convert an exception to bytes in the repo creation exception handler...
Matt Harbison -
r47518:1099541b stable
parent child Browse files
Show More
@@ -1,1514 +1,1517
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .pycompat import getattr
24 24
25 25 from . import (
26 26 bookmarks,
27 27 bundlerepo,
28 28 cacheutil,
29 29 cmdutil,
30 30 destutil,
31 31 discovery,
32 32 error,
33 33 exchange,
34 34 extensions,
35 35 httppeer,
36 36 localrepo,
37 37 lock,
38 38 logcmdutil,
39 39 logexchange,
40 40 merge as mergemod,
41 41 mergestate as mergestatemod,
42 42 narrowspec,
43 43 phases,
44 pycompat,
45 44 requirements,
46 45 scmutil,
47 46 sshpeer,
48 47 statichttprepo,
49 48 ui as uimod,
50 49 unionrepo,
51 50 url,
52 51 util,
53 52 verify as verifymod,
54 53 vfs as vfsmod,
55 54 )
56 from .utils import hashutil
55 from .utils import (
56 hashutil,
57 stringutil,
58 )
59
57 60
# convenience alias: callers release one or more locks via hg.release()
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
63 66
def _local(path):
    """Return the repo module handling a local path.

    Gives ``bundlerepo`` when *path* points at a regular file (a bundle),
    ``localrepo`` otherwise.
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        # os.stat() is used directly instead of os.path.isfile() because
        # the latter started returning `False` on invalid-path exceptions
        # in Python 3.8, and invalid paths need special handling here.
        mode = os.stat(path).st_mode
        isfile = stat.S_ISREG(mode)
    except (TypeError, ValueError) as e:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
83 86
84 87
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against *other* into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as returned by
    parseurl().  Returns ``(revs, checkout)`` where ``checkout`` is the
    revision to update to (may be None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass the revs through
        return (revs or None), (revs[0] if revs else None)

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def expand(branch):
        # b'.' names the branch of the local dirstate
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        heads = branchmap.get(branch)
        if heads is None:
            return False
        revs.extend(hex(node) for node in reversed(heads))
        return True

    for branch in branches:
        if not expand(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not expand(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
127 130
128 131
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    if u.fragment:
        # the fragment names a branch; strip it from the url proper
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
138 141
139 142
# Map a URL scheme to the module (or factory) that opens repositories of
# that kind; consulted by _peerlookup(), with b'file' as the fallback for
# unknown schemes.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149 152
150 153
def _peerlookup(path):
    """Return the module/factory responsible for *path*'s URL scheme."""
    parsed = util.url(path)
    handler = schemes.get(parsed.scheme or b'file') or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # callable(handler) can't be tested here because 'handler' may be
        # an unloaded module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
163 166
164 167
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the scheme handler has no islocal() — treat as remote
        return False
173 176
174 177
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
182 185
183 186
# a list of (ui, repo) functions called for wire peer initialization
# (run by _peerorrepo() on peers that have no local repository)
wirepeersetupfuncs = []
186 189
187 190
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Instantiates the repo/peer via the scheme handler, then runs
    ``presetupfuncs``, every extension's ``reposetup`` hook, and — for
    non-local peers — the registered ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the object's own ui if it has one (may carry repo config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get additional per-peer initialization
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214 217
215 218
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    # a peer with no underlying local repository can't be used as a repo
    repo = obj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
239 242
240 243
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247 250
248 251
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = util.url(source).path
    # last path component, trailing-slash insensitive; empty url -> b''
    return os.path.basename(os.path.normpath(path)) if path else b''
269 272
270 273
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # reuse a previously-resolved source if one was cached on the repo
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent calls
    return srcrepo
288 291
289 292
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    dest = defaultdest(source) if not dest else ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = {sharedbookmarks} if bookmarks else set()

    destrepo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, destrepo, defaultpath=defaultpath)
    # reopen so the new repository picks up the just-written configuration
    destrepo = repository(ui, dest)
    _postshareupdate(destrepo, update, checkout=checkout)
    return destrepo
337 340
338 341
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to do if the share source carries no config
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
359 362
360 363
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for forensics, but stop using it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        # recursively unshare each subrepo of the working parent
        c.sub(s).unshare()

    # make any further use of the old (now stale) repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407 410
408 411
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where pulls/pushes should go by default
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425 428
426 429
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default/tip;
    # NOTE: uprev stays unbound if every lookup fails, as before
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        else:
            break
    _update(repo, uprev)
447 450
448 451
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # NOTE(review): hardlink is always None here, so topic is always
        # b'copying' even if hardlinking ends up being used — confirm intended
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos don't need phase data copied over
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    # util.copyfiles reports back whether hardlinks worked,
                    # which steers subsequent copies
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
487 490
488 491
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        # another client may have created it concurrently; that's fine
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595 598
596 599
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    if not os.path.exists(srcfname):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, os.path.join(dstcachedir, fname))
606 609
607 610
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    # -- resolve source/dest URLs and requested revisions --------------
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.InputError(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.InputError(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.InputError(_(b"destination '%s' is not empty") % dest)

    # -- compute creation options (narrow, shallow, lfs) ---------------
    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabled_help(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    # -- pooled-storage (auto-share) clone path -------------------------
    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup',
                        {
                            b'key': b'0',
                        },
                    ).result()

                if rootnode != nullid:
                    sharepath = os.path.join(sharepool, hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    # -- regular clone: hardlink/copy fast path, else pull/push ---------
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # clone succeeded; don't delete the destination on the way out
        cleandir = None

        # -- post-clone configuration and working-copy update -----------
        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # NOTE(review): the '%' is applied inside _(), so
                            # the interpolated string is used for translation
                            # lookup; likely intended as _(...) % update —
                            # confirm before changing
                            status = _(b"updating to bookmark %s\n" % update)
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        # on failure, cleandir still points at the partly-created clone
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
1056 1059
1057 1060
def _showstats(repo, stats, quietempty=False):
    """Print the file-count summary of an update/merge result.

    ``stats`` carries updated/merged/removed/unresolved counts (see
    merge.applyupdates).  When ``quietempty`` is true and nothing at all
    happened, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1073 1076
1074 1077
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point (since 5.7): callers should use merge.update()
    # or merge.clean_update() directly; warn before delegating.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # branchmerge=False: this is a plain working-directory update, not a merge
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1093 1096
1094 1097
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # truthy return signals that conflicts remain
    return unresolved > 0
1102 1105
1103 1106
# naming conflict in clone()
_update = update  # alias so update() stays reachable where 'update' is a local
1106 1109
1107 1110
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update discards local changes, so it can never leave
    # unresolved merge conflicts behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
1114 1117
1115 1118
# naming conflict in updatetotally()
_clean = clean  # alias so clean() stays reachable where 'clean' is a local
1118 1121
# Legal values for the commands.update.check config knob and the
# ``updatecheck`` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1125 1128
1126 1129
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (caller-supplied) value must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # the bookmark to advance from / activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up-front on a dirty working directory, then fall
                # through with no further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # successful update: try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination named an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is some other name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1205 1208
1206 1209
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1228 1231
1229 1232
def abortmerge(ui, repo):
    """Abandon an in-progress merge and restore the pre-merge checkout."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # conflicts were recorded: go back to the local side of the merge
        node = ms.localctx.hex()
    else:
        # conflict-free merge: no mergestate was written, use the first parent
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1243 1246
1244 1247
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns whatever subreporecurse() returns.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

    if revs:
        # resolve symbolic revisions on the remote before fetching
        revs = [other.lookup(rev) for rev in revs]
    # 'other' may be replaced by a (temporary) bundlerepo wrapping the
    # incoming changes; cleanupfn tears that temporary state down
    other, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always clean up the temporary bundle, even if display fails
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1278 1281
1279 1282
def incoming(ui, repo, source, opts):
    """Show changesets in ``source`` not present locally; return exit code."""

    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1305 1308
1306 1309
def _outgoing(ui, repo, dest, opts):
    """Figure out which local changesets are missing from ``dest``.

    Returns ``(missing, other)`` where ``missing`` is the (possibly empty)
    list of outgoing nodes and ``other`` is the destination peer.
    """
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, other, revs, force=opts.get(b'force')
    )
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
1330 1333
1331 1334
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination; return exit code."""

    def recurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get(b'newest_first'):
        o.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get(b'no_merges')
    shown = 0
    for n in o:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0  # exit code is zero since we found outgoing changes
1365 1368
1366 1369
def verify(repo, level=None):
    """verify the consistency of a repository

    In addition to the storage-level checks done by verifymod, walk the
    revisions touching .hgsubstate and verify each referenced subrepo.
    Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # convert the exception to bytes before interpolating:
                        # b'%s' %-formatting of a raw exception object raises
                        # TypeError on Python 3
                        repo.ui.warn(
                            b'%d: %s\n' % (rev, stringutil.forcebytestr(e))
                        )
            except Exception:
                # unparseable .hgsubstate: report and keep checking other revs
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1399 1402
1400 1403
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: command-line opts win over [ui] settings
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for name, value in src.configitems(section):
            dst.setconfig(section, name, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1429 1432
1430 1433
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base path, file
# name relative to it).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1440 1443
1441 1444
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest plus the
        # newest mtime seen; fetch() compares against this to detect change
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: build a fresh repository object and re-apply the view filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Returns (tuple of (mtime, size) per file of interest, max mtime).
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist (e.g. no bookmarks yet); fall back
                # to the containing directory so we still get some signal
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the staleness snapshot so the copy refreshes on the same cue
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now