##// END OF EJS Templates
cachedlocalrepo: use sysstr for attribute name of "file of interest"...
marmoute -
r51804:1339158a default
parent child Browse files
Show More
@@ -1,1681 +1,1681 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
    """Resolve branch names against ``other`` and fold them into ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair.  Returns a
    ``(revs, checkout)`` tuple where ``checkout`` is the revision a
    subsequent working-directory update should target (or None).
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        remote = other.peer(remotehidden=remotehidden)
    else:
        remote = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch information to resolve; hand the revs straight back
        return (revs or None), (revs[0] if revs else None)
    revs = list(revs) if revs else []

    if not remote.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with remote.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def expandbranch(branch):
        # b'.' means the dirstate branch of the local repo; otherwise
        # append the heads of the named branch (reversed) when known
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not expandbranch(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not expandbranch(hashbranch):
        # not a branch name after all; treat it as a plain revision
        revs.append(hashbranch)
    return revs, revs[0]
114 114
115 115
116 116 def _isfile(path):
117 117 try:
118 118 # we use os.stat() directly here instead of os.path.isfile()
119 119 # because the latter started returning `False` on invalid path
120 120 # exceptions starting in 3.8 and we care about handling
121 121 # invalid paths specially here.
122 122 st = os.stat(path)
123 123 except ValueError as e:
124 124 msg = stringutil.forcebytestr(e)
125 125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 126 except OSError:
127 127 return False
128 128 else:
129 129 return stat.S_ISREG(st.st_mode)
130 130
131 131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo

    A filesystem path may name either a repository directory or a bundle
    file; this factory routes each to the appropriate module.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a regular file is a bundle, which does not count as local
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # regular file -> bundle repo; anything else -> local repo
        path = util.expandpath(urlutil.urllocalpath(path))
        mod = bundlerepo if _isfile(path) else localrepo
        return mod.instance(ui, path, *args, **kwargs)
148 148
149 149
# URL scheme -> module providing ``instance()`` for opening a repository
# object directly (no wire peer involved)
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing ``make_peer()`` for talking to a remote
# repository over the wire
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162 162
163 163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # a live repo/peer object can answer for itself
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    u = urlutil.url(repo)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        cls.make_peer  # make sure we load the module
    elif scheme in repo_schemes:
        cls = repo_schemes[scheme]
        cls.instance  # make sure we load the module
    else:
        cls = LocalFactory
    if util.safehasattr(cls, 'islocal'):
        return cls.islocal(repo)  # pytype: disable=module-attr
    return False
182 182
183 183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(u.localpath(), b'rb')
191 191
192 192
# a list of (ui, repo) functions called for wire peer initialization
# (run by _setup_repo_or_peer when the object is not local; populated
# elsewhere — presumably by extensions)
wirepeersetupfuncs = []
195 195
196 196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup callbacks and extension ``reposetup`` hooks on ``obj``.

    ``obj`` is a freshly created repository or peer.  ``presetupfuncs``
    callbacks run first, then every loaded extension's ``reposetup`` hook
    (each timed and logged).  Non-local objects additionally get the
    ``wirepeersetupfuncs`` callbacks.
    """
    # prefer the object's own ui if it has one (it may carry extra config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
215 215
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        # wire schemes cannot yield a repository object, only a peer
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the "visible" filtered view of the repository
    return repo.filtered(b'visible')
243 243
244 244
def peer(
    uiorrepo,
    opts,
    path,
    create=False,
    intents=None,
    createopts=None,
    remotehidden=False,
):
    '''return a repository peer for the specified path

    ``uiorrepo`` may be a ui or an object carrying a ``ui`` attribute.
    ``path`` may be a raw location or an existing ``urlutil.path``-like
    object (detected by the presence of a ``url`` attribute).
    '''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    # a dedicated ui configured for remote interaction
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        # wire protocol: delegate peer construction to the scheme module
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
            remotehidden=remotehidden,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        # wrap the local repository in its in-process peer interface
        peer = repo.peer(path=peer_path, remotehidden=remotehidden)
    return peer
288 288
289 289
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last path component after normalization, or empty when there is none
    return os.path.basename(os.path.normpath(path)) if path else b''
310 310
311 311
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        # already resolved and cached on the repo object
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    base = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(base)
    src = repository(repo.ui, srcurl)
    repo.srcrepo = src  # cache for subsequent lookups
    return src
329 329
330 330
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a repository object or a bytes path/URL.  A new
    repository sharing ``source``'s store is created at ``dest`` and
    returned.  ``update`` may be a boolean or a revision to check out.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # path/URL form: open the repo and resolve any #branch fragment
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # NOTE(review): the destination is re-opened here, presumably so the
    # fresh object picks up state written by postshare() — confirm
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
383 383
384 384
def _prependsourcehgrc(repo):
    """Prepend the share source's config to this repo's .hg/hgrc.

    Done on unshare, and only for shares created with the share-safe
    method where the source's config was shared; without copying it,
    hooks and other checks configured there would stop applying.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # source has no config; nothing to carry over
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
405 405
406 406
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around as a backup instead of
            # deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure nothing keeps using the stale (still-shared) repo object
    localrepo.poisonrepository(repo)

    return newrepo
453 453
454 454
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # point the new repo's default path at the source (or explicit default)
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec in the working copy
        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
            b"narrow-share"
        ):
            narrowspec.copytoworkingcopy(destrepo)
473 473
474 474
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a specific revision was requested
        checkout = update
    # try the requested revision first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
495 495
496 496
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    The caller must release the returned lock (it may be None if no
    store data file was copied).
    """
    destlock = None
    try:
        # hardlink starts unset; util.copyfiles presumably decides and
        # reports whether hardlinking was possible — TODO confirm
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos do not need their phaseroots copied
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
535 535
536 536
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        # resolve each requested rev to a node on the remote
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
642 642
643 643
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        # source cache missing: nothing to copy
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
653 653
654 654
655 655 def clone(
656 656 ui,
657 657 peeropts,
658 658 source,
659 659 dest=None,
660 660 pull=False,
661 661 revs=None,
662 662 update=True,
663 663 stream=False,
664 664 branch=None,
665 665 shareopts=None,
666 666 storeincludepats=None,
667 667 storeexcludepats=None,
668 668 depth=None,
669 669 ):
670 670 """Make a copy of an existing repository.
671 671
672 672 Create a copy of an existing repository in a new directory. The
673 673 source and destination are URLs, as passed to the repository
674 674 function. Returns a pair of repository peers, the source and
675 675 newly created destination.
676 676
677 677 The location of the source is added to the new repository's
678 678 .hg/hgrc file, as the default to be used for future pulls and
679 679 pushes.
680 680
681 681 If an exception is raised, the partly cloned/updated destination
682 682 repository will be deleted.
683 683
684 684 Arguments:
685 685
686 686 source: repository object or URL
687 687
688 688 dest: URL of destination repository to create (defaults to base
689 689 name of source repository)
690 690
691 691 pull: always pull from source repository, even in local case or if the
692 692 server prefers streaming
693 693
694 694 stream: stream raw data uncompressed from repository (fast over
695 695 LAN, slow over WAN)
696 696
697 697 revs: revision to clone up to (implies pull=True)
698 698
699 699 update: update working directory after clone completes, if
700 700 destination is local repository (True means update to default rev,
701 701 anything else is treated as a revision)
702 702
703 703 branch: branches to clone
704 704
705 705 shareopts: dict of options to control auto sharing behavior. The "pool" key
706 706 activates auto sharing mode and defines the directory for stores. The
707 707 "mode" key determines how to construct the directory name of the shared
708 708 repository. "identity" means the name is derived from the node of the first
709 709 changeset in the repository. "remote" means the name is derived from the
710 710 remote's path/URL. Defaults to "identity."
711 711
712 712 storeincludepats and storeexcludepats: sets of file patterns to include and
713 713 exclude in the repository copy, respectively. If not defined, all files
714 714 will be included (a "full" clone). Otherwise a "narrow" clone containing
715 715 only the requested files will be performed. If ``storeincludepats`` is not
716 716 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
717 717 ``path:.``. If both are empty sets, no files will be cloned.
718 718 """
719 719
720 720 if isinstance(source, bytes):
721 721 src_path = urlutil.get_clone_path_obj(ui, source)
722 722 if src_path is None:
723 723 srcpeer = peer(ui, peeropts, b'')
724 724 origsource = source = b''
725 725 branches = (None, branch or [])
726 726 else:
727 727 srcpeer = peer(ui, peeropts, src_path)
728 728 origsource = src_path.rawloc
729 729 branches = (src_path.branch, branch or [])
730 730 source = src_path.loc
731 731 else:
732 732 if util.safehasattr(source, 'peer'):
733 733 srcpeer = source.peer() # in case we were called with a localrepo
734 734 else:
735 735 srcpeer = source
736 736 branches = (None, branch or [])
737 737 # XXX path: simply use the peer `path` object when this become available
738 738 origsource = source = srcpeer.url()
739 739 srclock = destlock = destwlock = cleandir = None
740 740 destpeer = None
741 741 try:
742 742 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
743 743
744 744 if dest is None:
745 745 dest = defaultdest(source)
746 746 if dest:
747 747 ui.status(_(b"destination directory: %s\n") % dest)
748 748 else:
749 749 dest_path = urlutil.get_clone_path_obj(ui, dest)
750 750 if dest_path is not None:
751 751 dest = dest_path.rawloc
752 752 else:
753 753 dest = b''
754 754
755 755 dest = urlutil.urllocalpath(dest)
756 756 source = urlutil.urllocalpath(source)
757 757
758 758 if not dest:
759 759 raise error.InputError(_(b"empty destination path is not valid"))
760 760
761 761 destvfs = vfsmod.vfs(dest, expandpath=True)
762 762 if destvfs.lexists():
763 763 if not destvfs.isdir():
764 764 raise error.InputError(
765 765 _(b"destination '%s' already exists") % dest
766 766 )
767 767 elif destvfs.listdir():
768 768 raise error.InputError(
769 769 _(b"destination '%s' is not empty") % dest
770 770 )
771 771
772 772 createopts = {}
773 773 narrow = False
774 774
775 775 if storeincludepats is not None:
776 776 narrowspec.validatepatterns(storeincludepats)
777 777 narrow = True
778 778
779 779 if storeexcludepats is not None:
780 780 narrowspec.validatepatterns(storeexcludepats)
781 781 narrow = True
782 782
783 783 if narrow:
784 784 # Include everything by default if only exclusion patterns defined.
785 785 if storeexcludepats and not storeincludepats:
786 786 storeincludepats = {b'path:.'}
787 787
788 788 createopts[b'narrowfiles'] = True
789 789
790 790 if depth:
791 791 createopts[b'shallowfilestore'] = True
792 792
793 793 if srcpeer.capable(b'lfs-serve'):
794 794 # Repository creation honors the config if it disabled the extension, so
795 795 # we can't just announce that lfs will be enabled. This check avoids
796 796 # saying that lfs will be enabled, and then saying it's an unknown
797 797 # feature. The lfs creation option is set in either case so that a
798 798 # requirement is added. If the extension is explicitly disabled but the
799 799 # requirement is set, the clone aborts early, before transferring any
800 800 # data.
801 801 createopts[b'lfs'] = True
802 802
803 803 if b'lfs' in extensions.disabled():
804 804 ui.status(
805 805 _(
806 806 b'(remote is using large file support (lfs), but it is '
807 807 b'explicitly disabled in the local configuration)\n'
808 808 )
809 809 )
810 810 else:
811 811 ui.status(
812 812 _(
813 813 b'(remote is using large file support (lfs); lfs will '
814 814 b'be enabled for this repository)\n'
815 815 )
816 816 )
817 817
818 818 shareopts = shareopts or {}
819 819 sharepool = shareopts.get(b'pool')
820 820 sharenamemode = shareopts.get(b'mode')
821 821 if sharepool and islocal(dest):
822 822 sharepath = None
823 823 if sharenamemode == b'identity':
824 824 # Resolve the name from the initial changeset in the remote
825 825 # repository. This returns nullid when the remote is empty. It
826 826 # raises RepoLookupError if revision 0 is filtered or otherwise
827 827 # not available. If we fail to resolve, sharing is not enabled.
828 828 try:
829 829 with srcpeer.commandexecutor() as e:
830 830 rootnode = e.callcommand(
831 831 b'lookup',
832 832 {
833 833 b'key': b'0',
834 834 },
835 835 ).result()
836 836
837 837 if rootnode != sha1nodeconstants.nullid:
838 838 sharepath = os.path.join(sharepool, hex(rootnode))
839 839 else:
840 840 ui.status(
841 841 _(
842 842 b'(not using pooled storage: '
843 843 b'remote appears to be empty)\n'
844 844 )
845 845 )
846 846 except error.RepoLookupError:
847 847 ui.status(
848 848 _(
849 849 b'(not using pooled storage: '
850 850 b'unable to resolve identity of remote)\n'
851 851 )
852 852 )
853 853 elif sharenamemode == b'remote':
854 854 sharepath = os.path.join(
855 855 sharepool, hex(hashutil.sha1(source).digest())
856 856 )
857 857 else:
858 858 raise error.Abort(
859 859 _(b'unknown share naming mode: %s') % sharenamemode
860 860 )
861 861
862 862 # TODO this is a somewhat arbitrary restriction.
863 863 if narrow:
864 864 ui.status(
865 865 _(b'(pooled storage not supported for narrow clones)\n')
866 866 )
867 867 sharepath = None
868 868
869 869 if sharepath:
870 870 return clonewithshare(
871 871 ui,
872 872 peeropts,
873 873 sharepath,
874 874 source,
875 875 srcpeer,
876 876 dest,
877 877 pull=pull,
878 878 rev=revs,
879 879 update=update,
880 880 stream=stream,
881 881 )
882 882
883 883 srcrepo = srcpeer.local()
884 884
885 885 abspath = origsource
886 886 if islocal(origsource):
887 887 abspath = util.abspath(urlutil.urllocalpath(origsource))
888 888
889 889 if islocal(dest):
890 890 if os.path.exists(dest):
891 891 # only clean up directories we create ourselves
892 892 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
893 893 cleandir = hgdir
894 894 else:
895 895 cleandir = dest
896 896
897 897 copy = False
898 898 if (
899 899 srcrepo
900 900 and srcrepo.cancopy()
901 901 and islocal(dest)
902 902 and not phases.hassecret(srcrepo)
903 903 ):
904 904 copy = not pull and not revs
905 905
906 906 # TODO this is a somewhat arbitrary restriction.
907 907 if narrow:
908 908 copy = False
909 909
910 910 if copy:
911 911 try:
912 912 # we use a lock here because if we race with commit, we
913 913 # can end up with extra data in the cloned revlogs that's
914 914 # not pointed to by changesets, thus causing verify to
915 915 # fail
916 916 srclock = srcrepo.lock(wait=False)
917 917 except error.LockError:
918 918 copy = False
919 919
920 920 if copy:
921 921 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
922 922
923 923 destrootpath = urlutil.urllocalpath(dest)
924 924 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
925 925 localrepo.createrepository(
926 926 ui,
927 927 destrootpath,
928 928 requirements=dest_reqs,
929 929 )
930 930 destrepo = localrepo.makelocalrepository(ui, destrootpath)
931 931
932 932 destwlock = destrepo.wlock()
933 933 destlock = destrepo.lock()
934 934 from . import streamclone # avoid cycle
935 935
936 936 streamclone.local_copy(srcrepo, destrepo)
937 937
938 938 # we need to re-init the repo after manually copying the data
939 939 # into it
940 940 destpeer = peer(srcrepo, peeropts, dest)
941 941
942 942 # make the peer aware that is it already locked
943 943 #
944 944 # important:
945 945 #
946 946 # We still need to release that lock at the end of the function
947 947 destpeer.local()._lockref = weakref.ref(destlock)
948 948 destpeer.local()._wlockref = weakref.ref(destwlock)
949 949 # dirstate also needs to be copied because `_wlockref` has a reference
950 950 # to it: this dirstate is saved to disk when the wlock is released
951 951 destpeer.local().dirstate = destrepo.dirstate
952 952
953 953 srcrepo.hook(
954 954 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
955 955 )
956 956 else:
957 957 try:
958 958 # only pass ui when no srcrepo
959 959 destpeer = peer(
960 960 srcrepo or ui,
961 961 peeropts,
962 962 dest,
963 963 create=True,
964 964 createopts=createopts,
965 965 )
966 966 except FileExistsError:
967 967 cleandir = None
968 968 raise error.Abort(_(b"destination '%s' already exists") % dest)
969 969
970 970 if revs:
971 971 if not srcpeer.capable(b'lookup'):
972 972 raise error.Abort(
973 973 _(
974 974 b"src repository does not support "
975 975 b"revision lookup and so doesn't "
976 976 b"support clone by revision"
977 977 )
978 978 )
979 979
980 980 # TODO this is batchable.
981 981 remoterevs = []
982 982 for rev in revs:
983 983 with srcpeer.commandexecutor() as e:
984 984 remoterevs.append(
985 985 e.callcommand(
986 986 b'lookup',
987 987 {
988 988 b'key': rev,
989 989 },
990 990 ).result()
991 991 )
992 992 revs = remoterevs
993 993
994 994 checkout = revs[0]
995 995 else:
996 996 revs = None
997 997 local = destpeer.local()
998 998 if local:
999 999 if narrow:
1000 1000 with local.wlock(), local.lock(), local.transaction(
1001 1001 b'narrow-clone'
1002 1002 ):
1003 1003 local.setnarrowpats(storeincludepats, storeexcludepats)
1004 1004 narrowspec.copytoworkingcopy(local)
1005 1005
1006 1006 u = urlutil.url(abspath)
1007 1007 defaulturl = bytes(u)
1008 1008 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1009 1009 if not stream:
1010 1010 if pull:
1011 1011 stream = False
1012 1012 else:
1013 1013 stream = None
1014 1014 # internal config: ui.quietbookmarkmove
1015 1015 overrides = {(b'ui', b'quietbookmarkmove'): True}
1016 1016 with local.ui.configoverride(overrides, b'clone'):
1017 1017 exchange.pull(
1018 1018 local,
1019 1019 srcpeer,
1020 1020 heads=revs,
1021 1021 streamclonerequested=stream,
1022 1022 includepats=storeincludepats,
1023 1023 excludepats=storeexcludepats,
1024 1024 depth=depth,
1025 1025 )
1026 1026 elif srcrepo:
1027 1027 # TODO lift restriction once exchange.push() accepts narrow
1028 1028 # push.
1029 1029 if narrow:
1030 1030 raise error.Abort(
1031 1031 _(
1032 1032 b'narrow clone not available for '
1033 1033 b'remote destinations'
1034 1034 )
1035 1035 )
1036 1036
1037 1037 exchange.push(
1038 1038 srcrepo,
1039 1039 destpeer,
1040 1040 revs=revs,
1041 1041 bookmarks=srcrepo._bookmarks.keys(),
1042 1042 )
1043 1043 else:
1044 1044 raise error.Abort(
1045 1045 _(b"clone from remote to remote not supported")
1046 1046 )
1047 1047
1048 1048 cleandir = None
1049 1049
1050 1050 destrepo = destpeer.local()
1051 1051 if destrepo:
1052 1052 template = uimod.samplehgrcs[b'cloned']
1053 1053 u = urlutil.url(abspath)
1054 1054 u.passwd = None
1055 1055 defaulturl = bytes(u)
1056 1056 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1057 1057 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1058 1058
1059 1059 if ui.configbool(b'experimental', b'remotenames'):
1060 1060 logexchange.pullremotenames(destrepo, srcpeer)
1061 1061
1062 1062 if update:
1063 1063 if update is not True:
1064 1064 with srcpeer.commandexecutor() as e:
1065 1065 checkout = e.callcommand(
1066 1066 b'lookup',
1067 1067 {
1068 1068 b'key': update,
1069 1069 },
1070 1070 ).result()
1071 1071
1072 1072 uprev = None
1073 1073 status = None
1074 1074 if checkout is not None:
1075 1075 # Some extensions (at least hg-git and hg-subversion) have
1076 1076 # a peer.lookup() implementation that returns a name instead
1077 1077 # of a nodeid. We work around it here until we've figured
1078 1078 # out a better solution.
1079 1079 if len(checkout) == 20 and checkout in destrepo:
1080 1080 uprev = checkout
1081 1081 elif scmutil.isrevsymbol(destrepo, checkout):
1082 1082 uprev = scmutil.revsymbol(destrepo, checkout).node()
1083 1083 else:
1084 1084 if update is not True:
1085 1085 try:
1086 1086 uprev = destrepo.lookup(update)
1087 1087 except error.RepoLookupError:
1088 1088 pass
1089 1089 if uprev is None:
1090 1090 try:
1091 1091 if destrepo._activebookmark:
1092 1092 uprev = destrepo.lookup(destrepo._activebookmark)
1093 1093 update = destrepo._activebookmark
1094 1094 else:
1095 1095 uprev = destrepo._bookmarks[b'@']
1096 1096 update = b'@'
1097 1097 bn = destrepo[uprev].branch()
1098 1098 if bn == b'default':
1099 1099 status = _(b"updating to bookmark %s\n" % update)
1100 1100 else:
1101 1101 status = (
1102 1102 _(b"updating to bookmark %s on branch %s\n")
1103 1103 ) % (update, bn)
1104 1104 except KeyError:
1105 1105 try:
1106 1106 uprev = destrepo.branchtip(b'default')
1107 1107 except error.RepoLookupError:
1108 1108 uprev = destrepo.lookup(b'tip')
1109 1109 if not status:
1110 1110 bn = destrepo[uprev].branch()
1111 1111 status = _(b"updating to branch %s\n") % bn
1112 1112 destrepo.ui.status(status)
1113 1113 _update(destrepo, uprev)
1114 1114 if update in destrepo._bookmarks:
1115 1115 bookmarks.activate(destrepo, update)
1116 1116 if destlock is not None:
1117 1117 release(destlock)
1118 1118 if destwlock is not None:
1119 1119 release(destlock)
1120 1120 # here is a tiny windows were someone could end up writing the
1121 1121 # repository before the cache are sure to be warm. This is "fine"
1122 1122 # as the only "bad" outcome would be some slowness. That potential
1123 1123 # slowness already affect reader.
1124 1124 with destrepo.lock():
1125 1125 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1126 1126 finally:
1127 1127 release(srclock, destlock, destwlock)
1128 1128 if cleandir is not None:
1129 1129 shutil.rmtree(cleandir, True)
1130 1130 if srcpeer is not None:
1131 1131 srcpeer.close()
1132 1132 if destpeer and destpeer.local() is None:
1133 1133 destpeer.close()
1134 1134 return srcpeer, destpeer
1135 1135
1136 1136
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* on the repo ui.

    When ``quietempty`` is true and the stats record no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1152 1152
1153 1153
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    Working-directory changes are merged into the destination, unless
    ``overwrite`` is set, in which case they are clobbered.

    Returns stats (see pydoc mercurial.merge.applyupdates)."""
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1172 1172
1173 1173
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved file merges remain, False otherwise."""
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    unresolved = result.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved
1181 1181
1182 1182
# naming conflict in clone(): a local variable there shadows the
# module-level update() function, so keep an alias for internal use
_update = update
1185 1185
1186 1186
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False: a clean (forced) update cannot leave conflicts."""
    result = mergemod.clean_update(repo[node])
    # a forced update must never leave unresolved files behind
    assert result.unresolvedcount == 0
    if show_stats:
        _showstats(repo, result, quietempty)
    return False
1194 1194
1195 1195
# naming conflict in updatetotally(): a parameter there shadows the
# module-level clean() function, so keep an alias for internal use
_clean = clean

# The set of values accepted for the `updatecheck` argument of
# updatetotally(); see that function's docstring for their semantics.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1205 1205
1206 1206
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument (as opposed to a config value) must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: ask destutil, which may also give us
            # a bookmark to move and/or activate
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out *before* updating, then let the update itself run
                # unchecked since the working directory is known clean
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # updating to something else by name deactivates the current
            # bookmark, if any
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1285 1285
1286 1286
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    mergestats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, mergestats)
    hasconflicts = mergestats.unresolvedcount > 0
    if hasconflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return hasconflicts
1308 1308
1309 1309
def abortmerge(ui, repo):
    """Abandon an in-progress merge and restore the pre-merge state."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts: go back to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored; the
        # working directory parent is the revision to restore
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update never leaves unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1323 1323
1324 1324
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns the result of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # absolute subpaths replace the source location entirely
            peer_path = url = bytes(subpath)
        else:
            # relative subpaths are joined onto the source location
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may rebind `other` and returns the cleanup
        # callback that the finally-block below must run instead of the
        # plain peer close
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1391 1391
1392 1392
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 if incoming changes were found, 1 otherwise."""

    def subreporecurse():
        # report incoming changes for subrepositories when requested
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for spath in sorted(wctx.substate):
                ret = min(ret, wctx.sub(spath).incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1422 1422
1423 1423
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute revisions missing from each push destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)``: the union of missing nodes
    across all destinations sorted by local revision number, and the list
    of still-open peers.  The caller is responsible for closing the peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # absolute subpaths replace the destination entirely
                dest = bytes(subpath)
            else:
                # relative subpaths are joined onto the destination
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # the peer is handed to the caller (still open) for later hooks
            others.append(other)
        except: # re-raises
            # close the peer if anything fails before it reaches `others`
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1466 1466
1467 1467
def _outgoing_recurse(ui, repo, dests, opts):
    """Report outgoing changes of subrepositories when requested.

    Returns 0 if any subrepository has outgoing changes, 1 otherwise."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(spath).outgoing(ui, dests, opts))
    return ret
1476 1476
1477 1477
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator over the nodes to display, honoring the ``newest_first``,
    ``no_merges`` and limit options."""
    limit = logcmdutil.getlimit(opts)
    skipmerges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        # note: reverses the caller's list in place, like the original
        revs.reverse()
    if limit is None and not skipmerges:
        # fast path: nothing to filter out
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skipmerges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1499 1499
1500 1500
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 if outgoing changes were found, 1 otherwise."""
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret # exit code is zero since we found outgoing changes
    finally:
        # _outgoing left the peers open so the hooks above could use them
        for oth in others:
            oth.close()
1532 1532
1533 1533
def verify(repo, level=None):
    """verify the consistency of a repository

    Also checks subrepository references recorded in .hgsubstate across
    the (visible) history.  Returns the combined verify status (non-zero
    on problems).
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a missing/broken subrepo is reported, not fatal
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself failed for this revision
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1566 1566
1567 1567
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, 'baseui'):
        # src looks like a repository: start from its base ui so that
        # repo-specific configuration is dropped, but read the options to
        # propagate from the repo's own ui below
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume src is a global ui object; keep all of its options
        dst = src.copy()

    # ssh-specific options, taken from opts or the source configuration
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings relevant when talking to remotes
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, value in src.configitems(section):
            dst.setconfig(section, key, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1596 1596
1597 1597
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
#
# Each entry is (name of the localrepository attribute holding the base
# path, file name).  The attribute names are native strings so they can be
# passed directly to getattr() (see cachedlocalrepo._repostate below).
foi = [
    ('spath', b'00changelog.i'),
    ('spath', b'phaseroots'),  # ! phase can change content at the same size
    ('spath', b'obsstore'),
    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1607 1607
1608 1608
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint (mtime/size of the files of interest) and the most
        # recent mtime observed among them
        self._state, self.mtime = self._repostate()
        # remember the filter so refreshed instances get the same view
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # fingerprint changed: rebuild the repository instance from its URL
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(fingerprint, maxmtime)`` for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry in
        the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may legitimately not exist (e.g. no bookmarks
                # yet); fall back to the containing directory's stat
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the already-computed fingerprint so the copy does not
        # immediately consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now