##// END OF EJS Templates
clone: cleanup the "cleanup dir" logic used during local clone...
marmoute -
r48209:1c7f3d91 default
parent child Browse files
Show More
@@ -1,1600 +1,1602 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cacheutil,
28 28 cmdutil,
29 29 destutil,
30 30 discovery,
31 31 error,
32 32 exchange,
33 33 extensions,
34 34 graphmod,
35 35 httppeer,
36 36 localrepo,
37 37 lock,
38 38 logcmdutil,
39 39 logexchange,
40 40 merge as mergemod,
41 41 mergestate as mergestatemod,
42 42 narrowspec,
43 43 phases,
44 44 requirements,
45 45 scmutil,
46 46 sshpeer,
47 47 statichttprepo,
48 48 ui as uimod,
49 49 unionrepo,
50 50 url,
51 51 util,
52 52 verify as verifymod,
53 53 vfs as vfsmod,
54 54 )
55 55 from .interfaces import repository as repositorymod
56 56 from .utils import (
57 57 hashutil,
58 58 stringutil,
59 59 urlutil,
60 60 )
61 61
62 62
# convenience alias for lock.release
release = lock.release

# shared features
# name of the share item that controls whether bookmarks are shared
sharedbookmarks = b'bookmarks'
67 67
68 68
def _local(path):
    """Return the repository module backing *path*.

    ``bundlerepo`` is selected when *path* names a regular file,
    ``localrepo`` otherwise.  Syntactically invalid paths abort.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    # os.stat() is used instead of os.path.isfile() because the latter
    # started returning `False` on invalid path exceptions in Python 3.8,
    # and invalid paths must be handled specially here.
    try:
        is_regular_file = stat.S_ISREG(os.stat(path).st_mode)
    except OSError:
        is_regular_file = False
    except (TypeError, ValueError) as exc:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(exc))
        )

    if is_regular_file:
        return bundlerepo
    return localrepo
88 88
89 89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a URL fragment into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns ``(revs, checkout)`` where ``revs`` is the possibly
    extended revision list (or None) and ``checkout`` is the revision the
    working copy should be updated to (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the caller's revs straight through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old server without branchmap: only the URL-fragment branch can be
        # honored, and only as an opaque revision name
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append the branch heads to `revs` (closure); returns whether the
        # branch was found in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may be a branch name or an arbitrary revision; fall
        # back to treating it as a revision when it is not a known branch
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
132 132
133 133
def parseurl(path, branches=None):
    """parse url#branch, returning (url, (branch, branches))

    Deprecated compatibility shim; the implementation now lives in
    ``mercurial.utils.urlutil``.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.parseurl(path, branches=branches)
139 139
140 140
# Maps a URL scheme to the module (or, for b'file', a factory function)
# providing repository/peer instances for that scheme.  _peerlookup falls
# back to the b'file' entry for unknown schemes.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150 150
151 151
def _peerlookup(path):
    """Return the scheme handler for *path* (a module, or the result of
    calling a factory such as ``_local``)."""
    parsed = urlutil.url(path)
    handler = schemes.get(parsed.scheme or b'file') or schemes[b'file']
    # We can't test callable(handler) because 'handler' can be an unloaded
    # module that implements __call__, so attempt the call and recover on
    # TypeError when it turns out to be a plain peer module.
    try:
        return handler(path)
    except TypeError:
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
164 164
165 165
def islocal(repo):
    '''Return True when *repo* (an object, or a bytes path/URL) is local.'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # scheme handler has no islocal(): treat as remote
        return False
174 174
175 175
def openpath(ui, path, sendaccept=True):
    '''Open *path* for reading: a plain file when local, url.open otherwise.'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
183 183
184 184
# a list of (ui, repo) functions called for wire peer initialization
# (run by _peerorrepo on objects whose .local() is falsy; extensions may
# append their own callbacks)
wirepeersetupfuncs = []
187 187
188 188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Instantiates the object via the scheme handler, runs the caller's
    ``presetupfuncs``, then every loaded extension's ``reposetup`` hook,
    and finally the registered ``wirepeersetupfuncs`` for non-local peers.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the instance's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own set of setup callbacks
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215 215
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return the local repository at *path*, filtered to visible changesets.

    Aborts when *path* refers to a repository that is not local.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    localinstance = obj.local()
    if not localinstance:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return localinstance.filtered(b'visible')
240 240
241 241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''Return a repository peer for *path*, configured from *opts*.'''
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
248 248
249 249
def defaultdest(source):
    """Infer the default clone destination directory from *source*.

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    urlpath = urlutil.url(source).path
    return os.path.basename(os.path.normpath(urlpath)) if urlpath else b''
270 270
271 271
def sharedreposource(repo):
    """Return the repository object backing the share source of *repo*.

    Returns None when *repo* is not a shared repository.  The resolved
    source is memoized on ``repo.srcrepo``.
    """
    if repo.sharedpath == repo.path:
        return None  # not a share

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # sharedpath always ends in .hg; strip it to obtain the repo path
    srcpath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, srcbranches = urlutil.parseurl(srcpath)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
289 289
290 290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path/URL (bytes) or a repository-like object.
    Returns the new shared repository object at ``dest``.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open the destination, presumably so the hgrc written by postshare
    # takes effect on the returned object -- TODO confirm
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337 337
338 338
def _prependsourcehgrc(repo):
    """On unshare, prepend the shared source's hgrc to this repo's hgrc.

    Only done for shares created with the share-safe method, where the
    source's config applied to the share; copying it keeps that config
    (hooks, checks, ...) in effect after unsharing.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    if dstvfs.exists(b'hgrc'):
        existingconfig = dstvfs.read(b'hgrc')
    else:
        existingconfig = b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existingconfig)
359 359
360 360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around as a backup instead of
            # deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407 407
408 408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Configure additional shared data after a new shared repo is created.

    At this point the new repo only has a requirements file and a pointer
    to the source.  Writes a default path into the destination's hgrc and
    propagates the narrow spec when the source is narrow.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425 425
426 426
def _postshareupdate(repo, update, checkout=None):
    """Maybe update the working directory after a shared repo is created.

    ``update`` may be a boolean or a revision to update to.  The first of
    ``checkout``, ``default`` and ``tip`` that resolves wins.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
447 447
448 448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    The destination is locked as soon as real revlog data starts being
    copied, to avoid premature writes to the target; the caller owns the
    returned lock (which may be None).
    """
    destlock = None
    try:
        hardlink = None
        # NOTE(review): `hardlink` is always None here, so the progress
        # topic is always b'copying' even when util.copyfiles later
        # chooses to hardlink -- confirm whether that is intended
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos don't need phase data copied over
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    # util.copyfiles returns whether it hardlinked; thread
                    # that back in so subsequent files use the same mode
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except: # re-raises
        # release the partially-acquired destination lock before re-raising
        release(destlock)
        raise
487 487
488 488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595 595
596 596
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy cache file *fname* from *srcrepo* into *dstcachedir*, if present."""
    srcfname = srcrepo.cachevfs.join(fname)
    if not os.path.exists(srcfname):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, os.path.join(dstcachedir, fname))
606 606
607 607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    # cleandir: directory removed on failure (see the finally clause)
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                # (dest pre-exists, so on failure remove only the .hg we
                # are about to create, never the directory itself)
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                # source is busy: fall back to a regular pull-based clone
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # success: nothing to remove in the finally clause
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # NOTE(review): the %-formatting happens inside
                            # _() here, unlike the branch case below where
                            # _() wraps only the template -- likely a bug
                            # (translation lookup on the formatted string);
                            # confirm against upstream
                            status = _(b"updating to bookmark %s\n" % update)
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            # best-effort removal of the partially created destination
            shutil.rmtree(cleandir, True)
1063 1065 if srcpeer is not None:
1064 1066 srcpeer.close()
1065 1067 if destpeer and destpeer.local() is None:
1066 1068 destpeer.close()
1067 1069 return srcpeer, destpeer
1068 1070
1069 1071
def _showstats(repo, stats, quietempty=False):
    """Print the update/merge file statistics to the ui.

    When ``quietempty`` is true and nothing was touched, stay silent.
    """
    if quietempty and stats.isempty():
        return
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(msg % counts)
1085 1087
1086 1088
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated entry point: update the working directory to ``node``.

    When ``overwrite`` is set, local changes are clobbered instead of
    merged. Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # Steer callers toward the supported merge-module entry points.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1105 1107
1106 1108
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``, merging local changes.

    Returns True when unresolved file merges remain.
    """
    mergestats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, mergestats, quietempty)
    unresolved = mergestats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved
1114 1116
1115 1117
# naming conflict in clone(): clone() uses a local variable named ``update``,
# so keep a module-level alias to this function for it to call
_update = update
1118 1120
1119 1121
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, discarding changes.

    Always returns False: a clean update cannot leave conflicts behind.
    """
    updatestats = mergemod.clean_update(repo[node])
    # clean updates never merge file contents, so nothing can be unresolved
    assert updatestats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, updatestats, quietempty)
    return False
1127 1129
1128 1130
# naming conflict in updatetotally(): it takes a ``clean`` argument, so keep
# a module-level alias to this function for it to call
_clean = clean
1131 1133
# the set of dirty-working-directory strategies accepted by updatetotally()
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1138 1140
1139 1141
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit argument, on the other hand, must be a valid strategy
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to advance)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # check here so the merge layer only sees a clean wdir
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # the update succeeded; try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something else: drop the active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1218 1220
1219 1221
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with ``ctx``, resolving changes.

    Returns True when any file merge is left unresolved.
    """
    repo = ctx.repo()
    mergestats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, mergestats)
    unresolved = mergestats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1241 1243
1242 1244
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the local side."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1256 1258
1257 1259
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # resolve the subrepository path against the pull source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges() may replace both the peer and the cleanup
        # callback (the new callback also disposes of the temporary bundle),
        # so the rebinding order here matters
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1314 1316
1315 1317
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, 1 otherwise (recursing
    into subrepositories when requested).
    """

    def subreporecurse():
        # propagate the best (lowest) exit code found across subrepos
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                sub = wctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1345 1347
1346 1348
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the revisions missing from each destination in ``dests``.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes sorted by local revision number and ``others``
    is the list of opened peers (the caller is responsible for closing
    them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): ``subpath`` is rebound to a url object here, so
            # on a second loop iteration urlutil.url() is applied to an
            # already-parsed url — confirm this round-trips for relative
            # subrepo paths with multiple destinations
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # only successfully-queried peers are handed to the caller
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1385 1387
1386 1388
def _outgoing_recurse(ui, repo, dests, opts):
    """Recurse ``outgoing`` into subrepositories when --subrepos is set.

    Returns the minimum exit code found (1 when recursion is disabled).
    """
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for name in sorted(wctx.substate):
            sub = wctx.sub(name)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1395 1397
1396 1398
def _outgoing_filter(repo, revs, opts):
    """Yield nodes from ``revs`` honoring the outgoing display options.

    Applies --newest-first (reversing ``revs`` in place), --limit and
    --no-merges.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # fast path: nothing to filter
        for node in revs:
            yield node
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1418 1420
1419 1421
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (also
    recursing into subrepositories when requested).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() opened one peer per destination; close them all
        for oth in others:
            oth.close()
1451 1453
1452 1454
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # any failed subrepo verification poisons the result
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up: report and keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1485 1487
1486 1488
def remoteui(src, opts):
    """Derive a ui suitable for talking to a remote from ``src`` and ``opts``.

    ``src`` may be a repository (its base ui is used, dropping
    repo-specific configuration) or a plain ui object.
    """
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: explicit flags win over configuration
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1515 1517
1516 1518
# Files of interest ("foi")
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the directory, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1526 1528
1527 1529
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the on-disk state at caching time (see _repostate)
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # state changed: build a fresh repository and re-apply the filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file-of-interest, newest mtime seen)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet; fall back to its directory so a
                # later creation still changes the fingerprint
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the fingerprint so the copy refreshes on the same schedule
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now