##// END OF EJS Templates
clean: check that there are no conflicts after...
Martin von Zweigbergk -
r45005:abcc82bf default
parent child Browse files
Show More
@@ -1,1459 +1,1459 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from .pycompat import getattr
19 19
20 20 from . import (
21 21 bookmarks,
22 22 bundlerepo,
23 23 cacheutil,
24 24 cmdutil,
25 25 destutil,
26 26 discovery,
27 27 error,
28 28 exchange,
29 29 extensions,
30 30 httppeer,
31 31 localrepo,
32 32 lock,
33 33 logcmdutil,
34 34 logexchange,
35 35 merge as mergemod,
36 36 narrowspec,
37 37 node,
38 38 phases,
39 39 pycompat,
40 40 scmutil,
41 41 sshpeer,
42 42 statichttprepo,
43 43 ui as uimod,
44 44 unionrepo,
45 45 url,
46 46 util,
47 47 verify as verifymod,
48 48 vfs as vfsmod,
49 49 )
50 50 from .utils import hashutil
51 51 from .interfaces import repository as repositorymod
52 52
# Convenience alias: release() accepts one or more lock objects and
# releases the non-None ones (used in the finally blocks below).
release = lock.release

# shared features
# Token written to .hg/shared to indicate bookmarks are shared with the
# source repository (see share()/postshare()).
sharedbookmarks = b'bookmarks'
57 57
58 58
def _local(path):
    """Return the repo module to handle a local ``path``.

    A path that names a regular file is assumed to be a bundle, so
    ``bundlerepo`` is returned for it; anything else is handled by
    ``localrepo``. Raises error.Abort if the path is malformed (e.g.
    contains NUL bytes).
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )

    # Use a real conditional expression instead of the legacy
    # ``cond and a or b`` idiom (which breaks when ``a`` is falsy).
    return bundlerepo if isfile else localrepo
71 71
72 72
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names into revisions to pull/clone.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    parseurl(). Returns ``(revs, checkout)``: the (possibly extended)
    list of revisions, and the revision callers should check out
    afterwards (may be None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # Nothing branch-related was requested; pass revs through.
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # Old server: we can resolve a hash-or-branch name remotely, but
        # not explicit branch names.
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Resolve ``branch`` against the remote branchmap, extending
        # ``revs`` with its heads (newest first). Returns False when the
        # name is not a known branch.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # ``hashbranch`` may be either a branch name or a changeset hash;
        # if it is not a branch, keep it verbatim for later lookup.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
115 115
116 116
def parseurl(path, branches=None):
    '''Split ``url#branch`` into ``(url, (branch, branches))``.'''
    u = util.url(path)
    branch = u.fragment or None
    if branch is not None:
        # Strip the fragment so the returned URL is the bare location.
        u.fragment = None
    return bytes(u), (branch, branches or [])
126 126
127 127
# Map of URL scheme to the module (or callable) that provides repo/peer
# instances for that scheme. ``file`` goes through _local() so that a
# path naming a plain file is treated as a bundle.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
137 137
138 138
def _peerlookup(path):
    """Return the scheme handler (module or factory result) for ``path``.

    Unknown schemes fall back to the ``file`` handler. When the handler
    is callable (like _local) it is invoked with the path; a plain module
    is returned as-is.
    """
    u = util.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
151 151
152 152
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # Already a repo/peer object: ask it directly.
        return repo.local()
    # ``repo`` is a path; ask the scheme handler, which may not support
    # the islocal() query at all (then it is not local).
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
161 161
162 162
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = util.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    # Local file: binary mode, bypassing any URL machinery.
    return util.posixfile(u.localpath(), b'rb')
170 170
171 171
# a list of (ui, repo) functions called for wire peer initialization.
# Each entry is invoked by _peerorrepo() for peers that are not local.
wirepeersetupfuncs = []
174 174
175 175
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    ``presetupfuncs`` run before extension reposetup hooks; for non-local
    peers the module-level ``wirepeersetupfuncs`` run last.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # Prefer the repo's own ui (it may carry repo-local config).
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
202 202
203 203
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return a local repository object for ``path``.

    Aborts when the path resolves to a non-local peer. The returned
    repository is filtered to the 'visible' view.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    local = obj.local()
    if not local:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return local.filtered(b'visible')
227 227
228 228
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # Build a ui configured for remote interaction, then hand off.
    peerui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        peerui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
235 235
236 236
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    # The last meaningful path component becomes the directory name;
    # an empty path (bare host, empty string, '/') yields b''.
    path = util.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
257 257
258 258
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    # A non-shared repo has sharedpath == path.
    if repo.sharedpath == repo.path:
        return None

    # Cached on the repo object by a previous call.
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
276 276
277 277
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path (bytes) or a repo/peer object; the source
    must be local. Returns the new shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    # Create the destination with share-specific creation options; the
    # actual wiring lives in the repo creation code.
    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # Re-open so the returned object reflects the post-share state.
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
325 325
326 326
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            # Retire the sharedpath pointer (kept as .old, not deleted).
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(b'shared')
            repo.requirements.discard(b'relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # Ensure accidental reuse of the stale repo object fails loudly.
    localrepo.poisonrepository(repo)

    return newrepo
368 368
369 369
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # Point the share's default path at the source's default path.
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
386 386
387 387
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # Try the explicit checkout first, then fall back to 'default'/'tip'.
    # NOTE(review): if every lookup raises RepoLookupError, ``uprev`` is
    # never bound and the _update() call below raises NameError; this
    # presumably cannot happen because b'tip' always resolves — confirm.
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
408 408
409 409
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # util.copyfiles() decides per run whether hardlinking works and
        # feeds the decision back through ``hardlink``.
        hardlink = None
        # NOTE(review): ``topic`` is computed while hardlink is still
        # None, so the progress topic always reads 'copying' even when
        # files end up hardlinked — confirm this is intended.
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # Publishing repos need no phase data in the copy.
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # Release the destination lock on any failure before re-raising.
        release(destlock)
        raise
448 448
449 449
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(b'lookup', {b'key': r,}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
551 551
552 552
# Recomputing branch cache might be slow on big repos, so just copy
# an existing cache file instead.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srccachefile = srcrepo.vfs.join(b'cache/%s' % fname)
    if not os.path.exists(srccachefile):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srccachefile, os.path.join(dstcachedir, fname))
563 563
564 564
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    # Phase 1: normalize source/dest and expand branch requests.
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_(b"destination '%s' is not empty") % dest)

    # Phase 2: compute repo-creation options (narrow/shallow/lfs).
    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabledext(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    # Phase 3: pooled-storage clones are delegated to clonewithshare().
    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup', {b'key': b'0',}
                    ).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    # Phase 4: regular clone — either a raw store copy (local, full,
    # non-secret source) or create-and-pull/push. ``cleandir`` marks the
    # directory to delete on failure; it is reset to None on success.
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=node.hex(node.nullid)
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(b'lookup', {b'key': rev,}).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # Clone succeeded: keep the destination on any later failure.
        cleandir = None

        # Phase 5: write the default path, pull remotenames, and update
        # the working directory of a local destination.
        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup', {b'key': update,}
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # Fall back to the @ bookmark, then the default branch
                    # tip, then tip.
                    try:
                        uprev = destrepo._bookmarks[b'@']
                        update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark @\n")
                        else:
                            status = (
                                _(b"updating to bookmark @ on branch %s\n") % bn
                            )
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
992 992
993 993
def _showstats(repo, stats, quietempty=False):
    """Print the update/merge counters from ``stats`` to the repo ui."""
    if quietempty and stats.isempty():
        return
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(msg % counts)
1009 1009
1010 1010
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node``.

    When ``overwrite`` is true, local changes are clobbered; otherwise
    they are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    labels = [b'working copy', b'destination']
    return mergemod.update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1025 1025
1026 1026
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        # leave the user a hint on how to finish the interrupted update
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1034 1034
1035 1035
# naming conflict in clone(): a local variable there shadows 'update',
# so keep this module-private alias for internal callers
_update = update
1038 1038
1039 1039
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Unlike update(), callers do not have to handle merge conflicts: an
    overwriting update must not leave any file unresolved, so this
    function returns nothing instead of a conflict flag.
    """
    stats = updaterepo(repo, node, True)
    # An overwrite (clean) update cannot legitimately produce conflicts;
    # treat any unresolved file as an internal invariant violation.
    assert stats.unresolvedcount == 0
    # a clean checkout invalidates any interrupted graft
    repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
1047 1047
1048 1048
# naming conflict in updatetotally(): its 'clean' parameter shadows the
# function above, so keep this module-private alias
_clean = clean
1051 1051
# Accepted values for the 'commands.update.check' config option and for
# the 'updatecheck' argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1058 1058
1059 1059
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (caller-supplied) value must be one of the known ones
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move along with the update)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up front so the merge machinery never runs with
                # a dirty working directory
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # update succeeded; try to advance the active bookmark with it
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination name is a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a plain rev/branch name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1138 1138
1139 1139
def merge(
    repo,
    node,
    force=None,
    remind=True,
    mergeforce=False,
    labels=None,
    abort=False,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if abort:
        # --abort: undo an interrupted merge instead of starting one
        return abortmerge(repo.ui, repo)

    stats = mergemod.update(
        repo,
        node,
        branchmerge=True,
        force=force,
        mergeforce=mergeforce,
        labels=labels,
    )
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1173 1173
1174 1174
def abortmerge(ui, repo):
    """Abort an in-progress merge by updating back to the first parent.

    Returns True if the cleanup update itself left unresolved files.
    """
    mergestate = mergemod.mergestate.read(repo)
    if mergestate.active():
        # conflicts were recorded: go back to the local side of the merge
        node = mergestate.localctx.hex()
    else:
        # no conflicts, so no mergestate was stored; use the working parent
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0
1188 1188
1189 1189
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    delegates the exit code to subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # pulls the incoming changesets into a temporary bundle repo; cleanupfn
    # must be called to remove it when we are done
    other, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always clean up the temporary bundle, even on the early return
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1223 1223
1224 1224
def incoming(ui, repo, source, opts):
    """Show changesets in *source* not found locally; return an exit code."""

    def subreporecurse():
        # recurse into subrepositories when --subrepos is given; the best
        # (lowest) exit code of any subrepo wins, 1 otherwise
        if not opts.get(b'subrepos'):
            return 1
        wctx = repo[None]
        codes = [1]
        for subpath in sorted(wctx.substate):
            codes.append(wctx.sub(subpath).incoming(ui, source, opts))
        return min(codes)

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1250 1250
1251 1251
def _outgoing(ui, repo, dest, opts):
    """Find local nodes missing from the push destination.

    Returns a pair (missing, other) where ``missing`` is the list of
    outgoing nodes and ``other`` is the destination peer.
    """
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        # resolve the requested revsets to concrete nodes
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, other, revs, force=opts.get(b'force')
    )
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
1275 1275
1276 1276
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in *dest*; return an exit code."""

    def recurse():
        # recurse into subrepositories when --subrepos is given; the best
        # (lowest) exit code of any subrepo wins, 1 otherwise
        if not opts.get(b'subrepos'):
            return 1
        wctx = repo[None]
        codes = [1]
        for subpath in sorted(wctx.substate):
            codes.append(wctx.sub(subpath).outgoing(ui, dest, opts))
        return min(codes)

    limit = logcmdutil.getlimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return recurse()

    if opts.get(b'newest_first'):
        missing.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get(b'no_merges')
    shown = 0
    for n in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    recurse()
    return 0  # exit code is zero since we found outgoing changes
1310 1310
1311 1311
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from visible revisions.
    Returns the (possibly nonzero) status from the core verify, combined
    with any subrepo verification failures.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a nonzero subrepo status replaces a clean one;
                        # otherwise keep the status accumulated so far
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself failed; report and continue
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % node.short(ctx.node())
                )

    return ret
1344 1344
1345 1345
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from its base ui (dropping
        # repo-specific config) and read target options from repo.ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume src is a global ui object: keep all of its options
        dst = src.copy()

    # ssh-specific options, with command-line flags taking precedence
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings the remote side needs (auth, proxies, ...)
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1374 1374
1375 1375
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (name of a repo attribute holding a base path,
# filename relative to that path); consumed by cachedlocalrepo._repostate.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1385 1385
1386 1386
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest (foi)
        # plus the newest mtime among them; used by fetch() to detect change
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed: reuse the cached instance
            return self._repo, False

        # stale: build a fresh repository and re-apply the view filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state, maxmtime) snapshot for the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per entry in the
        module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet (e.g. no bookmarks); fall back to
                # the containing directory so we still get a usable stamp
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the staleness snapshot so the copy refreshes on the
        # same schedule as the original
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now