##// END OF EJS Templates
clean: delete obsolete unlinking of .hg/graftstate...
Martin von Zweigbergk -
r44750:3245cdea default
parent child Browse files
Show More
@@ -1,1450 +1,1449 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from .pycompat import getattr
19 19
20 20 from . import (
21 21 bookmarks,
22 22 bundlerepo,
23 23 cacheutil,
24 24 cmdutil,
25 25 destutil,
26 26 discovery,
27 27 error,
28 28 exchange,
29 29 extensions,
30 30 httppeer,
31 31 localrepo,
32 32 lock,
33 33 logcmdutil,
34 34 logexchange,
35 35 merge as mergemod,
36 36 narrowspec,
37 37 node,
38 38 phases,
39 39 pycompat,
40 40 scmutil,
41 41 sshpeer,
42 42 statichttprepo,
43 43 ui as uimod,
44 44 unionrepo,
45 45 url,
46 46 util,
47 47 verify as verifymod,
48 48 vfs as vfsmod,
49 49 )
50 50 from .utils import hashutil
51 51 from .interfaces import repository as repositorymod
52 52
53 53 release = lock.release
54 54
55 55 # shared features
56 56 sharedbookmarks = b'bookmarks'
57 57
58 58
def _local(path):
    """Return the repository module appropriate for a local ``path``.

    The path is expanded first.  If it names an existing regular file it
    is assumed to be a bundle and ``bundlerepo`` is returned; otherwise
    ``localrepo`` is returned.

    Raises ``error.Abort`` for paths the OS refuses to stat (e.g. paths
    containing NUL bytes).
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )

    # conditional expression instead of the legacy `x and a or b` idiom,
    # which silently misbehaves when the middle operand is falsy
    return bundlerepo if isfile else localrepo
72 72
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against ``other`` and merge them into ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair, as produced by
    ``parseurl()``.  Returns a ``(revs, checkout)`` pair where ``checkout``
    is the revision a caller should update to (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch components: pass revs through, first rev is the checkout
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # peer cannot resolve branch names; treat the fragment as a revision
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Expand a branch name into its heads (appended to ``revs``).
        # b'.' means the local repo's current dirstate branch.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may be either a branch name or a raw revision;
        # if it is not a known branch, keep it as a revision to look up
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
115 115
116 116
def parseurl(path, branches=None):
    """Split ``url#branch`` into ``(url, (branch, branches))``.

    A ``#fragment`` on the URL, when present, is stripped from the
    returned URL bytes and reported as the branch component.
    """
    u = util.url(path)
    if u.fragment:
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
126 126
127 127
# URL scheme -> module (or factory) providing the peer/repository
# implementation.  b'file' maps to _local(), which picks bundlerepo or
# localrepo depending on whether the path is a file.
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
137 137
138 138
def _peerlookup(path):
    """Return the scheme-specific handler for ``path``.

    Unknown schemes fall back to the b'file' handler.  The handler may
    be a module or the result of calling a factory (such as _local).
    """
    u = util.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
151 151
152 152
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object; ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat as remote
        return False
161 161
162 162
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
170 170
171 171
# a list of (ui, repo) functions called for wire peer initialization
# (empty by default; presumably appended to by extensions — confirm)
wirepeersetupfuncs = []
175 175
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the object's own ui when it has one (it may carry repo config)
    ui = getattr(obj, "ui", ui)
    # caller-supplied setup runs before extension reposetup hooks
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire (remote) peers get the registered wire-peer setup functions
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
202 202
203 203
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return a local repository object for ``path``.

    The repository is returned filtered to the b'visible' view.  Aborts
    when ``path`` does not refer to a local repository.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if repo:
        return repo.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or obj.url())
    )
227 227
228 228
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
235 235
236 236
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # last path component after trailing-slash normalization, or b'' when
    # the URL has no path at all
    return os.path.basename(os.path.normpath(path)) if path else b''
257 257
258 258
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # store lives inside the repo itself: not a share
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached by a previous call below
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache on the repo object
    return srcrepo
276 276
277 277
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """Create a shared repository at ``dest`` backed by ``source``'s store.

    ``source`` may be a path/URL (possibly with a ``#branch`` fragment)
    or an existing local repo/peer object.  Returns the new repository
    object.  Aborts when ``source`` is not local.
    """

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # path/URL form: open the source and resolve any #branch fragment
        # to a checkout revision
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object; no branch fragment to honor
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen the just-created share as a fresh repository object
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
325 325
326 326
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            # retire the pointer to the shared store; kept as .old rather
            # than deleted
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(b'shared')
            repo.requirements.discard(b'relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    #       removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
368 368
369 369
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # seed the share's [paths] default from the source (or explicit override)
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
386 386
387 387
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then sensible fallbacks
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
408 408
409 409
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # hardlink starts as None; util.copyfiles() decides on the first
        # copy whether hardlinking works and returns the updated flag
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # do not propagate phase data from publishing repos
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # release the destination lock before propagating any error
        release(destlock)
        raise
448 448
449 449
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(b'lookup', {b'key': r,}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

        # Resolve the value to put in [paths] section for the source.
        if islocal(source):
            defaultpath = os.path.abspath(util.urllocalpath(source))
        else:
            defaultpath = source

        sharerepo = repository(ui, path=sharepath)
        destrepo = share(
            ui,
            sharerepo,
            dest=dest,
            update=False,
            bookmarks=False,
            defaultpath=defaultpath,
        )

        # We need to perform a pull against the dest repo to fetch bookmarks
        # and other non-store data that isn't shared by default. In the case of
        # non-existing shared repo, this means we pull from the remote twice. This
        # is a bit weird. But at the time it was implemented, there wasn't an easy
        # way to pull just non-changegroup data.
        exchange.pull(destrepo, srcpeer, heads=revs)

        _postshareupdate(destrepo, update)

        return srcpeer, peer(ui, peeropts, dest)
551 551
552 552
553 553 # Recomputing branch cache might be slow on big repos,
554 554 # so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.vfs.join(b'cache/%s' % fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
563 563
564 564
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    # normalize source into (origsource, source, srcpeer, branches)
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_(b"destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabledext(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    # pooled-storage (auto-share) handling: may delegate to clonewithshare()
    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup', {b'key': b'0',}
                    ).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    # non-pooled clone: either a raw store copy (hardlink clone) or a
    # regular pull/push exchange; cleandir tracks what to delete on failure
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=node.hex(node.nullid)
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(b'lookup', {b'key': rev,}).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # clone succeeded; do not delete the destination in `finally`
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup', {b'key': update,}
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    # fall back to the @ bookmark, then the default branch,
                    # then tip
                    try:
                        uprev = destrepo._bookmarks[b'@']
                        update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark @\n")
                        else:
                            status = (
                                _(b"updating to bookmark @ on branch %s\n") % bn
                            )
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
992 992
993 993
def _showstats(repo, stats, quietempty=False):
    """Print a one-line update summary for ``stats`` on the repo's ui."""
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1009 1009
1010 1010
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod.update(repo, node, **kwargs)
1025 1025
1026 1026
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns whether any unresolved file merges remain.
    """
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# naming conflict in clone()
_update = update
1038 1038
1039 1039
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # a forced (overwrite) update clobbers everything, so it can never
    # leave merge conflicts behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)


# naming conflict in updatetotally()
_clean = clean
1051 1050
# The set of legal values for the ``commands.update.check`` config knob and
# for the ``updatecheck`` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1058 1057
1059 1058
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # Not configured, or an invalid value configured: fall back to
            # the historical default.
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # An invalid value passed in explicitly by a caller is a
        # programming error, not a configuration problem.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit destination: let destutil pick one, along with a
            # bookmark to move and/or activate.
            checkout, movemarkfrom, brev = destutil.destupdate(
                repo, clean=clean
            )
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                # The working directory is known clean at this point, so no
                # further checking is needed during the actual update.
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            newnode = repo[b'.'].node()
            if movemarkfrom == newnode:
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], newnode):
                mark = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % mark)
            else:
                # this can happen with a non-linear update
                mark = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % mark)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                mark = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % mark)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                mark = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % mark)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1138 1137
1139 1138
def merge(
    repo, node, force=None, remind=True, mergeforce=False, labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    mergestats = mergemod.update(
        repo,
        node,
        branchmerge=True,
        force=force,
        mergeforce=mergeforce,
        labels=labels,
    )
    _showstats(repo, mergestats)
    unresolved = mergestats.unresolvedcount
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        # only nag about committing when the merge went cleanly
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1164 1163
1165 1164
def abortmerge(ui, repo):
    """Abort an in-progress merge, restoring the pre-merge working copy."""
    mergestate = mergemod.mergestate.read(repo)
    if mergestate.active():
        # conflicts were recorded: return to the local side of the merge
        node = mergestate.localctx.hex()
    else:
        # no conflicts, so no mergestate was stored; '.' is the target
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update can never leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1179 1178
1180 1179
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.

    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges rebinds ``other`` and hands back a cleanup callback
    # that must run once we are done with the incoming changesets
    other, chlist, cleanup = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanup()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1214 1213
1215 1214
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from ``source``."""

    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                sub = wctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            # a changeset with two non-null parents is a merge
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1241 1240
1242 1241
def _outgoing(ui, repo, dest, opts):
    """Compute the changesets missing from ``dest``.

    Returns a ``(missing, remotepeer)`` pair; ``missing`` is empty when
    there is nothing to push.
    """
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    remote = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, remote, revs, force=opts.get(b'force')
    )
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, remote
1266 1265
1267 1266
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in ``dest``; returns a command exit code."""

    def recurse():
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                sub = wctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return recurse()

    if opts.get(b'newest_first'):
        missing.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get(b'no_merges')
    shown = 0
    for n in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        # a changeset with two non-null parents is a merge
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    recurse()
    return 0  # exit code is zero since we found outgoing changes
1301 1300
1302 1301
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )
    if not revs:
        return ret

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    ret = ctx.sub(subpath, allowcreate=False).verify() or ret
                except error.RepoError as e:
                    repo.ui.warn(b'%d: %s\n' % (rev, e))
        except Exception:
            # substate could not even be parsed for this revision
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % node.short(ctx.node())
            )

    return ret
1335 1334
1336 1335
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts

    ``src`` may be a repository (its ``baseui`` is copied, dropping
    repo-specific configuration) or a plain ui object (copied as-is).
    """
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1365 1364
1366 1365
# Files of interest.
# Used to decide whether the repository has changed by comparing the mtime
# and size of these files between checks.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1376 1375
1377 1376
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest plus
        # the newest mtime seen; fetch() uses this to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            # nothing observable changed; hand back the cached instance
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        self._repo = (
            repo.filtered(self._filtername)
            if self._filtername
            else repo.unfiltered()
        )
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` where ``state`` is a tuple of
        (mtime, size) pairs for the files of interest and ``maxmtime`` the
        newest mtime among them."""
        entries = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist yet; fall back to its directory
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            if mtime > maxmtime:
                maxmtime = mtime

        return tuple(entries), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        repo = (
            repo.filtered(self._filtername)
            if self._filtername
            else repo.unfiltered()
        )
        clone = cachedlocalrepo(repo)
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
General Comments 0
You need to be logged in to leave comments. Login now