merge: use public interface ms.localctx instead of ms._local
Pulkit Goyal
r35731:7ffbd911 default
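The only behavioral change in this diff is in merge() when called with abort=True: the node to update back to is now obtained through the mergestate's public localctx accessor rather than the private _local attribute, which in turn lets the module drop its hex import from .node. A minimal sketch of the old and new spellings, assuming repo is a localrepository with an active merge state:

    from mercurial import merge as mergemod

    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # old spelling: format the private binary node with hex() from mercurial.node
        #   node = hex(ms._local)
        # new spelling: the public accessor returns a changectx with the same node
        node = ms.localctx.hex()

Both spellings produce the same hex nodeid; the new one simply avoids reaching into a private attribute.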
@@ -1,1128 +1,1127 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import (
18 hex,
19 18 nullid,
20 19 )
21 20
22 21 from . import (
23 22 bookmarks,
24 23 bundlerepo,
25 24 cmdutil,
26 25 destutil,
27 26 discovery,
28 27 error,
29 28 exchange,
30 29 extensions,
31 30 httppeer,
32 31 localrepo,
33 32 lock,
34 33 logexchange,
35 34 merge as mergemod,
36 35 node,
37 36 phases,
38 37 repoview,
39 38 scmutil,
40 39 sshpeer,
41 40 statichttprepo,
42 41 ui as uimod,
43 42 unionrepo,
44 43 url,
45 44 util,
46 45 verify as verifymod,
47 46 vfs as vfsmod,
48 47 )
49 48
50 49 release = lock.release
51 50
52 51 # shared features
53 52 sharedbookmarks = 'bookmarks'
54 53
55 54 def _local(path):
56 55 path = util.expandpath(util.urllocalpath(path))
57 56 return (os.path.isfile(path) and bundlerepo or localrepo)
58 57
59 58 def addbranchrevs(lrepo, other, branches, revs):
60 59 peer = other.peer() # a courtesy to callers using a localrepo for other
61 60 hashbranch, branches = branches
62 61 if not hashbranch and not branches:
63 62 x = revs or None
64 63 if util.safehasattr(revs, 'first'):
65 64 y = revs.first()
66 65 elif revs:
67 66 y = revs[0]
68 67 else:
69 68 y = None
70 69 return x, y
71 70 if revs:
72 71 revs = list(revs)
73 72 else:
74 73 revs = []
75 74
76 75 if not peer.capable('branchmap'):
77 76 if branches:
78 77 raise error.Abort(_("remote branch lookup not supported"))
79 78 revs.append(hashbranch)
80 79 return revs, revs[0]
81 80 branchmap = peer.branchmap()
82 81
83 82 def primary(branch):
84 83 if branch == '.':
85 84 if not lrepo:
86 85 raise error.Abort(_("dirstate branch not accessible"))
87 86 branch = lrepo.dirstate.branch()
88 87 if branch in branchmap:
89 88 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
90 89 return True
91 90 else:
92 91 return False
93 92
94 93 for branch in branches:
95 94 if not primary(branch):
96 95 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
97 96 if hashbranch:
98 97 if not primary(hashbranch):
99 98 revs.append(hashbranch)
100 99 return revs, revs[0]
101 100
102 101 def parseurl(path, branches=None):
103 102 '''parse url#branch, returning (url, (branch, branches))'''
104 103
105 104 u = util.url(path)
106 105 branch = None
107 106 if u.fragment:
108 107 branch = u.fragment
109 108 u.fragment = None
110 109 return bytes(u), (branch, branches or [])
111 110
112 111 schemes = {
113 112 'bundle': bundlerepo,
114 113 'union': unionrepo,
115 114 'file': _local,
116 115 'http': httppeer,
117 116 'https': httppeer,
118 117 'ssh': sshpeer,
119 118 'static-http': statichttprepo,
120 119 }
121 120
122 121 def _peerlookup(path):
123 122 u = util.url(path)
124 123 scheme = u.scheme or 'file'
125 124 thing = schemes.get(scheme) or schemes['file']
126 125 try:
127 126 return thing(path)
128 127 except TypeError:
129 128 # we can't test callable(thing) because 'thing' can be an unloaded
130 129 # module that implements __call__
131 130 if not util.safehasattr(thing, 'instance'):
132 131 raise
133 132 return thing
134 133
135 134 def islocal(repo):
136 135 '''return true if repo (or path pointing to repo) is local'''
137 136 if isinstance(repo, bytes):
138 137 try:
139 138 return _peerlookup(repo).islocal(repo)
140 139 except AttributeError:
141 140 return False
142 141 return repo.local()
143 142
144 143 def openpath(ui, path):
145 144 '''open path with open if local, url.open if remote'''
146 145 pathurl = util.url(path, parsequery=False, parsefragment=False)
147 146 if pathurl.islocal():
148 147 return util.posixfile(pathurl.localpath(), 'rb')
149 148 else:
150 149 return url.open(ui, path)
151 150
152 151 # a list of (ui, repo) functions called for wire peer initialization
153 152 wirepeersetupfuncs = []
154 153
155 154 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
156 155 """return a repository object for the specified path"""
157 156 obj = _peerlookup(path).instance(ui, path, create)
158 157 ui = getattr(obj, "ui", ui)
159 158 for f in presetupfuncs or []:
160 159 f(ui, obj)
161 160 for name, module in extensions.extensions(ui):
162 161 hook = getattr(module, 'reposetup', None)
163 162 if hook:
164 163 hook(ui, obj)
165 164 if not obj.local():
166 165 for f in wirepeersetupfuncs:
167 166 f(ui, obj)
168 167 return obj
169 168
170 169 def repository(ui, path='', create=False, presetupfuncs=None):
171 170 """return a repository object for the specified path"""
172 171 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
173 172 repo = peer.local()
174 173 if not repo:
175 174 raise error.Abort(_("repository '%s' is not local") %
176 175 (path or peer.url()))
177 176 return repo.filtered('visible')
178 177
179 178 def peer(uiorrepo, opts, path, create=False):
180 179 '''return a repository peer for the specified path'''
181 180 rui = remoteui(uiorrepo, opts)
182 181 return _peerorrepo(rui, path, create).peer()
183 182
184 183 def defaultdest(source):
185 184 '''return default destination of clone if none is given
186 185
187 186 >>> defaultdest(b'foo')
188 187 'foo'
189 188 >>> defaultdest(b'/foo/bar')
190 189 'bar'
191 190 >>> defaultdest(b'/')
192 191 ''
193 192 >>> defaultdest(b'')
194 193 ''
195 194 >>> defaultdest(b'http://example.org/')
196 195 ''
197 196 >>> defaultdest(b'http://example.org/foo/')
198 197 'foo'
199 198 '''
200 199 path = util.url(source).path
201 200 if not path:
202 201 return ''
203 202 return os.path.basename(os.path.normpath(path))
204 203
205 204 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
206 205 relative=False):
207 206 '''create a shared repository'''
208 207
209 208 if not islocal(source):
210 209 raise error.Abort(_('can only share local repositories'))
211 210
212 211 if not dest:
213 212 dest = defaultdest(source)
214 213 else:
215 214 dest = ui.expandpath(dest)
216 215
217 216 if isinstance(source, str):
218 217 origsource = ui.expandpath(source)
219 218 source, branches = parseurl(origsource)
220 219 srcrepo = repository(ui, source)
221 220 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
222 221 else:
223 222 srcrepo = source.local()
224 223 origsource = source = srcrepo.url()
225 224 checkout = None
226 225
227 226 sharedpath = srcrepo.sharedpath # if our source is already sharing
228 227
229 228 destwvfs = vfsmod.vfs(dest, realpath=True)
230 229 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
231 230
232 231 if destvfs.lexists():
233 232 raise error.Abort(_('destination already exists'))
234 233
235 234 if not destwvfs.isdir():
236 235 destwvfs.mkdir()
237 236 destvfs.makedir()
238 237
239 238 requirements = ''
240 239 try:
241 240 requirements = srcrepo.vfs.read('requires')
242 241 except IOError as inst:
243 242 if inst.errno != errno.ENOENT:
244 243 raise
245 244
246 245 if relative:
247 246 try:
248 247 sharedpath = os.path.relpath(sharedpath, destvfs.base)
249 248 requirements += 'relshared\n'
250 249 except (IOError, ValueError) as e:
251 250 # ValueError is raised on Windows if the drive letters differ on
252 251 # each path
253 252 raise error.Abort(_('cannot calculate relative path'),
254 253 hint=str(e))
255 254 else:
256 255 requirements += 'shared\n'
257 256
258 257 destvfs.write('requires', requirements)
259 258 destvfs.write('sharedpath', sharedpath)
260 259
261 260 r = repository(ui, destwvfs.base)
262 261 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
263 262 _postshareupdate(r, update, checkout=checkout)
264 263 return r
265 264
266 265 def unshare(ui, repo):
267 266 """convert a shared repository to a normal one
268 267
269 268 Copy the store data to the repo and remove the sharedpath data.
270 269 """
271 270
272 271 destlock = lock = None
273 272 lock = repo.lock()
274 273 try:
275 274 # we use locks here because if we race with commit, we
276 275 # can end up with extra data in the cloned revlogs that's
277 276 # not pointed to by changesets, thus causing verify to
278 277 # fail
279 278
280 279 destlock = copystore(ui, repo, repo.path)
281 280
282 281 sharefile = repo.vfs.join('sharedpath')
283 282 util.rename(sharefile, sharefile + '.old')
284 283
285 284 repo.requirements.discard('shared')
286 285 repo.requirements.discard('relshared')
287 286 repo._writerequirements()
288 287 finally:
289 288 destlock and destlock.release()
290 289 lock and lock.release()
291 290
292 291 # update store, spath, svfs and sjoin of repo
293 292 repo.unfiltered().__init__(repo.baseui, repo.root)
294 293
295 294 # TODO: figure out how to access subrepos that exist, but were previously
296 295 # removed from .hgsub
297 296 c = repo['.']
298 297 subs = c.substate
299 298 for s in sorted(subs):
300 299 c.sub(s).unshare()
301 300
302 301 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
303 302 """Called after a new shared repo is created.
304 303
305 304 The new repo only has a requirements file and pointer to the source.
306 305 This function configures additional shared data.
307 306
308 307 Extensions can wrap this function and write additional entries to
309 308 destrepo/.hg/shared to indicate additional pieces of data to be shared.
310 309 """
311 310 default = defaultpath or sourcerepo.ui.config('paths', 'default')
312 311 if default:
313 312 template = ('[paths]\n'
314 313 'default = %s\n')
315 314 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
316 315
317 316 with destrepo.wlock():
318 317 if bookmarks:
319 318 destrepo.vfs.write('shared', sharedbookmarks + '\n')
320 319
321 320 def _postshareupdate(repo, update, checkout=None):
322 321 """Maybe perform a working directory update after a shared repo is created.
323 322
324 323 ``update`` can be a boolean or a revision to update to.
325 324 """
326 325 if not update:
327 326 return
328 327
329 328 repo.ui.status(_("updating working directory\n"))
330 329 if update is not True:
331 330 checkout = update
332 331 for test in (checkout, 'default', 'tip'):
333 332 if test is None:
334 333 continue
335 334 try:
336 335 uprev = repo.lookup(test)
337 336 break
338 337 except error.RepoLookupError:
339 338 continue
340 339 _update(repo, uprev)
341 340
342 341 def copystore(ui, srcrepo, destpath):
343 342 '''copy files from store of srcrepo in destpath
344 343
345 344 returns destlock
346 345 '''
347 346 destlock = None
348 347 try:
349 348 hardlink = None
350 349 num = 0
351 350 closetopic = [None]
352 351 def prog(topic, pos):
353 352 if pos is None:
354 353 closetopic[0] = topic
355 354 else:
356 355 ui.progress(topic, pos + num)
357 356 srcpublishing = srcrepo.publishing()
358 357 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
359 358 dstvfs = vfsmod.vfs(destpath)
360 359 for f in srcrepo.store.copylist():
361 360 if srcpublishing and f.endswith('phaseroots'):
362 361 continue
363 362 dstbase = os.path.dirname(f)
364 363 if dstbase and not dstvfs.exists(dstbase):
365 364 dstvfs.mkdir(dstbase)
366 365 if srcvfs.exists(f):
367 366 if f.endswith('data'):
368 367 # 'dstbase' may be empty (e.g. revlog format 0)
369 368 lockfile = os.path.join(dstbase, "lock")
370 369 # lock to avoid premature writing to the target
371 370 destlock = lock.lock(dstvfs, lockfile)
372 371 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
373 372 hardlink, progress=prog)
374 373 num += n
375 374 if hardlink:
376 375 ui.debug("linked %d files\n" % num)
377 376 if closetopic[0]:
378 377 ui.progress(closetopic[0], None)
379 378 else:
380 379 ui.debug("copied %d files\n" % num)
381 380 if closetopic[0]:
382 381 ui.progress(closetopic[0], None)
383 382 return destlock
384 383 except: # re-raises
385 384 release(destlock)
386 385 raise
387 386
388 387 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
389 388 rev=None, update=True, stream=False):
390 389 """Perform a clone using a shared repo.
391 390
392 391 The store for the repository will be located at <sharepath>/.hg. The
393 392 specified revisions will be cloned or pulled from "source". A shared repo
394 393 will be created at "dest" and a working copy will be created if "update" is
395 394 True.
396 395 """
397 396 revs = None
398 397 if rev:
399 398 if not srcpeer.capable('lookup'):
400 399 raise error.Abort(_("src repository does not support "
401 400 "revision lookup and so doesn't "
402 401 "support clone by revision"))
403 402 revs = [srcpeer.lookup(r) for r in rev]
404 403
405 404 # Obtain a lock before checking for or cloning the pooled repo otherwise
406 405 # 2 clients may race creating or populating it.
407 406 pooldir = os.path.dirname(sharepath)
408 407 # lock class requires the directory to exist.
409 408 try:
410 409 util.makedir(pooldir, False)
411 410 except OSError as e:
412 411 if e.errno != errno.EEXIST:
413 412 raise
414 413
415 414 poolvfs = vfsmod.vfs(pooldir)
416 415 basename = os.path.basename(sharepath)
417 416
418 417 with lock.lock(poolvfs, '%s.lock' % basename):
419 418 if os.path.exists(sharepath):
420 419 ui.status(_('(sharing from existing pooled repository %s)\n') %
421 420 basename)
422 421 else:
423 422 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
424 423 # Always use pull mode because hardlinks in share mode don't work
425 424 # well. Never update because working copies aren't necessary in
426 425 # share mode.
427 426 clone(ui, peeropts, source, dest=sharepath, pull=True,
428 427 rev=rev, update=False, stream=stream)
429 428
430 429 # Resolve the value to put in [paths] section for the source.
431 430 if islocal(source):
432 431 defaultpath = os.path.abspath(util.urllocalpath(source))
433 432 else:
434 433 defaultpath = source
435 434
436 435 sharerepo = repository(ui, path=sharepath)
437 436 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
438 437 defaultpath=defaultpath)
439 438
440 439 # We need to perform a pull against the dest repo to fetch bookmarks
441 440 # and other non-store data that isn't shared by default. In the case of
442 441 # non-existing shared repo, this means we pull from the remote twice. This
443 442 # is a bit weird. But at the time it was implemented, there wasn't an easy
444 443 # way to pull just non-changegroup data.
445 444 destrepo = repository(ui, path=dest)
446 445 exchange.pull(destrepo, srcpeer, heads=revs)
447 446
448 447 _postshareupdate(destrepo, update)
449 448
450 449 return srcpeer, peer(ui, peeropts, dest)
451 450
452 451 # Recomputing branch cache might be slow on big repos,
453 452 # so just copy it
454 453 def _copycache(srcrepo, dstcachedir, fname):
455 454 """copy a cache from srcrepo to destcachedir (if it exists)"""
456 455 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
457 456 dstbranchcache = os.path.join(dstcachedir, fname)
458 457 if os.path.exists(srcbranchcache):
459 458 if not os.path.exists(dstcachedir):
460 459 os.mkdir(dstcachedir)
461 460 util.copyfile(srcbranchcache, dstbranchcache)
462 461
463 462 def _cachetocopy(srcrepo):
464 463 """return the list of cache file valuable to copy during a clone"""
465 464 # In local clones we're copying all nodes, not just served
466 465 # ones. Therefore copy all branch caches over.
467 466 cachefiles = ['branch2']
468 467 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
469 468 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
470 469 cachefiles += ['tags2']
471 470 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
472 471 cachefiles += ['hgtagsfnodes1']
473 472 return cachefiles
474 473
475 474 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
476 475 update=True, stream=False, branch=None, shareopts=None):
477 476 """Make a copy of an existing repository.
478 477
479 478 Create a copy of an existing repository in a new directory. The
480 479 source and destination are URLs, as passed to the repository
481 480 function. Returns a pair of repository peers, the source and
482 481 newly created destination.
483 482
484 483 The location of the source is added to the new repository's
485 484 .hg/hgrc file, as the default to be used for future pulls and
486 485 pushes.
487 486
488 487 If an exception is raised, the partly cloned/updated destination
489 488 repository will be deleted.
490 489
491 490 Arguments:
492 491
493 492 source: repository object or URL
494 493
495 494 dest: URL of destination repository to create (defaults to base
496 495 name of source repository)
497 496
498 497 pull: always pull from source repository, even in local case or if the
499 498 server prefers streaming
500 499
501 500 stream: stream raw data uncompressed from repository (fast over
502 501 LAN, slow over WAN)
503 502
504 503 rev: revision to clone up to (implies pull=True)
505 504
506 505 update: update working directory after clone completes, if
507 506 destination is local repository (True means update to default rev,
508 507 anything else is treated as a revision)
509 508
510 509 branch: branches to clone
511 510
512 511 shareopts: dict of options to control auto sharing behavior. The "pool" key
513 512 activates auto sharing mode and defines the directory for stores. The
514 513 "mode" key determines how to construct the directory name of the shared
515 514 repository. "identity" means the name is derived from the node of the first
516 515 changeset in the repository. "remote" means the name is derived from the
517 516 remote's path/URL. Defaults to "identity."
518 517 """
519 518
520 519 if isinstance(source, bytes):
521 520 origsource = ui.expandpath(source)
522 521 source, branch = parseurl(origsource, branch)
523 522 srcpeer = peer(ui, peeropts, source)
524 523 else:
525 524 srcpeer = source.peer() # in case we were called with a localrepo
526 525 branch = (None, branch or [])
527 526 origsource = source = srcpeer.url()
528 527 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
529 528
530 529 if dest is None:
531 530 dest = defaultdest(source)
532 531 if dest:
533 532 ui.status(_("destination directory: %s\n") % dest)
534 533 else:
535 534 dest = ui.expandpath(dest)
536 535
537 536 dest = util.urllocalpath(dest)
538 537 source = util.urllocalpath(source)
539 538
540 539 if not dest:
541 540 raise error.Abort(_("empty destination path is not valid"))
542 541
543 542 destvfs = vfsmod.vfs(dest, expandpath=True)
544 543 if destvfs.lexists():
545 544 if not destvfs.isdir():
546 545 raise error.Abort(_("destination '%s' already exists") % dest)
547 546 elif destvfs.listdir():
548 547 raise error.Abort(_("destination '%s' is not empty") % dest)
549 548
550 549 shareopts = shareopts or {}
551 550 sharepool = shareopts.get('pool')
552 551 sharenamemode = shareopts.get('mode')
553 552 if sharepool and islocal(dest):
554 553 sharepath = None
555 554 if sharenamemode == 'identity':
556 555 # Resolve the name from the initial changeset in the remote
557 556 # repository. This returns nullid when the remote is empty. It
558 557 # raises RepoLookupError if revision 0 is filtered or otherwise
559 558 # not available. If we fail to resolve, sharing is not enabled.
560 559 try:
561 560 rootnode = srcpeer.lookup('0')
562 561 if rootnode != node.nullid:
563 562 sharepath = os.path.join(sharepool, node.hex(rootnode))
564 563 else:
565 564 ui.status(_('(not using pooled storage: '
566 565 'remote appears to be empty)\n'))
567 566 except error.RepoLookupError:
568 567 ui.status(_('(not using pooled storage: '
569 568 'unable to resolve identity of remote)\n'))
570 569 elif sharenamemode == 'remote':
571 570 sharepath = os.path.join(
572 571 sharepool, node.hex(hashlib.sha1(source).digest()))
573 572 else:
574 573 raise error.Abort(_('unknown share naming mode: %s') %
575 574 sharenamemode)
576 575
577 576 if sharepath:
578 577 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
579 578 dest, pull=pull, rev=rev, update=update,
580 579 stream=stream)
581 580
582 581 srclock = destlock = cleandir = None
583 582 srcrepo = srcpeer.local()
584 583 try:
585 584 abspath = origsource
586 585 if islocal(origsource):
587 586 abspath = os.path.abspath(util.urllocalpath(origsource))
588 587
589 588 if islocal(dest):
590 589 cleandir = dest
591 590
592 591 copy = False
593 592 if (srcrepo and srcrepo.cancopy() and islocal(dest)
594 593 and not phases.hassecret(srcrepo)):
595 594 copy = not pull and not rev
596 595
597 596 if copy:
598 597 try:
599 598 # we use a lock here because if we race with commit, we
600 599 # can end up with extra data in the cloned revlogs that's
601 600 # not pointed to by changesets, thus causing verify to
602 601 # fail
603 602 srclock = srcrepo.lock(wait=False)
604 603 except error.LockError:
605 604 copy = False
606 605
607 606 if copy:
608 607 srcrepo.hook('preoutgoing', throw=True, source='clone')
609 608 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
610 609 if not os.path.exists(dest):
611 610 os.mkdir(dest)
612 611 else:
613 612 # only clean up directories we create ourselves
614 613 cleandir = hgdir
615 614 try:
616 615 destpath = hgdir
617 616 util.makedir(destpath, notindexed=True)
618 617 except OSError as inst:
619 618 if inst.errno == errno.EEXIST:
620 619 cleandir = None
621 620 raise error.Abort(_("destination '%s' already exists")
622 621 % dest)
623 622 raise
624 623
625 624 destlock = copystore(ui, srcrepo, destpath)
626 625 # copy bookmarks over
627 626 srcbookmarks = srcrepo.vfs.join('bookmarks')
628 627 dstbookmarks = os.path.join(destpath, 'bookmarks')
629 628 if os.path.exists(srcbookmarks):
630 629 util.copyfile(srcbookmarks, dstbookmarks)
631 630
632 631 dstcachedir = os.path.join(destpath, 'cache')
633 632 for cache in _cachetocopy(srcrepo):
634 633 _copycache(srcrepo, dstcachedir, cache)
635 634
636 635 # we need to re-init the repo after manually copying the data
637 636 # into it
638 637 destpeer = peer(srcrepo, peeropts, dest)
639 638 srcrepo.hook('outgoing', source='clone',
640 639 node=node.hex(node.nullid))
641 640 else:
642 641 try:
643 642 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
644 643 # only pass ui when no srcrepo
645 644 except OSError as inst:
646 645 if inst.errno == errno.EEXIST:
647 646 cleandir = None
648 647 raise error.Abort(_("destination '%s' already exists")
649 648 % dest)
650 649 raise
651 650
652 651 revs = None
653 652 if rev:
654 653 if not srcpeer.capable('lookup'):
655 654 raise error.Abort(_("src repository does not support "
656 655 "revision lookup and so doesn't "
657 656 "support clone by revision"))
658 657 revs = [srcpeer.lookup(r) for r in rev]
659 658 checkout = revs[0]
660 659 local = destpeer.local()
661 660 if local:
662 661 u = util.url(abspath)
663 662 defaulturl = bytes(u)
664 663 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
665 664 if not stream:
666 665 if pull:
667 666 stream = False
668 667 else:
669 668 stream = None
670 669 # internal config: ui.quietbookmarkmove
671 670 overrides = {('ui', 'quietbookmarkmove'): True}
672 671 with local.ui.configoverride(overrides, 'clone'):
673 672 exchange.pull(local, srcpeer, revs,
674 673 streamclonerequested=stream)
675 674 elif srcrepo:
676 675 exchange.push(srcrepo, destpeer, revs=revs,
677 676 bookmarks=srcrepo._bookmarks.keys())
678 677 else:
679 678 raise error.Abort(_("clone from remote to remote not supported")
680 679 )
681 680
682 681 cleandir = None
683 682
684 683 destrepo = destpeer.local()
685 684 if destrepo:
686 685 template = uimod.samplehgrcs['cloned']
687 686 u = util.url(abspath)
688 687 u.passwd = None
689 688 defaulturl = bytes(u)
690 689 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
691 690 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
692 691
693 692 if ui.configbool('experimental', 'remotenames'):
694 693 logexchange.pullremotenames(destrepo, srcpeer)
695 694
696 695 if update:
697 696 if update is not True:
698 697 checkout = srcpeer.lookup(update)
699 698 uprev = None
700 699 status = None
701 700 if checkout is not None:
702 701 try:
703 702 uprev = destrepo.lookup(checkout)
704 703 except error.RepoLookupError:
705 704 if update is not True:
706 705 try:
707 706 uprev = destrepo.lookup(update)
708 707 except error.RepoLookupError:
709 708 pass
710 709 if uprev is None:
711 710 try:
712 711 uprev = destrepo._bookmarks['@']
713 712 update = '@'
714 713 bn = destrepo[uprev].branch()
715 714 if bn == 'default':
716 715 status = _("updating to bookmark @\n")
717 716 else:
718 717 status = (_("updating to bookmark @ on branch %s\n")
719 718 % bn)
720 719 except KeyError:
721 720 try:
722 721 uprev = destrepo.branchtip('default')
723 722 except error.RepoLookupError:
724 723 uprev = destrepo.lookup('tip')
725 724 if not status:
726 725 bn = destrepo[uprev].branch()
727 726 status = _("updating to branch %s\n") % bn
728 727 destrepo.ui.status(status)
729 728 _update(destrepo, uprev)
730 729 if update in destrepo._bookmarks:
731 730 bookmarks.activate(destrepo, update)
732 731 finally:
733 732 release(srclock, destlock)
734 733 if cleandir is not None:
735 734 shutil.rmtree(cleandir, True)
736 735 if srcpeer is not None:
737 736 srcpeer.close()
738 737 return srcpeer, destpeer
739 738
740 739 def _showstats(repo, stats, quietempty=False):
741 740 if quietempty and not any(stats):
742 741 return
743 742 repo.ui.status(_("%d files updated, %d files merged, "
744 743 "%d files removed, %d files unresolved\n") % stats)
745 744
746 745 def updaterepo(repo, node, overwrite, updatecheck=None):
747 746 """Update the working directory to node.
748 747
749 748 When overwrite is set, changes are clobbered; otherwise they are merged
750 749
751 750 returns stats (see pydoc mercurial.merge.applyupdates)"""
752 751 return mergemod.update(repo, node, False, overwrite,
753 752 labels=['working copy', 'destination'],
754 753 updatecheck=updatecheck)
755 754
756 755 def update(repo, node, quietempty=False, updatecheck=None):
757 756 """update the working directory to node"""
758 757 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
759 758 _showstats(repo, stats, quietempty)
760 759 if stats[3]:
761 760 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
762 761 return stats[3] > 0
763 762
764 763 # naming conflict in clone()
765 764 _update = update
766 765
767 766 def clean(repo, node, show_stats=True, quietempty=False):
768 767 """forcibly switch the working directory to node, clobbering changes"""
769 768 stats = updaterepo(repo, node, True)
770 769 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
771 770 if show_stats:
772 771 _showstats(repo, stats, quietempty)
773 772 return stats[3] > 0
774 773
775 774 # naming conflict in updatetotally()
776 775 _clean = clean
777 776
778 777 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
779 778 """Update the working directory with extra care for non-file components
780 779
781 780 This takes care of non-file components below:
782 781
783 782 :bookmark: might be advanced or (in)activated
784 783
785 784 This takes arguments below:
786 785
787 786 :checkout: to which revision the working directory is updated
788 787 :brev: a name, which might be a bookmark to be activated after updating
789 788 :clean: whether changes in the working directory can be discarded
790 789 :updatecheck: how to deal with a dirty working directory
791 790
792 791 Valid values for updatecheck are (None => linear):
793 792
794 793 * abort: abort if the working directory is dirty
795 794 * none: don't check (merge working directory changes into destination)
796 795 * linear: check that update is linear before merging working directory
797 796 changes into destination
798 797 * noconflict: check that the update does not result in file merges
799 798
800 799 This returns whether conflict is detected at updating or not.
801 800 """
802 801 if updatecheck is None:
803 802 updatecheck = ui.config('commands', 'update.check')
804 803 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
805 804 # If not configured, or invalid value configured
806 805 updatecheck = 'linear'
807 806 with repo.wlock():
808 807 movemarkfrom = None
809 808 warndest = False
810 809 if checkout is None:
811 810 updata = destutil.destupdate(repo, clean=clean)
812 811 checkout, movemarkfrom, brev = updata
813 812 warndest = True
814 813
815 814 if clean:
816 815 ret = _clean(repo, checkout)
817 816 else:
818 817 if updatecheck == 'abort':
819 818 cmdutil.bailifchanged(repo, merge=False)
820 819 updatecheck = 'none'
821 820 ret = _update(repo, checkout, updatecheck=updatecheck)
822 821
823 822 if not ret and movemarkfrom:
824 823 if movemarkfrom == repo['.'].node():
825 824 pass # no-op update
826 825 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
827 826 b = ui.label(repo._activebookmark, 'bookmarks.active')
828 827 ui.status(_("updating bookmark %s\n") % b)
829 828 else:
830 829 # this can happen with a non-linear update
831 830 b = ui.label(repo._activebookmark, 'bookmarks')
832 831 ui.status(_("(leaving bookmark %s)\n") % b)
833 832 bookmarks.deactivate(repo)
834 833 elif brev in repo._bookmarks:
835 834 if brev != repo._activebookmark:
836 835 b = ui.label(brev, 'bookmarks.active')
837 836 ui.status(_("(activating bookmark %s)\n") % b)
838 837 bookmarks.activate(repo, brev)
839 838 elif brev:
840 839 if repo._activebookmark:
841 840 b = ui.label(repo._activebookmark, 'bookmarks')
842 841 ui.status(_("(leaving bookmark %s)\n") % b)
843 842 bookmarks.deactivate(repo)
844 843
845 844 if warndest:
846 845 destutil.statusotherdests(ui, repo)
847 846
848 847 return ret
849 848
850 849 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
851 850 abort=False):
852 851 """Branch merge with node, resolving changes. Return true if any
853 852 unresolved conflicts."""
854 853 if not abort:
855 854 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
856 855 labels=labels)
857 856 else:
858 857 ms = mergemod.mergestate.read(repo)
859 858 if ms.active():
860 859 # there were conflicts
861 node = hex(ms._local)
860 node = ms.localctx.hex()
862 861 else:
863 862 # there were no conflicts, mergestate was not stored
864 863 node = repo['.'].hex()
865 864
866 865 repo.ui.status(_("aborting the merge, updating back to"
867 866 " %s\n") % node[:12])
868 867 stats = mergemod.update(repo, node, branchmerge=False, force=True,
869 868 labels=labels)
870 869
871 870 _showstats(repo, stats)
872 871 if stats[3]:
873 872 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
874 873 "or 'hg merge --abort' to abandon\n"))
875 874 elif remind and not abort:
876 875 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
877 876 return stats[3] > 0
878 877
879 878 def _incoming(displaychlist, subreporecurse, ui, repo, source,
880 879 opts, buffered=False):
881 880 """
882 881 Helper for incoming / gincoming.
883 882 displaychlist gets called with
884 883 (remoterepo, incomingchangesetlist, displayer) parameters,
885 884 and is supposed to contain only code that can't be unified.
886 885 """
887 886 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
888 887 other = peer(repo, opts, source)
889 888 ui.status(_('comparing with %s\n') % util.hidepassword(source))
890 889 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
891 890
892 891 if revs:
893 892 revs = [other.lookup(rev) for rev in revs]
894 893 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
895 894 revs, opts["bundle"], opts["force"])
896 895 try:
897 896 if not chlist:
898 897 ui.status(_("no changes found\n"))
899 898 return subreporecurse()
900 899 ui.pager('incoming')
901 900 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
902 901 displaychlist(other, chlist, displayer)
903 902 displayer.close()
904 903 finally:
905 904 cleanupfn()
906 905 subreporecurse()
907 906 return 0 # exit code is zero since we found incoming changes
908 907
909 908 def incoming(ui, repo, source, opts):
910 909 def subreporecurse():
911 910 ret = 1
912 911 if opts.get('subrepos'):
913 912 ctx = repo[None]
914 913 for subpath in sorted(ctx.substate):
915 914 sub = ctx.sub(subpath)
916 915 ret = min(ret, sub.incoming(ui, source, opts))
917 916 return ret
918 917
919 918 def display(other, chlist, displayer):
920 919 limit = cmdutil.loglimit(opts)
921 920 if opts.get('newest_first'):
922 921 chlist.reverse()
923 922 count = 0
924 923 for n in chlist:
925 924 if limit is not None and count >= limit:
926 925 break
927 926 parents = [p for p in other.changelog.parents(n) if p != nullid]
928 927 if opts.get('no_merges') and len(parents) == 2:
929 928 continue
930 929 count += 1
931 930 displayer.show(other[n])
932 931 return _incoming(display, subreporecurse, ui, repo, source, opts)
933 932
934 933 def _outgoing(ui, repo, dest, opts):
935 934 path = ui.paths.getpath(dest, default=('default-push', 'default'))
936 935 if not path:
937 936 raise error.Abort(_('default repository not configured!'),
938 937 hint=_("see 'hg help config.paths'"))
939 938 dest = path.pushloc or path.loc
940 939 branches = path.branch, opts.get('branch') or []
941 940
942 941 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
943 942 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
944 943 if revs:
945 944 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
946 945
947 946 other = peer(repo, opts, dest)
948 947 outgoing = discovery.findcommonoutgoing(repo, other, revs,
949 948 force=opts.get('force'))
950 949 o = outgoing.missing
951 950 if not o:
952 951 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
953 952 return o, other
954 953
955 954 def outgoing(ui, repo, dest, opts):
956 955 def recurse():
957 956 ret = 1
958 957 if opts.get('subrepos'):
959 958 ctx = repo[None]
960 959 for subpath in sorted(ctx.substate):
961 960 sub = ctx.sub(subpath)
962 961 ret = min(ret, sub.outgoing(ui, dest, opts))
963 962 return ret
964 963
965 964 limit = cmdutil.loglimit(opts)
966 965 o, other = _outgoing(ui, repo, dest, opts)
967 966 if not o:
968 967 cmdutil.outgoinghooks(ui, repo, other, opts, o)
969 968 return recurse()
970 969
971 970 if opts.get('newest_first'):
972 971 o.reverse()
973 972 ui.pager('outgoing')
974 973 displayer = cmdutil.show_changeset(ui, repo, opts)
975 974 count = 0
976 975 for n in o:
977 976 if limit is not None and count >= limit:
978 977 break
979 978 parents = [p for p in repo.changelog.parents(n) if p != nullid]
980 979 if opts.get('no_merges') and len(parents) == 2:
981 980 continue
982 981 count += 1
983 982 displayer.show(repo[n])
984 983 displayer.close()
985 984 cmdutil.outgoinghooks(ui, repo, other, opts, o)
986 985 recurse()
987 986 return 0 # exit code is zero since we found outgoing changes
988 987
989 988 def verify(repo):
990 989 """verify the consistency of a repository"""
991 990 ret = verifymod.verify(repo)
992 991
993 992 # Broken subrepo references in hidden csets don't seem worth worrying about,
994 993 # since they can't be pushed/pulled, and --hidden can be used if they are a
995 994 # concern.
996 995
997 996 # pathto() is needed for -R case
998 997 revs = repo.revs("filelog(%s)",
999 998 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1000 999
1001 1000 if revs:
1002 1001 repo.ui.status(_('checking subrepo links\n'))
1003 1002 for rev in revs:
1004 1003 ctx = repo[rev]
1005 1004 try:
1006 1005 for subpath in ctx.substate:
1007 1006 try:
1008 1007 ret = (ctx.sub(subpath, allowcreate=False).verify()
1009 1008 or ret)
1010 1009 except error.RepoError as e:
1011 1010 repo.ui.warn(('%s: %s\n') % (rev, e))
1012 1011 except Exception:
1013 1012 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1014 1013 node.short(ctx.node()))
1015 1014
1016 1015 return ret
1017 1016
1018 1017 def remoteui(src, opts):
1019 1018 'build a remote ui from ui or repo and opts'
1020 1019 if util.safehasattr(src, 'baseui'): # looks like a repository
1021 1020 dst = src.baseui.copy() # drop repo-specific config
1022 1021 src = src.ui # copy target options from repo
1023 1022 else: # assume it's a global ui object
1024 1023 dst = src.copy() # keep all global options
1025 1024
1026 1025 # copy ssh-specific options
1027 1026 for o in 'ssh', 'remotecmd':
1028 1027 v = opts.get(o) or src.config('ui', o)
1029 1028 if v:
1030 1029 dst.setconfig("ui", o, v, 'copied')
1031 1030
1032 1031 # copy bundle-specific options
1033 1032 r = src.config('bundle', 'mainreporoot')
1034 1033 if r:
1035 1034 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1036 1035
1037 1036 # copy selected local settings to the remote ui
1038 1037 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1039 1038 for key, val in src.configitems(sect):
1040 1039 dst.setconfig(sect, key, val, 'copied')
1041 1040 v = src.config('web', 'cacerts')
1042 1041 if v:
1043 1042 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1044 1043
1045 1044 return dst
1046 1045
1047 1046 # Files of interest
1048 1047 # Used to check if the repository has changed looking at mtime and size of
1049 1048 # these files.
1050 1049 foi = [('spath', '00changelog.i'),
1051 1050 ('spath', 'phaseroots'), # ! phase can change content at the same size
1052 1051 ('spath', 'obsstore'),
1053 1052 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1054 1053 ]
1055 1054
1056 1055 class cachedlocalrepo(object):
1057 1056 """Holds a localrepository that can be cached and reused."""
1058 1057
1059 1058 def __init__(self, repo):
1060 1059 """Create a new cached repo from an existing repo.
1061 1060
1062 1061 We assume the passed in repo was recently created. If the
1063 1062 repo has changed between when it was created and when it was
1064 1063 turned into a cache, it may not refresh properly.
1065 1064 """
1066 1065 assert isinstance(repo, localrepo.localrepository)
1067 1066 self._repo = repo
1068 1067 self._state, self.mtime = self._repostate()
1069 1068 self._filtername = repo.filtername
1070 1069
1071 1070 def fetch(self):
1072 1071 """Refresh (if necessary) and return a repository.
1073 1072
1074 1073 If the cached instance is out of date, it will be recreated
1075 1074 automatically and returned.
1076 1075
1077 1076 Returns a tuple of the repo and a boolean indicating whether a new
1078 1077 repo instance was created.
1079 1078 """
1080 1079 # We compare the mtimes and sizes of some well-known files to
1081 1080 # determine if the repo changed. This is not precise, as mtimes
1082 1081 # are susceptible to clock skew and imprecise filesystems and
1083 1082 # file content can change while maintaining the same size.
1084 1083
1085 1084 state, mtime = self._repostate()
1086 1085 if state == self._state:
1087 1086 return self._repo, False
1088 1087
1089 1088 repo = repository(self._repo.baseui, self._repo.url())
1090 1089 if self._filtername:
1091 1090 self._repo = repo.filtered(self._filtername)
1092 1091 else:
1093 1092 self._repo = repo.unfiltered()
1094 1093 self._state = state
1095 1094 self.mtime = mtime
1096 1095
1097 1096 return self._repo, True
1098 1097
1099 1098 def _repostate(self):
1100 1099 state = []
1101 1100 maxmtime = -1
1102 1101 for attr, fname in foi:
1103 1102 prefix = getattr(self._repo, attr)
1104 1103 p = os.path.join(prefix, fname)
1105 1104 try:
1106 1105 st = os.stat(p)
1107 1106 except OSError:
1108 1107 st = os.stat(prefix)
1109 1108 state.append((st.st_mtime, st.st_size))
1110 1109 maxmtime = max(maxmtime, st.st_mtime)
1111 1110
1112 1111 return tuple(state), maxmtime
1113 1112
1114 1113 def copy(self):
1115 1114 """Obtain a copy of this class instance.
1116 1115
1117 1116 A new localrepository instance is obtained. The new instance should be
1118 1117 completely independent of the original.
1119 1118 """
1120 1119 repo = repository(self._repo.baseui, self._repo.origroot)
1121 1120 if self._filtername:
1122 1121 repo = repo.filtered(self._filtername)
1123 1122 else:
1124 1123 repo = repo.unfiltered()
1125 1124 c = cachedlocalrepo(repo)
1126 1125 c._state = self._state
1127 1126 c.mtime = self.mtime
1128 1127 return c
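cachedlocalrepo revalidates against the files-of-interest list foi defined above: fetch() re-stats those files and rebuilds the repository object only when their mtimes or sizes changed. A minimal usage sketch based on the API shown in this file; the repository path is a placeholder and ui.load() is assumed to be available for a standalone script:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, path='/path/to/repo')  # placeholder path

    cached = hg.cachedlocalrepo(repo)
    # fetch() returns the (possibly recreated) repo and a boolean telling
    # whether a new instance had to be built because the repo changed on disk
    repo, created = cached.fetch()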