clone: use utility function to write hgrc
Yuya Nishihara
r35638:545967ec default
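The hunk in clone() below drops a hand-rolled open/write/close sequence for the new repository's .hg/hgrc and writes the file with the vfs write() helper in a single call. As a rough illustration of that pattern (a minimal sketch only; the simplevfs class and its write() method are invented for this example and are not Mercurial's real vfs API), the helper bundles opening, writing, and closing so callers cannot leak the file handle:

    import os

    class simplevfs(object):
        """Minimal stand-in for a vfs rooted at a directory (illustrative only)."""

        def __init__(self, base):
            self.base = base

        def write(self, path, data):
            # open, write, and close in one place, mirroring the utility
            # function this changeset switches clone() to
            with open(os.path.join(self.base, path), 'wb') as fp:
                fp.write(data)

    # before: the caller managed the file object itself
    #   fp = destrepo.vfs("hgrc", "wb")
    #   fp.write(util.tonativeeol(template % defaulturl))
    #   fp.close()
    # after: one call, same bytes on disk
    #   destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))

The single-call form also matches how postshare() and other callers in this file already write small control files through the vfs.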
@@ -1,1112 +1,1109
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 bookmarks,
21 21 bundlerepo,
22 22 cmdutil,
23 23 destutil,
24 24 discovery,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 httppeer,
29 29 localrepo,
30 30 lock,
31 31 logexchange,
32 32 merge as mergemod,
33 33 node,
34 34 phases,
35 35 repoview,
36 36 scmutil,
37 37 sshpeer,
38 38 statichttprepo,
39 39 ui as uimod,
40 40 unionrepo,
41 41 url,
42 42 util,
43 43 verify as verifymod,
44 44 vfs as vfsmod,
45 45 )
46 46
47 47 release = lock.release
48 48
49 49 # shared features
50 50 sharedbookmarks = 'bookmarks'
51 51
52 52 def _local(path):
53 53 path = util.expandpath(util.urllocalpath(path))
54 54 return (os.path.isfile(path) and bundlerepo or localrepo)
55 55
56 56 def addbranchrevs(lrepo, other, branches, revs):
57 57 peer = other.peer() # a courtesy to callers using a localrepo for other
58 58 hashbranch, branches = branches
59 59 if not hashbranch and not branches:
60 60 x = revs or None
61 61 if util.safehasattr(revs, 'first'):
62 62 y = revs.first()
63 63 elif revs:
64 64 y = revs[0]
65 65 else:
66 66 y = None
67 67 return x, y
68 68 if revs:
69 69 revs = list(revs)
70 70 else:
71 71 revs = []
72 72
73 73 if not peer.capable('branchmap'):
74 74 if branches:
75 75 raise error.Abort(_("remote branch lookup not supported"))
76 76 revs.append(hashbranch)
77 77 return revs, revs[0]
78 78 branchmap = peer.branchmap()
79 79
80 80 def primary(branch):
81 81 if branch == '.':
82 82 if not lrepo:
83 83 raise error.Abort(_("dirstate branch not accessible"))
84 84 branch = lrepo.dirstate.branch()
85 85 if branch in branchmap:
86 86 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
87 87 return True
88 88 else:
89 89 return False
90 90
91 91 for branch in branches:
92 92 if not primary(branch):
93 93 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
94 94 if hashbranch:
95 95 if not primary(hashbranch):
96 96 revs.append(hashbranch)
97 97 return revs, revs[0]
98 98
99 99 def parseurl(path, branches=None):
100 100 '''parse url#branch, returning (url, (branch, branches))'''
101 101
102 102 u = util.url(path)
103 103 branch = None
104 104 if u.fragment:
105 105 branch = u.fragment
106 106 u.fragment = None
107 107 return bytes(u), (branch, branches or [])
108 108
109 109 schemes = {
110 110 'bundle': bundlerepo,
111 111 'union': unionrepo,
112 112 'file': _local,
113 113 'http': httppeer,
114 114 'https': httppeer,
115 115 'ssh': sshpeer,
116 116 'static-http': statichttprepo,
117 117 }
118 118
119 119 def _peerlookup(path):
120 120 u = util.url(path)
121 121 scheme = u.scheme or 'file'
122 122 thing = schemes.get(scheme) or schemes['file']
123 123 try:
124 124 return thing(path)
125 125 except TypeError:
126 126 # we can't test callable(thing) because 'thing' can be an unloaded
127 127 # module that implements __call__
128 128 if not util.safehasattr(thing, 'instance'):
129 129 raise
130 130 return thing
131 131
132 132 def islocal(repo):
133 133 '''return true if repo (or path pointing to repo) is local'''
134 134 if isinstance(repo, bytes):
135 135 try:
136 136 return _peerlookup(repo).islocal(repo)
137 137 except AttributeError:
138 138 return False
139 139 return repo.local()
140 140
141 141 def openpath(ui, path):
142 142 '''open path with open if local, url.open if remote'''
143 143 pathurl = util.url(path, parsequery=False, parsefragment=False)
144 144 if pathurl.islocal():
145 145 return util.posixfile(pathurl.localpath(), 'rb')
146 146 else:
147 147 return url.open(ui, path)
148 148
149 149 # a list of (ui, repo) functions called for wire peer initialization
150 150 wirepeersetupfuncs = []
151 151
152 152 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
153 153 """return a repository object for the specified path"""
154 154 obj = _peerlookup(path).instance(ui, path, create)
155 155 ui = getattr(obj, "ui", ui)
156 156 for f in presetupfuncs or []:
157 157 f(ui, obj)
158 158 for name, module in extensions.extensions(ui):
159 159 hook = getattr(module, 'reposetup', None)
160 160 if hook:
161 161 hook(ui, obj)
162 162 if not obj.local():
163 163 for f in wirepeersetupfuncs:
164 164 f(ui, obj)
165 165 return obj
166 166
167 167 def repository(ui, path='', create=False, presetupfuncs=None):
168 168 """return a repository object for the specified path"""
169 169 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
170 170 repo = peer.local()
171 171 if not repo:
172 172 raise error.Abort(_("repository '%s' is not local") %
173 173 (path or peer.url()))
174 174 return repo.filtered('visible')
175 175
176 176 def peer(uiorrepo, opts, path, create=False):
177 177 '''return a repository peer for the specified path'''
178 178 rui = remoteui(uiorrepo, opts)
179 179 return _peerorrepo(rui, path, create).peer()
180 180
181 181 def defaultdest(source):
182 182 '''return default destination of clone if none is given
183 183
184 184 >>> defaultdest(b'foo')
185 185 'foo'
186 186 >>> defaultdest(b'/foo/bar')
187 187 'bar'
188 188 >>> defaultdest(b'/')
189 189 ''
190 190 >>> defaultdest(b'')
191 191 ''
192 192 >>> defaultdest(b'http://example.org/')
193 193 ''
194 194 >>> defaultdest(b'http://example.org/foo/')
195 195 'foo'
196 196 '''
197 197 path = util.url(source).path
198 198 if not path:
199 199 return ''
200 200 return os.path.basename(os.path.normpath(path))
201 201
202 202 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
203 203 relative=False):
204 204 '''create a shared repository'''
205 205
206 206 if not islocal(source):
207 207 raise error.Abort(_('can only share local repositories'))
208 208
209 209 if not dest:
210 210 dest = defaultdest(source)
211 211 else:
212 212 dest = ui.expandpath(dest)
213 213
214 214 if isinstance(source, str):
215 215 origsource = ui.expandpath(source)
216 216 source, branches = parseurl(origsource)
217 217 srcrepo = repository(ui, source)
218 218 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
219 219 else:
220 220 srcrepo = source.local()
221 221 origsource = source = srcrepo.url()
222 222 checkout = None
223 223
224 224 sharedpath = srcrepo.sharedpath # if our source is already sharing
225 225
226 226 destwvfs = vfsmod.vfs(dest, realpath=True)
227 227 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
228 228
229 229 if destvfs.lexists():
230 230 raise error.Abort(_('destination already exists'))
231 231
232 232 if not destwvfs.isdir():
233 233 destwvfs.mkdir()
234 234 destvfs.makedir()
235 235
236 236 requirements = ''
237 237 try:
238 238 requirements = srcrepo.vfs.read('requires')
239 239 except IOError as inst:
240 240 if inst.errno != errno.ENOENT:
241 241 raise
242 242
243 243 if relative:
244 244 try:
245 245 sharedpath = os.path.relpath(sharedpath, destvfs.base)
246 246 requirements += 'relshared\n'
247 247 except (IOError, ValueError) as e:
248 248 # ValueError is raised on Windows if the drive letters differ on
249 249 # each path
250 250 raise error.Abort(_('cannot calculate relative path'),
251 251 hint=str(e))
252 252 else:
253 253 requirements += 'shared\n'
254 254
255 255 destvfs.write('requires', requirements)
256 256 destvfs.write('sharedpath', sharedpath)
257 257
258 258 r = repository(ui, destwvfs.base)
259 259 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
260 260 _postshareupdate(r, update, checkout=checkout)
261 261 return r
262 262
263 263 def unshare(ui, repo):
264 264 """convert a shared repository to a normal one
265 265
266 266 Copy the store data to the repo and remove the sharedpath data.
267 267 """
268 268
269 269 destlock = lock = None
270 270 lock = repo.lock()
271 271 try:
272 272 # we use locks here because if we race with commit, we
273 273 # can end up with extra data in the cloned revlogs that's
274 274 # not pointed to by changesets, thus causing verify to
275 275 # fail
276 276
277 277 destlock = copystore(ui, repo, repo.path)
278 278
279 279 sharefile = repo.vfs.join('sharedpath')
280 280 util.rename(sharefile, sharefile + '.old')
281 281
282 282 repo.requirements.discard('shared')
283 283 repo.requirements.discard('relshared')
284 284 repo._writerequirements()
285 285 finally:
286 286 destlock and destlock.release()
287 287 lock and lock.release()
288 288
289 289 # update store, spath, svfs and sjoin of repo
290 290 repo.unfiltered().__init__(repo.baseui, repo.root)
291 291
292 292 # TODO: figure out how to access subrepos that exist, but were previously
293 293 # removed from .hgsub
294 294 c = repo['.']
295 295 subs = c.substate
296 296 for s in sorted(subs):
297 297 c.sub(s).unshare()
298 298
299 299 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
300 300 """Called after a new shared repo is created.
301 301
302 302 The new repo only has a requirements file and pointer to the source.
303 303 This function configures additional shared data.
304 304
305 305 Extensions can wrap this function and write additional entries to
306 306 destrepo/.hg/shared to indicate additional pieces of data to be shared.
307 307 """
308 308 default = defaultpath or sourcerepo.ui.config('paths', 'default')
309 309 if default:
310 310 with destrepo.vfs("hgrc", "w", text=True) as fp:
311 311 fp.write("[paths]\n")
312 312 fp.write("default = %s\n" % default)
313 313
314 314 with destrepo.wlock():
315 315 if bookmarks:
316 316 destrepo.vfs.write('shared', sharedbookmarks + '\n')
317 317
318 318 def _postshareupdate(repo, update, checkout=None):
319 319 """Maybe perform a working directory update after a shared repo is created.
320 320
321 321 ``update`` can be a boolean or a revision to update to.
322 322 """
323 323 if not update:
324 324 return
325 325
326 326 repo.ui.status(_("updating working directory\n"))
327 327 if update is not True:
328 328 checkout = update
329 329 for test in (checkout, 'default', 'tip'):
330 330 if test is None:
331 331 continue
332 332 try:
333 333 uprev = repo.lookup(test)
334 334 break
335 335 except error.RepoLookupError:
336 336 continue
337 337 _update(repo, uprev)
338 338
339 339 def copystore(ui, srcrepo, destpath):
340 340 '''copy files from store of srcrepo in destpath
341 341
342 342 returns destlock
343 343 '''
344 344 destlock = None
345 345 try:
346 346 hardlink = None
347 347 num = 0
348 348 closetopic = [None]
349 349 def prog(topic, pos):
350 350 if pos is None:
351 351 closetopic[0] = topic
352 352 else:
353 353 ui.progress(topic, pos + num)
354 354 srcpublishing = srcrepo.publishing()
355 355 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
356 356 dstvfs = vfsmod.vfs(destpath)
357 357 for f in srcrepo.store.copylist():
358 358 if srcpublishing and f.endswith('phaseroots'):
359 359 continue
360 360 dstbase = os.path.dirname(f)
361 361 if dstbase and not dstvfs.exists(dstbase):
362 362 dstvfs.mkdir(dstbase)
363 363 if srcvfs.exists(f):
364 364 if f.endswith('data'):
365 365 # 'dstbase' may be empty (e.g. revlog format 0)
366 366 lockfile = os.path.join(dstbase, "lock")
367 367 # lock to avoid premature writing to the target
368 368 destlock = lock.lock(dstvfs, lockfile)
369 369 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
370 370 hardlink, progress=prog)
371 371 num += n
372 372 if hardlink:
373 373 ui.debug("linked %d files\n" % num)
374 374 if closetopic[0]:
375 375 ui.progress(closetopic[0], None)
376 376 else:
377 377 ui.debug("copied %d files\n" % num)
378 378 if closetopic[0]:
379 379 ui.progress(closetopic[0], None)
380 380 return destlock
381 381 except: # re-raises
382 382 release(destlock)
383 383 raise
384 384
385 385 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
386 386 rev=None, update=True, stream=False):
387 387 """Perform a clone using a shared repo.
388 388
389 389 The store for the repository will be located at <sharepath>/.hg. The
390 390 specified revisions will be cloned or pulled from "source". A shared repo
391 391 will be created at "dest" and a working copy will be created if "update" is
392 392 True.
393 393 """
394 394 revs = None
395 395 if rev:
396 396 if not srcpeer.capable('lookup'):
397 397 raise error.Abort(_("src repository does not support "
398 398 "revision lookup and so doesn't "
399 399 "support clone by revision"))
400 400 revs = [srcpeer.lookup(r) for r in rev]
401 401
402 402 # Obtain a lock before checking for or cloning the pooled repo otherwise
403 403 # 2 clients may race creating or populating it.
404 404 pooldir = os.path.dirname(sharepath)
405 405 # lock class requires the directory to exist.
406 406 try:
407 407 util.makedir(pooldir, False)
408 408 except OSError as e:
409 409 if e.errno != errno.EEXIST:
410 410 raise
411 411
412 412 poolvfs = vfsmod.vfs(pooldir)
413 413 basename = os.path.basename(sharepath)
414 414
415 415 with lock.lock(poolvfs, '%s.lock' % basename):
416 416 if os.path.exists(sharepath):
417 417 ui.status(_('(sharing from existing pooled repository %s)\n') %
418 418 basename)
419 419 else:
420 420 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
421 421 # Always use pull mode because hardlinks in share mode don't work
422 422 # well. Never update because working copies aren't necessary in
423 423 # share mode.
424 424 clone(ui, peeropts, source, dest=sharepath, pull=True,
425 425 rev=rev, update=False, stream=stream)
426 426
427 427 # Resolve the value to put in [paths] section for the source.
428 428 if islocal(source):
429 429 defaultpath = os.path.abspath(util.urllocalpath(source))
430 430 else:
431 431 defaultpath = source
432 432
433 433 sharerepo = repository(ui, path=sharepath)
434 434 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
435 435 defaultpath=defaultpath)
436 436
437 437 # We need to perform a pull against the dest repo to fetch bookmarks
438 438 # and other non-store data that isn't shared by default. In the case of
439 439 # non-existing shared repo, this means we pull from the remote twice. This
440 440 # is a bit weird. But at the time it was implemented, there wasn't an easy
441 441 # way to pull just non-changegroup data.
442 442 destrepo = repository(ui, path=dest)
443 443 exchange.pull(destrepo, srcpeer, heads=revs)
444 444
445 445 _postshareupdate(destrepo, update)
446 446
447 447 return srcpeer, peer(ui, peeropts, dest)
448 448
449 449 # Recomputing branch cache might be slow on big repos,
450 450 # so just copy it
451 451 def _copycache(srcrepo, dstcachedir, fname):
452 452 """copy a cache from srcrepo to destcachedir (if it exists)"""
453 453 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
454 454 dstbranchcache = os.path.join(dstcachedir, fname)
455 455 if os.path.exists(srcbranchcache):
456 456 if not os.path.exists(dstcachedir):
457 457 os.mkdir(dstcachedir)
458 458 util.copyfile(srcbranchcache, dstbranchcache)
459 459
460 460 def _cachetocopy(srcrepo):
461 461 """return the list of cache file valuable to copy during a clone"""
462 462 # In local clones we're copying all nodes, not just served
463 463 # ones. Therefore copy all branch caches over.
464 464 cachefiles = ['branch2']
465 465 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
466 466 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
467 467 cachefiles += ['tags2']
468 468 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
469 469 cachefiles += ['hgtagsfnodes1']
470 470 return cachefiles
471 471
472 472 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
473 473 update=True, stream=False, branch=None, shareopts=None):
474 474 """Make a copy of an existing repository.
475 475
476 476 Create a copy of an existing repository in a new directory. The
477 477 source and destination are URLs, as passed to the repository
478 478 function. Returns a pair of repository peers, the source and
479 479 newly created destination.
480 480
481 481 The location of the source is added to the new repository's
482 482 .hg/hgrc file, as the default to be used for future pulls and
483 483 pushes.
484 484
485 485 If an exception is raised, the partly cloned/updated destination
486 486 repository will be deleted.
487 487
488 488 Arguments:
489 489
490 490 source: repository object or URL
491 491
492 492 dest: URL of destination repository to create (defaults to base
493 493 name of source repository)
494 494
495 495 pull: always pull from source repository, even in local case or if the
496 496 server prefers streaming
497 497
498 498 stream: stream raw data uncompressed from repository (fast over
499 499 LAN, slow over WAN)
500 500
501 501 rev: revision to clone up to (implies pull=True)
502 502
503 503 update: update working directory after clone completes, if
504 504 destination is local repository (True means update to default rev,
505 505 anything else is treated as a revision)
506 506
507 507 branch: branches to clone
508 508
509 509 shareopts: dict of options to control auto sharing behavior. The "pool" key
510 510 activates auto sharing mode and defines the directory for stores. The
511 511 "mode" key determines how to construct the directory name of the shared
512 512 repository. "identity" means the name is derived from the node of the first
513 513 changeset in the repository. "remote" means the name is derived from the
514 514 remote's path/URL. Defaults to "identity."
515 515 """
516 516
517 517 if isinstance(source, bytes):
518 518 origsource = ui.expandpath(source)
519 519 source, branch = parseurl(origsource, branch)
520 520 srcpeer = peer(ui, peeropts, source)
521 521 else:
522 522 srcpeer = source.peer() # in case we were called with a localrepo
523 523 branch = (None, branch or [])
524 524 origsource = source = srcpeer.url()
525 525 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
526 526
527 527 if dest is None:
528 528 dest = defaultdest(source)
529 529 if dest:
530 530 ui.status(_("destination directory: %s\n") % dest)
531 531 else:
532 532 dest = ui.expandpath(dest)
533 533
534 534 dest = util.urllocalpath(dest)
535 535 source = util.urllocalpath(source)
536 536
537 537 if not dest:
538 538 raise error.Abort(_("empty destination path is not valid"))
539 539
540 540 destvfs = vfsmod.vfs(dest, expandpath=True)
541 541 if destvfs.lexists():
542 542 if not destvfs.isdir():
543 543 raise error.Abort(_("destination '%s' already exists") % dest)
544 544 elif destvfs.listdir():
545 545 raise error.Abort(_("destination '%s' is not empty") % dest)
546 546
547 547 shareopts = shareopts or {}
548 548 sharepool = shareopts.get('pool')
549 549 sharenamemode = shareopts.get('mode')
550 550 if sharepool and islocal(dest):
551 551 sharepath = None
552 552 if sharenamemode == 'identity':
553 553 # Resolve the name from the initial changeset in the remote
554 554 # repository. This returns nullid when the remote is empty. It
555 555 # raises RepoLookupError if revision 0 is filtered or otherwise
556 556 # not available. If we fail to resolve, sharing is not enabled.
557 557 try:
558 558 rootnode = srcpeer.lookup('0')
559 559 if rootnode != node.nullid:
560 560 sharepath = os.path.join(sharepool, node.hex(rootnode))
561 561 else:
562 562 ui.status(_('(not using pooled storage: '
563 563 'remote appears to be empty)\n'))
564 564 except error.RepoLookupError:
565 565 ui.status(_('(not using pooled storage: '
566 566 'unable to resolve identity of remote)\n'))
567 567 elif sharenamemode == 'remote':
568 568 sharepath = os.path.join(
569 569 sharepool, node.hex(hashlib.sha1(source).digest()))
570 570 else:
571 571 raise error.Abort(_('unknown share naming mode: %s') %
572 572 sharenamemode)
573 573
574 574 if sharepath:
575 575 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
576 576 dest, pull=pull, rev=rev, update=update,
577 577 stream=stream)
578 578
579 579 srclock = destlock = cleandir = None
580 580 srcrepo = srcpeer.local()
581 581 try:
582 582 abspath = origsource
583 583 if islocal(origsource):
584 584 abspath = os.path.abspath(util.urllocalpath(origsource))
585 585
586 586 if islocal(dest):
587 587 cleandir = dest
588 588
589 589 copy = False
590 590 if (srcrepo and srcrepo.cancopy() and islocal(dest)
591 591 and not phases.hassecret(srcrepo)):
592 592 copy = not pull and not rev
593 593
594 594 if copy:
595 595 try:
596 596 # we use a lock here because if we race with commit, we
597 597 # can end up with extra data in the cloned revlogs that's
598 598 # not pointed to by changesets, thus causing verify to
599 599 # fail
600 600 srclock = srcrepo.lock(wait=False)
601 601 except error.LockError:
602 602 copy = False
603 603
604 604 if copy:
605 605 srcrepo.hook('preoutgoing', throw=True, source='clone')
606 606 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
607 607 if not os.path.exists(dest):
608 608 os.mkdir(dest)
609 609 else:
610 610 # only clean up directories we create ourselves
611 611 cleandir = hgdir
612 612 try:
613 613 destpath = hgdir
614 614 util.makedir(destpath, notindexed=True)
615 615 except OSError as inst:
616 616 if inst.errno == errno.EEXIST:
617 617 cleandir = None
618 618 raise error.Abort(_("destination '%s' already exists")
619 619 % dest)
620 620 raise
621 621
622 622 destlock = copystore(ui, srcrepo, destpath)
623 623 # copy bookmarks over
624 624 srcbookmarks = srcrepo.vfs.join('bookmarks')
625 625 dstbookmarks = os.path.join(destpath, 'bookmarks')
626 626 if os.path.exists(srcbookmarks):
627 627 util.copyfile(srcbookmarks, dstbookmarks)
628 628
629 629 dstcachedir = os.path.join(destpath, 'cache')
630 630 for cache in _cachetocopy(srcrepo):
631 631 _copycache(srcrepo, dstcachedir, cache)
632 632
633 633 # we need to re-init the repo after manually copying the data
634 634 # into it
635 635 destpeer = peer(srcrepo, peeropts, dest)
636 636 srcrepo.hook('outgoing', source='clone',
637 637 node=node.hex(node.nullid))
638 638 else:
639 639 try:
640 640 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
641 641 # only pass ui when no srcrepo
642 642 except OSError as inst:
643 643 if inst.errno == errno.EEXIST:
644 644 cleandir = None
645 645 raise error.Abort(_("destination '%s' already exists")
646 646 % dest)
647 647 raise
648 648
649 649 revs = None
650 650 if rev:
651 651 if not srcpeer.capable('lookup'):
652 652 raise error.Abort(_("src repository does not support "
653 653 "revision lookup and so doesn't "
654 654 "support clone by revision"))
655 655 revs = [srcpeer.lookup(r) for r in rev]
656 656 checkout = revs[0]
657 657 local = destpeer.local()
658 658 if local:
659 659 u = util.url(abspath)
660 660 defaulturl = bytes(u)
661 661 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
662 662 if not stream:
663 663 if pull:
664 664 stream = False
665 665 else:
666 666 stream = None
667 667 # internal config: ui.quietbookmarkmove
668 668 overrides = {('ui', 'quietbookmarkmove'): True}
669 669 with local.ui.configoverride(overrides, 'clone'):
670 670 exchange.pull(local, srcpeer, revs,
671 671 streamclonerequested=stream)
672 672 elif srcrepo:
673 673 exchange.push(srcrepo, destpeer, revs=revs,
674 674 bookmarks=srcrepo._bookmarks.keys())
675 675 else:
676 676 raise error.Abort(_("clone from remote to remote not supported")
677 677 )
678 678
679 679 cleandir = None
680 680
681 681 destrepo = destpeer.local()
682 682 if destrepo:
683 683 template = uimod.samplehgrcs['cloned']
684 fp = destrepo.vfs("hgrc", "wb")
685 684 u = util.url(abspath)
686 685 u.passwd = None
687 686 defaulturl = bytes(u)
688 fp.write(util.tonativeeol(template % defaulturl))
689 fp.close()
690
687 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
691 688 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
692 689
693 690 if ui.configbool('experimental', 'remotenames'):
694 691 logexchange.pullremotenames(destrepo, srcpeer)
695 692
696 693 if update:
697 694 if update is not True:
698 695 checkout = srcpeer.lookup(update)
699 696 uprev = None
700 697 status = None
701 698 if checkout is not None:
702 699 try:
703 700 uprev = destrepo.lookup(checkout)
704 701 except error.RepoLookupError:
705 702 if update is not True:
706 703 try:
707 704 uprev = destrepo.lookup(update)
708 705 except error.RepoLookupError:
709 706 pass
710 707 if uprev is None:
711 708 try:
712 709 uprev = destrepo._bookmarks['@']
713 710 update = '@'
714 711 bn = destrepo[uprev].branch()
715 712 if bn == 'default':
716 713 status = _("updating to bookmark @\n")
717 714 else:
718 715 status = (_("updating to bookmark @ on branch %s\n")
719 716 % bn)
720 717 except KeyError:
721 718 try:
722 719 uprev = destrepo.branchtip('default')
723 720 except error.RepoLookupError:
724 721 uprev = destrepo.lookup('tip')
725 722 if not status:
726 723 bn = destrepo[uprev].branch()
727 724 status = _("updating to branch %s\n") % bn
728 725 destrepo.ui.status(status)
729 726 _update(destrepo, uprev)
730 727 if update in destrepo._bookmarks:
731 728 bookmarks.activate(destrepo, update)
732 729 finally:
733 730 release(srclock, destlock)
734 731 if cleandir is not None:
735 732 shutil.rmtree(cleandir, True)
736 733 if srcpeer is not None:
737 734 srcpeer.close()
738 735 return srcpeer, destpeer
739 736
740 737 def _showstats(repo, stats, quietempty=False):
741 738 if quietempty and not any(stats):
742 739 return
743 740 repo.ui.status(_("%d files updated, %d files merged, "
744 741 "%d files removed, %d files unresolved\n") % stats)
745 742
746 743 def updaterepo(repo, node, overwrite, updatecheck=None):
747 744 """Update the working directory to node.
748 745
749 746 When overwrite is set, changes are clobbered, merged else
750 747
751 748 returns stats (see pydoc mercurial.merge.applyupdates)"""
752 749 return mergemod.update(repo, node, False, overwrite,
753 750 labels=['working copy', 'destination'],
754 751 updatecheck=updatecheck)
755 752
756 753 def update(repo, node, quietempty=False, updatecheck=None):
757 754 """update the working directory to node"""
758 755 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
759 756 _showstats(repo, stats, quietempty)
760 757 if stats[3]:
761 758 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
762 759 return stats[3] > 0
763 760
764 761 # naming conflict in clone()
765 762 _update = update
766 763
767 764 def clean(repo, node, show_stats=True, quietempty=False):
768 765 """forcibly switch the working directory to node, clobbering changes"""
769 766 stats = updaterepo(repo, node, True)
770 767 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
771 768 if show_stats:
772 769 _showstats(repo, stats, quietempty)
773 770 return stats[3] > 0
774 771
775 772 # naming conflict in updatetotally()
776 773 _clean = clean
777 774
778 775 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
779 776 """Update the working directory with extra care for non-file components
780 777
781 778 This takes care of non-file components below:
782 779
783 780 :bookmark: might be advanced or (in)activated
784 781
785 782 This takes arguments below:
786 783
787 784 :checkout: to which revision the working directory is updated
788 785 :brev: a name, which might be a bookmark to be activated after updating
789 786 :clean: whether changes in the working directory can be discarded
790 787 :updatecheck: how to deal with a dirty working directory
791 788
792 789 Valid values for updatecheck are (None => linear):
793 790
794 791 * abort: abort if the working directory is dirty
795 792 * none: don't check (merge working directory changes into destination)
796 793 * linear: check that update is linear before merging working directory
797 794 changes into destination
798 795 * noconflict: check that the update does not result in file merges
799 796
800 797 This returns whether conflict is detected at updating or not.
801 798 """
802 799 if updatecheck is None:
803 800 updatecheck = ui.config('commands', 'update.check')
804 801 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
805 802 # If not configured, or invalid value configured
806 803 updatecheck = 'linear'
807 804 with repo.wlock():
808 805 movemarkfrom = None
809 806 warndest = False
810 807 if checkout is None:
811 808 updata = destutil.destupdate(repo, clean=clean)
812 809 checkout, movemarkfrom, brev = updata
813 810 warndest = True
814 811
815 812 if clean:
816 813 ret = _clean(repo, checkout)
817 814 else:
818 815 if updatecheck == 'abort':
819 816 cmdutil.bailifchanged(repo, merge=False)
820 817 updatecheck = 'none'
821 818 ret = _update(repo, checkout, updatecheck=updatecheck)
822 819
823 820 if not ret and movemarkfrom:
824 821 if movemarkfrom == repo['.'].node():
825 822 pass # no-op update
826 823 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
827 824 b = ui.label(repo._activebookmark, 'bookmarks.active')
828 825 ui.status(_("updating bookmark %s\n") % b)
829 826 else:
830 827 # this can happen with a non-linear update
831 828 b = ui.label(repo._activebookmark, 'bookmarks')
832 829 ui.status(_("(leaving bookmark %s)\n") % b)
833 830 bookmarks.deactivate(repo)
834 831 elif brev in repo._bookmarks:
835 832 if brev != repo._activebookmark:
836 833 b = ui.label(brev, 'bookmarks.active')
837 834 ui.status(_("(activating bookmark %s)\n") % b)
838 835 bookmarks.activate(repo, brev)
839 836 elif brev:
840 837 if repo._activebookmark:
841 838 b = ui.label(repo._activebookmark, 'bookmarks')
842 839 ui.status(_("(leaving bookmark %s)\n") % b)
843 840 bookmarks.deactivate(repo)
844 841
845 842 if warndest:
846 843 destutil.statusotherdests(ui, repo)
847 844
848 845 return ret
849 846
850 847 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
851 848 """Branch merge with node, resolving changes. Return true if any
852 849 unresolved conflicts."""
853 850 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
854 851 labels=labels)
855 852 _showstats(repo, stats)
856 853 if stats[3]:
857 854 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
858 855 "or 'hg update -C .' to abandon\n"))
859 856 elif remind:
860 857 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
861 858 return stats[3] > 0
862 859
863 860 def _incoming(displaychlist, subreporecurse, ui, repo, source,
864 861 opts, buffered=False):
865 862 """
866 863 Helper for incoming / gincoming.
867 864 displaychlist gets called with
868 865 (remoterepo, incomingchangesetlist, displayer) parameters,
869 866 and is supposed to contain only code that can't be unified.
870 867 """
871 868 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
872 869 other = peer(repo, opts, source)
873 870 ui.status(_('comparing with %s\n') % util.hidepassword(source))
874 871 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
875 872
876 873 if revs:
877 874 revs = [other.lookup(rev) for rev in revs]
878 875 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
879 876 revs, opts["bundle"], opts["force"])
880 877 try:
881 878 if not chlist:
882 879 ui.status(_("no changes found\n"))
883 880 return subreporecurse()
884 881 ui.pager('incoming')
885 882 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
886 883 displaychlist(other, chlist, displayer)
887 884 displayer.close()
888 885 finally:
889 886 cleanupfn()
890 887 subreporecurse()
891 888 return 0 # exit code is zero since we found incoming changes
892 889
893 890 def incoming(ui, repo, source, opts):
894 891 def subreporecurse():
895 892 ret = 1
896 893 if opts.get('subrepos'):
897 894 ctx = repo[None]
898 895 for subpath in sorted(ctx.substate):
899 896 sub = ctx.sub(subpath)
900 897 ret = min(ret, sub.incoming(ui, source, opts))
901 898 return ret
902 899
903 900 def display(other, chlist, displayer):
904 901 limit = cmdutil.loglimit(opts)
905 902 if opts.get('newest_first'):
906 903 chlist.reverse()
907 904 count = 0
908 905 for n in chlist:
909 906 if limit is not None and count >= limit:
910 907 break
911 908 parents = [p for p in other.changelog.parents(n) if p != nullid]
912 909 if opts.get('no_merges') and len(parents) == 2:
913 910 continue
914 911 count += 1
915 912 displayer.show(other[n])
916 913 return _incoming(display, subreporecurse, ui, repo, source, opts)
917 914
918 915 def _outgoing(ui, repo, dest, opts):
919 916 path = ui.paths.getpath(dest, default=('default-push', 'default'))
920 917 if not path:
921 918 raise error.Abort(_('default repository not configured!'),
922 919 hint=_("see 'hg help config.paths'"))
923 920 dest = path.pushloc or path.loc
924 921 branches = path.branch, opts.get('branch') or []
925 922
926 923 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
927 924 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
928 925 if revs:
929 926 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
930 927
931 928 other = peer(repo, opts, dest)
932 929 outgoing = discovery.findcommonoutgoing(repo, other, revs,
933 930 force=opts.get('force'))
934 931 o = outgoing.missing
935 932 if not o:
936 933 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
937 934 return o, other
938 935
939 936 def outgoing(ui, repo, dest, opts):
940 937 def recurse():
941 938 ret = 1
942 939 if opts.get('subrepos'):
943 940 ctx = repo[None]
944 941 for subpath in sorted(ctx.substate):
945 942 sub = ctx.sub(subpath)
946 943 ret = min(ret, sub.outgoing(ui, dest, opts))
947 944 return ret
948 945
949 946 limit = cmdutil.loglimit(opts)
950 947 o, other = _outgoing(ui, repo, dest, opts)
951 948 if not o:
952 949 cmdutil.outgoinghooks(ui, repo, other, opts, o)
953 950 return recurse()
954 951
955 952 if opts.get('newest_first'):
956 953 o.reverse()
957 954 ui.pager('outgoing')
958 955 displayer = cmdutil.show_changeset(ui, repo, opts)
959 956 count = 0
960 957 for n in o:
961 958 if limit is not None and count >= limit:
962 959 break
963 960 parents = [p for p in repo.changelog.parents(n) if p != nullid]
964 961 if opts.get('no_merges') and len(parents) == 2:
965 962 continue
966 963 count += 1
967 964 displayer.show(repo[n])
968 965 displayer.close()
969 966 cmdutil.outgoinghooks(ui, repo, other, opts, o)
970 967 recurse()
971 968 return 0 # exit code is zero since we found outgoing changes
972 969
973 970 def verify(repo):
974 971 """verify the consistency of a repository"""
975 972 ret = verifymod.verify(repo)
976 973
977 974 # Broken subrepo references in hidden csets don't seem worth worrying about,
978 975 # since they can't be pushed/pulled, and --hidden can be used if they are a
979 976 # concern.
980 977
981 978 # pathto() is needed for -R case
982 979 revs = repo.revs("filelog(%s)",
983 980 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
984 981
985 982 if revs:
986 983 repo.ui.status(_('checking subrepo links\n'))
987 984 for rev in revs:
988 985 ctx = repo[rev]
989 986 try:
990 987 for subpath in ctx.substate:
991 988 try:
992 989 ret = (ctx.sub(subpath, allowcreate=False).verify()
993 990 or ret)
994 991 except error.RepoError as e:
995 992 repo.ui.warn(('%s: %s\n') % (rev, e))
996 993 except Exception:
997 994 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
998 995 node.short(ctx.node()))
999 996
1000 997 return ret
1001 998
1002 999 def remoteui(src, opts):
1003 1000 'build a remote ui from ui or repo and opts'
1004 1001 if util.safehasattr(src, 'baseui'): # looks like a repository
1005 1002 dst = src.baseui.copy() # drop repo-specific config
1006 1003 src = src.ui # copy target options from repo
1007 1004 else: # assume it's a global ui object
1008 1005 dst = src.copy() # keep all global options
1009 1006
1010 1007 # copy ssh-specific options
1011 1008 for o in 'ssh', 'remotecmd':
1012 1009 v = opts.get(o) or src.config('ui', o)
1013 1010 if v:
1014 1011 dst.setconfig("ui", o, v, 'copied')
1015 1012
1016 1013 # copy bundle-specific options
1017 1014 r = src.config('bundle', 'mainreporoot')
1018 1015 if r:
1019 1016 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1020 1017
1021 1018 # copy selected local settings to the remote ui
1022 1019 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1023 1020 for key, val in src.configitems(sect):
1024 1021 dst.setconfig(sect, key, val, 'copied')
1025 1022 v = src.config('web', 'cacerts')
1026 1023 if v:
1027 1024 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1028 1025
1029 1026 return dst
1030 1027
1031 1028 # Files of interest
1032 1029 # Used to check if the repository has changed looking at mtime and size of
1033 1030 # these files.
1034 1031 foi = [('spath', '00changelog.i'),
1035 1032 ('spath', 'phaseroots'), # ! phase can change content at the same size
1036 1033 ('spath', 'obsstore'),
1037 1034 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1038 1035 ]
1039 1036
1040 1037 class cachedlocalrepo(object):
1041 1038 """Holds a localrepository that can be cached and reused."""
1042 1039
1043 1040 def __init__(self, repo):
1044 1041 """Create a new cached repo from an existing repo.
1045 1042
1046 1043 We assume the passed in repo was recently created. If the
1047 1044 repo has changed between when it was created and when it was
1048 1045 turned into a cache, it may not refresh properly.
1049 1046 """
1050 1047 assert isinstance(repo, localrepo.localrepository)
1051 1048 self._repo = repo
1052 1049 self._state, self.mtime = self._repostate()
1053 1050 self._filtername = repo.filtername
1054 1051
1055 1052 def fetch(self):
1056 1053 """Refresh (if necessary) and return a repository.
1057 1054
1058 1055 If the cached instance is out of date, it will be recreated
1059 1056 automatically and returned.
1060 1057
1061 1058 Returns a tuple of the repo and a boolean indicating whether a new
1062 1059 repo instance was created.
1063 1060 """
1064 1061 # We compare the mtimes and sizes of some well-known files to
1065 1062 # determine if the repo changed. This is not precise, as mtimes
1066 1063 # are susceptible to clock skew and imprecise filesystems and
1067 1064 # file content can change while maintaining the same size.
1068 1065
1069 1066 state, mtime = self._repostate()
1070 1067 if state == self._state:
1071 1068 return self._repo, False
1072 1069
1073 1070 repo = repository(self._repo.baseui, self._repo.url())
1074 1071 if self._filtername:
1075 1072 self._repo = repo.filtered(self._filtername)
1076 1073 else:
1077 1074 self._repo = repo.unfiltered()
1078 1075 self._state = state
1079 1076 self.mtime = mtime
1080 1077
1081 1078 return self._repo, True
1082 1079
1083 1080 def _repostate(self):
1084 1081 state = []
1085 1082 maxmtime = -1
1086 1083 for attr, fname in foi:
1087 1084 prefix = getattr(self._repo, attr)
1088 1085 p = os.path.join(prefix, fname)
1089 1086 try:
1090 1087 st = os.stat(p)
1091 1088 except OSError:
1092 1089 st = os.stat(prefix)
1093 1090 state.append((st.st_mtime, st.st_size))
1094 1091 maxmtime = max(maxmtime, st.st_mtime)
1095 1092
1096 1093 return tuple(state), maxmtime
1097 1094
1098 1095 def copy(self):
1099 1096 """Obtain a copy of this class instance.
1100 1097
1101 1098 A new localrepository instance is obtained. The new instance should be
1102 1099 completely independent of the original.
1103 1100 """
1104 1101 repo = repository(self._repo.baseui, self._repo.origroot)
1105 1102 if self._filtername:
1106 1103 repo = repo.filtered(self._filtername)
1107 1104 else:
1108 1105 repo = repo.unfiltered()
1109 1106 c = cachedlocalrepo(repo)
1110 1107 c._state = self._state
1111 1108 c.mtime = self.mtime
1112 1109 return c