outgoing: avoid repo.lookup() for converting revnum to nodeid...
Martin von Zweigbergk
r37329:70c52800 default
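The diff below replaces repo.lookup(rev) with repo[rev].node() in _outgoing() when turning the integer revision numbers produced by scmutil.revrange() into binary node ids. A minimal sketch of the pattern, assuming a local repository object like the ones used elsewhere in this file (the helper name nodes_for_revspecs is made up for illustration):

    from mercurial import scmutil

    def nodes_for_revspecs(repo, revspecs):
        # scmutil.revrange() resolves user-supplied revset strings to
        # integer revision numbers local to this repository.
        revnums = scmutil.revrange(repo, revspecs)
        # Indexing the repository with an integer revision yields a change
        # context; .node() returns its binary node id straight from the
        # changelog, so the value never has to round-trip through
        # repo.lookup(), which this changeset avoids for plain revnums.
        return [repo[rev].node() for rev in revnums]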
@@ -1,1143 +1,1143 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 node,
39 39 phases,
40 40 scmutil,
41 41 sshpeer,
42 42 statichttprepo,
43 43 ui as uimod,
44 44 unionrepo,
45 45 url,
46 46 util,
47 47 verify as verifymod,
48 48 vfs as vfsmod,
49 49 )
50 50
51 51 from .utils import (
52 52 stringutil,
53 53 )
54 54
55 55 release = lock.release
56 56
57 57 # shared features
58 58 sharedbookmarks = 'bookmarks'
59 59
60 60 def _local(path):
61 61 path = util.expandpath(util.urllocalpath(path))
62 62 return (os.path.isfile(path) and bundlerepo or localrepo)
63 63
64 64 def addbranchrevs(lrepo, other, branches, revs):
65 65 peer = other.peer() # a courtesy to callers using a localrepo for other
66 66 hashbranch, branches = branches
67 67 if not hashbranch and not branches:
68 68 x = revs or None
69 69 if util.safehasattr(revs, 'first'):
70 70 y = revs.first()
71 71 elif revs:
72 72 y = revs[0]
73 73 else:
74 74 y = None
75 75 return x, y
76 76 if revs:
77 77 revs = list(revs)
78 78 else:
79 79 revs = []
80 80
81 81 if not peer.capable('branchmap'):
82 82 if branches:
83 83 raise error.Abort(_("remote branch lookup not supported"))
84 84 revs.append(hashbranch)
85 85 return revs, revs[0]
86 86 branchmap = peer.branchmap()
87 87
88 88 def primary(branch):
89 89 if branch == '.':
90 90 if not lrepo:
91 91 raise error.Abort(_("dirstate branch not accessible"))
92 92 branch = lrepo.dirstate.branch()
93 93 if branch in branchmap:
94 94 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
95 95 return True
96 96 else:
97 97 return False
98 98
99 99 for branch in branches:
100 100 if not primary(branch):
101 101 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
102 102 if hashbranch:
103 103 if not primary(hashbranch):
104 104 revs.append(hashbranch)
105 105 return revs, revs[0]
106 106
107 107 def parseurl(path, branches=None):
108 108 '''parse url#branch, returning (url, (branch, branches))'''
109 109
110 110 u = util.url(path)
111 111 branch = None
112 112 if u.fragment:
113 113 branch = u.fragment
114 114 u.fragment = None
115 115 return bytes(u), (branch, branches or [])
116 116
117 117 schemes = {
118 118 'bundle': bundlerepo,
119 119 'union': unionrepo,
120 120 'file': _local,
121 121 'http': httppeer,
122 122 'https': httppeer,
123 123 'ssh': sshpeer,
124 124 'static-http': statichttprepo,
125 125 }
126 126
127 127 def _peerlookup(path):
128 128 u = util.url(path)
129 129 scheme = u.scheme or 'file'
130 130 thing = schemes.get(scheme) or schemes['file']
131 131 try:
132 132 return thing(path)
133 133 except TypeError:
134 134 # we can't test callable(thing) because 'thing' can be an unloaded
135 135 # module that implements __call__
136 136 if not util.safehasattr(thing, 'instance'):
137 137 raise
138 138 return thing
139 139
140 140 def islocal(repo):
141 141 '''return true if repo (or path pointing to repo) is local'''
142 142 if isinstance(repo, bytes):
143 143 try:
144 144 return _peerlookup(repo).islocal(repo)
145 145 except AttributeError:
146 146 return False
147 147 return repo.local()
148 148
149 149 def openpath(ui, path):
150 150 '''open path with open if local, url.open if remote'''
151 151 pathurl = util.url(path, parsequery=False, parsefragment=False)
152 152 if pathurl.islocal():
153 153 return util.posixfile(pathurl.localpath(), 'rb')
154 154 else:
155 155 return url.open(ui, path)
156 156
157 157 # a list of (ui, repo) functions called for wire peer initialization
158 158 wirepeersetupfuncs = []
159 159
160 160 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
161 161 """return a repository object for the specified path"""
162 162 obj = _peerlookup(path).instance(ui, path, create)
163 163 ui = getattr(obj, "ui", ui)
164 164 for f in presetupfuncs or []:
165 165 f(ui, obj)
166 166 for name, module in extensions.extensions(ui):
167 167 hook = getattr(module, 'reposetup', None)
168 168 if hook:
169 169 hook(ui, obj)
170 170 if not obj.local():
171 171 for f in wirepeersetupfuncs:
172 172 f(ui, obj)
173 173 return obj
174 174
175 175 def repository(ui, path='', create=False, presetupfuncs=None):
176 176 """return a repository object for the specified path"""
177 177 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
178 178 repo = peer.local()
179 179 if not repo:
180 180 raise error.Abort(_("repository '%s' is not local") %
181 181 (path or peer.url()))
182 182 return repo.filtered('visible')
183 183
184 184 def peer(uiorrepo, opts, path, create=False):
185 185 '''return a repository peer for the specified path'''
186 186 rui = remoteui(uiorrepo, opts)
187 187 return _peerorrepo(rui, path, create).peer()
188 188
189 189 def defaultdest(source):
190 190 '''return default destination of clone if none is given
191 191
192 192 >>> defaultdest(b'foo')
193 193 'foo'
194 194 >>> defaultdest(b'/foo/bar')
195 195 'bar'
196 196 >>> defaultdest(b'/')
197 197 ''
198 198 >>> defaultdest(b'')
199 199 ''
200 200 >>> defaultdest(b'http://example.org/')
201 201 ''
202 202 >>> defaultdest(b'http://example.org/foo/')
203 203 'foo'
204 204 '''
205 205 path = util.url(source).path
206 206 if not path:
207 207 return ''
208 208 return os.path.basename(os.path.normpath(path))
209 209
210 210 def sharedreposource(repo):
211 211 """Returns repository object for source repository of a shared repo.
212 212
213 213 If repo is not a shared repository, returns None.
214 214 """
215 215 if repo.sharedpath == repo.path:
216 216 return None
217 217
218 218 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
219 219 return repo.srcrepo
220 220
221 221 # the sharedpath always ends in the .hg; we want the path to the repo
222 222 source = repo.vfs.split(repo.sharedpath)[0]
223 223 srcurl, branches = parseurl(source)
224 224 srcrepo = repository(repo.ui, srcurl)
225 225 repo.srcrepo = srcrepo
226 226 return srcrepo
227 227
228 228 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
229 229 relative=False):
230 230 '''create a shared repository'''
231 231
232 232 if not islocal(source):
233 233 raise error.Abort(_('can only share local repositories'))
234 234
235 235 if not dest:
236 236 dest = defaultdest(source)
237 237 else:
238 238 dest = ui.expandpath(dest)
239 239
240 240 if isinstance(source, bytes):
241 241 origsource = ui.expandpath(source)
242 242 source, branches = parseurl(origsource)
243 243 srcrepo = repository(ui, source)
244 244 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
245 245 else:
246 246 srcrepo = source.local()
247 247 origsource = source = srcrepo.url()
248 248 checkout = None
249 249
250 250 sharedpath = srcrepo.sharedpath # if our source is already sharing
251 251
252 252 destwvfs = vfsmod.vfs(dest, realpath=True)
253 253 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
254 254
255 255 if destvfs.lexists():
256 256 raise error.Abort(_('destination already exists'))
257 257
258 258 if not destwvfs.isdir():
259 259 destwvfs.mkdir()
260 260 destvfs.makedir()
261 261
262 262 requirements = ''
263 263 try:
264 264 requirements = srcrepo.vfs.read('requires')
265 265 except IOError as inst:
266 266 if inst.errno != errno.ENOENT:
267 267 raise
268 268
269 269 if relative:
270 270 try:
271 271 sharedpath = os.path.relpath(sharedpath, destvfs.base)
272 272 requirements += 'relshared\n'
273 273 except (IOError, ValueError) as e:
274 274 # ValueError is raised on Windows if the drive letters differ on
275 275 # each path
276 276 raise error.Abort(_('cannot calculate relative path'),
277 277 hint=stringutil.forcebytestr(e))
278 278 else:
279 279 requirements += 'shared\n'
280 280
281 281 destvfs.write('requires', requirements)
282 282 destvfs.write('sharedpath', sharedpath)
283 283
284 284 r = repository(ui, destwvfs.base)
285 285 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
286 286 _postshareupdate(r, update, checkout=checkout)
287 287 return r
288 288
289 289 def unshare(ui, repo):
290 290 """convert a shared repository to a normal one
291 291
292 292 Copy the store data to the repo and remove the sharedpath data.
293 293 """
294 294
295 295 destlock = lock = None
296 296 lock = repo.lock()
297 297 try:
298 298 # we use locks here because if we race with commit, we
299 299 # can end up with extra data in the cloned revlogs that's
300 300 # not pointed to by changesets, thus causing verify to
301 301 # fail
302 302
303 303 destlock = copystore(ui, repo, repo.path)
304 304
305 305 sharefile = repo.vfs.join('sharedpath')
306 306 util.rename(sharefile, sharefile + '.old')
307 307
308 308 repo.requirements.discard('shared')
309 309 repo.requirements.discard('relshared')
310 310 repo._writerequirements()
311 311 finally:
312 312 destlock and destlock.release()
313 313 lock and lock.release()
314 314
315 315 # update store, spath, svfs and sjoin of repo
316 316 repo.unfiltered().__init__(repo.baseui, repo.root)
317 317
318 318 # TODO: figure out how to access subrepos that exist, but were previously
319 319 # removed from .hgsub
320 320 c = repo['.']
321 321 subs = c.substate
322 322 for s in sorted(subs):
323 323 c.sub(s).unshare()
324 324
325 325 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
326 326 """Called after a new shared repo is created.
327 327
328 328 The new repo only has a requirements file and pointer to the source.
329 329 This function configures additional shared data.
330 330
331 331 Extensions can wrap this function and write additional entries to
332 332 destrepo/.hg/shared to indicate additional pieces of data to be shared.
333 333 """
334 334 default = defaultpath or sourcerepo.ui.config('paths', 'default')
335 335 if default:
336 336 template = ('[paths]\n'
337 337 'default = %s\n')
338 338 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
339 339
340 340 with destrepo.wlock():
341 341 if bookmarks:
342 342 destrepo.vfs.write('shared', sharedbookmarks + '\n')
343 343
344 344 def _postshareupdate(repo, update, checkout=None):
345 345 """Maybe perform a working directory update after a shared repo is created.
346 346
347 347 ``update`` can be a boolean or a revision to update to.
348 348 """
349 349 if not update:
350 350 return
351 351
352 352 repo.ui.status(_("updating working directory\n"))
353 353 if update is not True:
354 354 checkout = update
355 355 for test in (checkout, 'default', 'tip'):
356 356 if test is None:
357 357 continue
358 358 try:
359 359 uprev = repo.lookup(test)
360 360 break
361 361 except error.RepoLookupError:
362 362 continue
363 363 _update(repo, uprev)
364 364
365 365 def copystore(ui, srcrepo, destpath):
366 366 '''copy files from store of srcrepo in destpath
367 367
368 368 returns destlock
369 369 '''
370 370 destlock = None
371 371 try:
372 372 hardlink = None
373 373 num = 0
374 374 closetopic = [None]
375 375 def prog(topic, pos):
376 376 if pos is None:
377 377 closetopic[0] = topic
378 378 else:
379 379 ui.progress(topic, pos + num)
380 380 srcpublishing = srcrepo.publishing()
381 381 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
382 382 dstvfs = vfsmod.vfs(destpath)
383 383 for f in srcrepo.store.copylist():
384 384 if srcpublishing and f.endswith('phaseroots'):
385 385 continue
386 386 dstbase = os.path.dirname(f)
387 387 if dstbase and not dstvfs.exists(dstbase):
388 388 dstvfs.mkdir(dstbase)
389 389 if srcvfs.exists(f):
390 390 if f.endswith('data'):
391 391 # 'dstbase' may be empty (e.g. revlog format 0)
392 392 lockfile = os.path.join(dstbase, "lock")
393 393 # lock to avoid premature writing to the target
394 394 destlock = lock.lock(dstvfs, lockfile)
395 395 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
396 396 hardlink, progress=prog)
397 397 num += n
398 398 if hardlink:
399 399 ui.debug("linked %d files\n" % num)
400 400 if closetopic[0]:
401 401 ui.progress(closetopic[0], None)
402 402 else:
403 403 ui.debug("copied %d files\n" % num)
404 404 if closetopic[0]:
405 405 ui.progress(closetopic[0], None)
406 406 return destlock
407 407 except: # re-raises
408 408 release(destlock)
409 409 raise
410 410
411 411 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
412 412 rev=None, update=True, stream=False):
413 413 """Perform a clone using a shared repo.
414 414
415 415 The store for the repository will be located at <sharepath>/.hg. The
416 416 specified revisions will be cloned or pulled from "source". A shared repo
417 417 will be created at "dest" and a working copy will be created if "update" is
418 418 True.
419 419 """
420 420 revs = None
421 421 if rev:
422 422 if not srcpeer.capable('lookup'):
423 423 raise error.Abort(_("src repository does not support "
424 424 "revision lookup and so doesn't "
425 425 "support clone by revision"))
426 426 revs = [srcpeer.lookup(r) for r in rev]
427 427
428 428 # Obtain a lock before checking for or cloning the pooled repo otherwise
429 429 # 2 clients may race creating or populating it.
430 430 pooldir = os.path.dirname(sharepath)
431 431 # lock class requires the directory to exist.
432 432 try:
433 433 util.makedir(pooldir, False)
434 434 except OSError as e:
435 435 if e.errno != errno.EEXIST:
436 436 raise
437 437
438 438 poolvfs = vfsmod.vfs(pooldir)
439 439 basename = os.path.basename(sharepath)
440 440
441 441 with lock.lock(poolvfs, '%s.lock' % basename):
442 442 if os.path.exists(sharepath):
443 443 ui.status(_('(sharing from existing pooled repository %s)\n') %
444 444 basename)
445 445 else:
446 446 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
447 447 # Always use pull mode because hardlinks in share mode don't work
448 448 # well. Never update because working copies aren't necessary in
449 449 # share mode.
450 450 clone(ui, peeropts, source, dest=sharepath, pull=True,
451 451 revs=rev, update=False, stream=stream)
452 452
453 453 # Resolve the value to put in [paths] section for the source.
454 454 if islocal(source):
455 455 defaultpath = os.path.abspath(util.urllocalpath(source))
456 456 else:
457 457 defaultpath = source
458 458
459 459 sharerepo = repository(ui, path=sharepath)
460 460 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
461 461 defaultpath=defaultpath)
462 462
463 463 # We need to perform a pull against the dest repo to fetch bookmarks
464 464 # and other non-store data that isn't shared by default. In the case of
465 465 # non-existing shared repo, this means we pull from the remote twice. This
466 466 # is a bit weird. But at the time it was implemented, there wasn't an easy
467 467 # way to pull just non-changegroup data.
468 468 destrepo = repository(ui, path=dest)
469 469 exchange.pull(destrepo, srcpeer, heads=revs)
470 470
471 471 _postshareupdate(destrepo, update)
472 472
473 473 return srcpeer, peer(ui, peeropts, dest)
474 474
475 475 # Recomputing branch cache might be slow on big repos,
476 476 # so just copy it
477 477 def _copycache(srcrepo, dstcachedir, fname):
478 478 """copy a cache from srcrepo to destcachedir (if it exists)"""
479 479 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
480 480 dstbranchcache = os.path.join(dstcachedir, fname)
481 481 if os.path.exists(srcbranchcache):
482 482 if not os.path.exists(dstcachedir):
483 483 os.mkdir(dstcachedir)
484 484 util.copyfile(srcbranchcache, dstbranchcache)
485 485
486 486 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
487 487 update=True, stream=False, branch=None, shareopts=None):
488 488 """Make a copy of an existing repository.
489 489
490 490 Create a copy of an existing repository in a new directory. The
491 491 source and destination are URLs, as passed to the repository
492 492 function. Returns a pair of repository peers, the source and
493 493 newly created destination.
494 494
495 495 The location of the source is added to the new repository's
496 496 .hg/hgrc file, as the default to be used for future pulls and
497 497 pushes.
498 498
499 499 If an exception is raised, the partly cloned/updated destination
500 500 repository will be deleted.
501 501
502 502 Arguments:
503 503
504 504 source: repository object or URL
505 505
506 506 dest: URL of destination repository to create (defaults to base
507 507 name of source repository)
508 508
509 509 pull: always pull from source repository, even in local case or if the
510 510 server prefers streaming
511 511
512 512 stream: stream raw data uncompressed from repository (fast over
513 513 LAN, slow over WAN)
514 514
515 515 revs: revision to clone up to (implies pull=True)
516 516
517 517 update: update working directory after clone completes, if
518 518 destination is local repository (True means update to default rev,
519 519 anything else is treated as a revision)
520 520
521 521 branch: branches to clone
522 522
523 523 shareopts: dict of options to control auto sharing behavior. The "pool" key
524 524 activates auto sharing mode and defines the directory for stores. The
525 525 "mode" key determines how to construct the directory name of the shared
526 526 repository. "identity" means the name is derived from the node of the first
527 527 changeset in the repository. "remote" means the name is derived from the
528 528 remote's path/URL. Defaults to "identity."
529 529 """
530 530
531 531 if isinstance(source, bytes):
532 532 origsource = ui.expandpath(source)
533 533 source, branches = parseurl(origsource, branch)
534 534 srcpeer = peer(ui, peeropts, source)
535 535 else:
536 536 srcpeer = source.peer() # in case we were called with a localrepo
537 537 branches = (None, branch or [])
538 538 origsource = source = srcpeer.url()
539 539 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
540 540
541 541 if dest is None:
542 542 dest = defaultdest(source)
543 543 if dest:
544 544 ui.status(_("destination directory: %s\n") % dest)
545 545 else:
546 546 dest = ui.expandpath(dest)
547 547
548 548 dest = util.urllocalpath(dest)
549 549 source = util.urllocalpath(source)
550 550
551 551 if not dest:
552 552 raise error.Abort(_("empty destination path is not valid"))
553 553
554 554 destvfs = vfsmod.vfs(dest, expandpath=True)
555 555 if destvfs.lexists():
556 556 if not destvfs.isdir():
557 557 raise error.Abort(_("destination '%s' already exists") % dest)
558 558 elif destvfs.listdir():
559 559 raise error.Abort(_("destination '%s' is not empty") % dest)
560 560
561 561 shareopts = shareopts or {}
562 562 sharepool = shareopts.get('pool')
563 563 sharenamemode = shareopts.get('mode')
564 564 if sharepool and islocal(dest):
565 565 sharepath = None
566 566 if sharenamemode == 'identity':
567 567 # Resolve the name from the initial changeset in the remote
568 568 # repository. This returns nullid when the remote is empty. It
569 569 # raises RepoLookupError if revision 0 is filtered or otherwise
570 570 # not available. If we fail to resolve, sharing is not enabled.
571 571 try:
572 572 rootnode = srcpeer.lookup('0')
573 573 if rootnode != node.nullid:
574 574 sharepath = os.path.join(sharepool, node.hex(rootnode))
575 575 else:
576 576 ui.status(_('(not using pooled storage: '
577 577 'remote appears to be empty)\n'))
578 578 except error.RepoLookupError:
579 579 ui.status(_('(not using pooled storage: '
580 580 'unable to resolve identity of remote)\n'))
581 581 elif sharenamemode == 'remote':
582 582 sharepath = os.path.join(
583 583 sharepool, node.hex(hashlib.sha1(source).digest()))
584 584 else:
585 585 raise error.Abort(_('unknown share naming mode: %s') %
586 586 sharenamemode)
587 587
588 588 if sharepath:
589 589 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
590 590 dest, pull=pull, rev=revs, update=update,
591 591 stream=stream)
592 592
593 593 srclock = destlock = cleandir = None
594 594 srcrepo = srcpeer.local()
595 595 try:
596 596 abspath = origsource
597 597 if islocal(origsource):
598 598 abspath = os.path.abspath(util.urllocalpath(origsource))
599 599
600 600 if islocal(dest):
601 601 cleandir = dest
602 602
603 603 copy = False
604 604 if (srcrepo and srcrepo.cancopy() and islocal(dest)
605 605 and not phases.hassecret(srcrepo)):
606 606 copy = not pull and not revs
607 607
608 608 if copy:
609 609 try:
610 610 # we use a lock here because if we race with commit, we
611 611 # can end up with extra data in the cloned revlogs that's
612 612 # not pointed to by changesets, thus causing verify to
613 613 # fail
614 614 srclock = srcrepo.lock(wait=False)
615 615 except error.LockError:
616 616 copy = False
617 617
618 618 if copy:
619 619 srcrepo.hook('preoutgoing', throw=True, source='clone')
620 620 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
621 621 if not os.path.exists(dest):
622 622 os.mkdir(dest)
623 623 else:
624 624 # only clean up directories we create ourselves
625 625 cleandir = hgdir
626 626 try:
627 627 destpath = hgdir
628 628 util.makedir(destpath, notindexed=True)
629 629 except OSError as inst:
630 630 if inst.errno == errno.EEXIST:
631 631 cleandir = None
632 632 raise error.Abort(_("destination '%s' already exists")
633 633 % dest)
634 634 raise
635 635
636 636 destlock = copystore(ui, srcrepo, destpath)
637 637 # copy bookmarks over
638 638 srcbookmarks = srcrepo.vfs.join('bookmarks')
639 639 dstbookmarks = os.path.join(destpath, 'bookmarks')
640 640 if os.path.exists(srcbookmarks):
641 641 util.copyfile(srcbookmarks, dstbookmarks)
642 642
643 643 dstcachedir = os.path.join(destpath, 'cache')
644 644 for cache in cacheutil.cachetocopy(srcrepo):
645 645 _copycache(srcrepo, dstcachedir, cache)
646 646
647 647 # we need to re-init the repo after manually copying the data
648 648 # into it
649 649 destpeer = peer(srcrepo, peeropts, dest)
650 650 srcrepo.hook('outgoing', source='clone',
651 651 node=node.hex(node.nullid))
652 652 else:
653 653 try:
654 654 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
655 655 # only pass ui when no srcrepo
656 656 except OSError as inst:
657 657 if inst.errno == errno.EEXIST:
658 658 cleandir = None
659 659 raise error.Abort(_("destination '%s' already exists")
660 660 % dest)
661 661 raise
662 662
663 663 if revs:
664 664 if not srcpeer.capable('lookup'):
665 665 raise error.Abort(_("src repository does not support "
666 666 "revision lookup and so doesn't "
667 667 "support clone by revision"))
668 668 revs = [srcpeer.lookup(r) for r in revs]
669 669 checkout = revs[0]
670 670 else:
671 671 revs = None
672 672 local = destpeer.local()
673 673 if local:
674 674 u = util.url(abspath)
675 675 defaulturl = bytes(u)
676 676 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
677 677 if not stream:
678 678 if pull:
679 679 stream = False
680 680 else:
681 681 stream = None
682 682 # internal config: ui.quietbookmarkmove
683 683 overrides = {('ui', 'quietbookmarkmove'): True}
684 684 with local.ui.configoverride(overrides, 'clone'):
685 685 exchange.pull(local, srcpeer, revs,
686 686 streamclonerequested=stream)
687 687 elif srcrepo:
688 688 exchange.push(srcrepo, destpeer, revs=revs,
689 689 bookmarks=srcrepo._bookmarks.keys())
690 690 else:
691 691 raise error.Abort(_("clone from remote to remote not supported")
692 692 )
693 693
694 694 cleandir = None
695 695
696 696 destrepo = destpeer.local()
697 697 if destrepo:
698 698 template = uimod.samplehgrcs['cloned']
699 699 u = util.url(abspath)
700 700 u.passwd = None
701 701 defaulturl = bytes(u)
702 702 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
703 703 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
704 704
705 705 if ui.configbool('experimental', 'remotenames'):
706 706 logexchange.pullremotenames(destrepo, srcpeer)
707 707
708 708 if update:
709 709 if update is not True:
710 710 checkout = srcpeer.lookup(update)
711 711 uprev = None
712 712 status = None
713 713 if checkout is not None:
714 714 try:
715 715 uprev = destrepo.lookup(checkout)
716 716 except error.RepoLookupError:
717 717 if update is not True:
718 718 try:
719 719 uprev = destrepo.lookup(update)
720 720 except error.RepoLookupError:
721 721 pass
722 722 if uprev is None:
723 723 try:
724 724 uprev = destrepo._bookmarks['@']
725 725 update = '@'
726 726 bn = destrepo[uprev].branch()
727 727 if bn == 'default':
728 728 status = _("updating to bookmark @\n")
729 729 else:
730 730 status = (_("updating to bookmark @ on branch %s\n")
731 731 % bn)
732 732 except KeyError:
733 733 try:
734 734 uprev = destrepo.branchtip('default')
735 735 except error.RepoLookupError:
736 736 uprev = destrepo.lookup('tip')
737 737 if not status:
738 738 bn = destrepo[uprev].branch()
739 739 status = _("updating to branch %s\n") % bn
740 740 destrepo.ui.status(status)
741 741 _update(destrepo, uprev)
742 742 if update in destrepo._bookmarks:
743 743 bookmarks.activate(destrepo, update)
744 744 finally:
745 745 release(srclock, destlock)
746 746 if cleandir is not None:
747 747 shutil.rmtree(cleandir, True)
748 748 if srcpeer is not None:
749 749 srcpeer.close()
750 750 return srcpeer, destpeer
751 751
752 752 def _showstats(repo, stats, quietempty=False):
753 753 if quietempty and stats.isempty():
754 754 return
755 755 repo.ui.status(_("%d files updated, %d files merged, "
756 756 "%d files removed, %d files unresolved\n") % (
757 757 stats.updatedcount, stats.mergedcount,
758 758 stats.removedcount, stats.unresolvedcount))
759 759
760 760 def updaterepo(repo, node, overwrite, updatecheck=None):
761 761 """Update the working directory to node.
762 762
763 763 When overwrite is set, changes are clobbered, merged else
764 764
765 765 returns stats (see pydoc mercurial.merge.applyupdates)"""
766 766 return mergemod.update(repo, node, False, overwrite,
767 767 labels=['working copy', 'destination'],
768 768 updatecheck=updatecheck)
769 769
770 770 def update(repo, node, quietempty=False, updatecheck=None):
771 771 """update the working directory to node"""
772 772 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
773 773 _showstats(repo, stats, quietempty)
774 774 if stats.unresolvedcount:
775 775 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
776 776 return stats.unresolvedcount > 0
777 777
778 778 # naming conflict in clone()
779 779 _update = update
780 780
781 781 def clean(repo, node, show_stats=True, quietempty=False):
782 782 """forcibly switch the working directory to node, clobbering changes"""
783 783 stats = updaterepo(repo, node, True)
784 784 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
785 785 if show_stats:
786 786 _showstats(repo, stats, quietempty)
787 787 return stats.unresolvedcount > 0
788 788
789 789 # naming conflict in updatetotally()
790 790 _clean = clean
791 791
792 792 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
793 793 """Update the working directory with extra care for non-file components
794 794
795 795 This takes care of non-file components below:
796 796
797 797 :bookmark: might be advanced or (in)activated
798 798
799 799 This takes arguments below:
800 800
801 801 :checkout: to which revision the working directory is updated
802 802 :brev: a name, which might be a bookmark to be activated after updating
803 803 :clean: whether changes in the working directory can be discarded
804 804 :updatecheck: how to deal with a dirty working directory
805 805
806 806 Valid values for updatecheck are (None => linear):
807 807
808 808 * abort: abort if the working directory is dirty
809 809 * none: don't check (merge working directory changes into destination)
810 810 * linear: check that update is linear before merging working directory
811 811 changes into destination
812 812 * noconflict: check that the update does not result in file merges
813 813
814 814 This returns whether conflict is detected at updating or not.
815 815 """
816 816 if updatecheck is None:
817 817 updatecheck = ui.config('commands', 'update.check')
818 818 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
819 819 # If not configured, or invalid value configured
820 820 updatecheck = 'linear'
821 821 with repo.wlock():
822 822 movemarkfrom = None
823 823 warndest = False
824 824 if checkout is None:
825 825 updata = destutil.destupdate(repo, clean=clean)
826 826 checkout, movemarkfrom, brev = updata
827 827 warndest = True
828 828
829 829 if clean:
830 830 ret = _clean(repo, checkout)
831 831 else:
832 832 if updatecheck == 'abort':
833 833 cmdutil.bailifchanged(repo, merge=False)
834 834 updatecheck = 'none'
835 835 ret = _update(repo, checkout, updatecheck=updatecheck)
836 836
837 837 if not ret and movemarkfrom:
838 838 if movemarkfrom == repo['.'].node():
839 839 pass # no-op update
840 840 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
841 841 b = ui.label(repo._activebookmark, 'bookmarks.active')
842 842 ui.status(_("updating bookmark %s\n") % b)
843 843 else:
844 844 # this can happen with a non-linear update
845 845 b = ui.label(repo._activebookmark, 'bookmarks')
846 846 ui.status(_("(leaving bookmark %s)\n") % b)
847 847 bookmarks.deactivate(repo)
848 848 elif brev in repo._bookmarks:
849 849 if brev != repo._activebookmark:
850 850 b = ui.label(brev, 'bookmarks.active')
851 851 ui.status(_("(activating bookmark %s)\n") % b)
852 852 bookmarks.activate(repo, brev)
853 853 elif brev:
854 854 if repo._activebookmark:
855 855 b = ui.label(repo._activebookmark, 'bookmarks')
856 856 ui.status(_("(leaving bookmark %s)\n") % b)
857 857 bookmarks.deactivate(repo)
858 858
859 859 if warndest:
860 860 destutil.statusotherdests(ui, repo)
861 861
862 862 return ret
863 863
864 864 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
865 865 abort=False):
866 866 """Branch merge with node, resolving changes. Return true if any
867 867 unresolved conflicts."""
868 868 if not abort:
869 869 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
870 870 labels=labels)
871 871 else:
872 872 ms = mergemod.mergestate.read(repo)
873 873 if ms.active():
874 874 # there were conflicts
875 875 node = ms.localctx.hex()
876 876 else:
877 877 # there were no conflicts, mergestate was not stored
878 878 node = repo['.'].hex()
879 879
880 880 repo.ui.status(_("aborting the merge, updating back to"
881 881 " %s\n") % node[:12])
882 882 stats = mergemod.update(repo, node, branchmerge=False, force=True,
883 883 labels=labels)
884 884
885 885 _showstats(repo, stats)
886 886 if stats.unresolvedcount:
887 887 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
888 888 "or 'hg merge --abort' to abandon\n"))
889 889 elif remind and not abort:
890 890 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
891 891 return stats.unresolvedcount > 0
892 892
893 893 def _incoming(displaychlist, subreporecurse, ui, repo, source,
894 894 opts, buffered=False):
895 895 """
896 896 Helper for incoming / gincoming.
897 897 displaychlist gets called with
898 898 (remoterepo, incomingchangesetlist, displayer) parameters,
899 899 and is supposed to contain only code that can't be unified.
900 900 """
901 901 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
902 902 other = peer(repo, opts, source)
903 903 ui.status(_('comparing with %s\n') % util.hidepassword(source))
904 904 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
905 905
906 906 if revs:
907 907 revs = [other.lookup(rev) for rev in revs]
908 908 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
909 909 revs, opts["bundle"], opts["force"])
910 910 try:
911 911 if not chlist:
912 912 ui.status(_("no changes found\n"))
913 913 return subreporecurse()
914 914 ui.pager('incoming')
915 915 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
916 916 buffered=buffered)
917 917 displaychlist(other, chlist, displayer)
918 918 displayer.close()
919 919 finally:
920 920 cleanupfn()
921 921 subreporecurse()
922 922 return 0 # exit code is zero since we found incoming changes
923 923
924 924 def incoming(ui, repo, source, opts):
925 925 def subreporecurse():
926 926 ret = 1
927 927 if opts.get('subrepos'):
928 928 ctx = repo[None]
929 929 for subpath in sorted(ctx.substate):
930 930 sub = ctx.sub(subpath)
931 931 ret = min(ret, sub.incoming(ui, source, opts))
932 932 return ret
933 933
934 934 def display(other, chlist, displayer):
935 935 limit = logcmdutil.getlimit(opts)
936 936 if opts.get('newest_first'):
937 937 chlist.reverse()
938 938 count = 0
939 939 for n in chlist:
940 940 if limit is not None and count >= limit:
941 941 break
942 942 parents = [p for p in other.changelog.parents(n) if p != nullid]
943 943 if opts.get('no_merges') and len(parents) == 2:
944 944 continue
945 945 count += 1
946 946 displayer.show(other[n])
947 947 return _incoming(display, subreporecurse, ui, repo, source, opts)
948 948
949 949 def _outgoing(ui, repo, dest, opts):
950 950 path = ui.paths.getpath(dest, default=('default-push', 'default'))
951 951 if not path:
952 952 raise error.Abort(_('default repository not configured!'),
953 953 hint=_("see 'hg help config.paths'"))
954 954 dest = path.pushloc or path.loc
955 955 branches = path.branch, opts.get('branch') or []
956 956
957 957 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
958 958 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
959 959 if revs:
960 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
960 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
961 961
962 962 other = peer(repo, opts, dest)
963 963 outgoing = discovery.findcommonoutgoing(repo, other, revs,
964 964 force=opts.get('force'))
965 965 o = outgoing.missing
966 966 if not o:
967 967 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
968 968 return o, other
969 969
970 970 def outgoing(ui, repo, dest, opts):
971 971 def recurse():
972 972 ret = 1
973 973 if opts.get('subrepos'):
974 974 ctx = repo[None]
975 975 for subpath in sorted(ctx.substate):
976 976 sub = ctx.sub(subpath)
977 977 ret = min(ret, sub.outgoing(ui, dest, opts))
978 978 return ret
979 979
980 980 limit = logcmdutil.getlimit(opts)
981 981 o, other = _outgoing(ui, repo, dest, opts)
982 982 if not o:
983 983 cmdutil.outgoinghooks(ui, repo, other, opts, o)
984 984 return recurse()
985 985
986 986 if opts.get('newest_first'):
987 987 o.reverse()
988 988 ui.pager('outgoing')
989 989 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
990 990 count = 0
991 991 for n in o:
992 992 if limit is not None and count >= limit:
993 993 break
994 994 parents = [p for p in repo.changelog.parents(n) if p != nullid]
995 995 if opts.get('no_merges') and len(parents) == 2:
996 996 continue
997 997 count += 1
998 998 displayer.show(repo[n])
999 999 displayer.close()
1000 1000 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1001 1001 recurse()
1002 1002 return 0 # exit code is zero since we found outgoing changes
1003 1003
1004 1004 def verify(repo):
1005 1005 """verify the consistency of a repository"""
1006 1006 ret = verifymod.verify(repo)
1007 1007
1008 1008 # Broken subrepo references in hidden csets don't seem worth worrying about,
1009 1009 # since they can't be pushed/pulled, and --hidden can be used if they are a
1010 1010 # concern.
1011 1011
1012 1012 # pathto() is needed for -R case
1013 1013 revs = repo.revs("filelog(%s)",
1014 1014 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1015 1015
1016 1016 if revs:
1017 1017 repo.ui.status(_('checking subrepo links\n'))
1018 1018 for rev in revs:
1019 1019 ctx = repo[rev]
1020 1020 try:
1021 1021 for subpath in ctx.substate:
1022 1022 try:
1023 1023 ret = (ctx.sub(subpath, allowcreate=False).verify()
1024 1024 or ret)
1025 1025 except error.RepoError as e:
1026 1026 repo.ui.warn(('%s: %s\n') % (rev, e))
1027 1027 except Exception:
1028 1028 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1029 1029 node.short(ctx.node()))
1030 1030
1031 1031 return ret
1032 1032
1033 1033 def remoteui(src, opts):
1034 1034 'build a remote ui from ui or repo and opts'
1035 1035 if util.safehasattr(src, 'baseui'): # looks like a repository
1036 1036 dst = src.baseui.copy() # drop repo-specific config
1037 1037 src = src.ui # copy target options from repo
1038 1038 else: # assume it's a global ui object
1039 1039 dst = src.copy() # keep all global options
1040 1040
1041 1041 # copy ssh-specific options
1042 1042 for o in 'ssh', 'remotecmd':
1043 1043 v = opts.get(o) or src.config('ui', o)
1044 1044 if v:
1045 1045 dst.setconfig("ui", o, v, 'copied')
1046 1046
1047 1047 # copy bundle-specific options
1048 1048 r = src.config('bundle', 'mainreporoot')
1049 1049 if r:
1050 1050 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1051 1051
1052 1052 # copy selected local settings to the remote ui
1053 1053 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1054 1054 for key, val in src.configitems(sect):
1055 1055 dst.setconfig(sect, key, val, 'copied')
1056 1056 v = src.config('web', 'cacerts')
1057 1057 if v:
1058 1058 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1059 1059
1060 1060 return dst
1061 1061
1062 1062 # Files of interest
1063 1063 # Used to check if the repository has changed looking at mtime and size of
1064 1064 # these files.
1065 1065 foi = [('spath', '00changelog.i'),
1066 1066 ('spath', 'phaseroots'), # ! phase can change content at the same size
1067 1067 ('spath', 'obsstore'),
1068 1068 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1069 1069 ]
1070 1070
1071 1071 class cachedlocalrepo(object):
1072 1072 """Holds a localrepository that can be cached and reused."""
1073 1073
1074 1074 def __init__(self, repo):
1075 1075 """Create a new cached repo from an existing repo.
1076 1076
1077 1077 We assume the passed in repo was recently created. If the
1078 1078 repo has changed between when it was created and when it was
1079 1079 turned into a cache, it may not refresh properly.
1080 1080 """
1081 1081 assert isinstance(repo, localrepo.localrepository)
1082 1082 self._repo = repo
1083 1083 self._state, self.mtime = self._repostate()
1084 1084 self._filtername = repo.filtername
1085 1085
1086 1086 def fetch(self):
1087 1087 """Refresh (if necessary) and return a repository.
1088 1088
1089 1089 If the cached instance is out of date, it will be recreated
1090 1090 automatically and returned.
1091 1091
1092 1092 Returns a tuple of the repo and a boolean indicating whether a new
1093 1093 repo instance was created.
1094 1094 """
1095 1095 # We compare the mtimes and sizes of some well-known files to
1096 1096 # determine if the repo changed. This is not precise, as mtimes
1097 1097 # are susceptible to clock skew and imprecise filesystems and
1098 1098 # file content can change while maintaining the same size.
1099 1099
1100 1100 state, mtime = self._repostate()
1101 1101 if state == self._state:
1102 1102 return self._repo, False
1103 1103
1104 1104 repo = repository(self._repo.baseui, self._repo.url())
1105 1105 if self._filtername:
1106 1106 self._repo = repo.filtered(self._filtername)
1107 1107 else:
1108 1108 self._repo = repo.unfiltered()
1109 1109 self._state = state
1110 1110 self.mtime = mtime
1111 1111
1112 1112 return self._repo, True
1113 1113
1114 1114 def _repostate(self):
1115 1115 state = []
1116 1116 maxmtime = -1
1117 1117 for attr, fname in foi:
1118 1118 prefix = getattr(self._repo, attr)
1119 1119 p = os.path.join(prefix, fname)
1120 1120 try:
1121 1121 st = os.stat(p)
1122 1122 except OSError:
1123 1123 st = os.stat(prefix)
1124 1124 state.append((st[stat.ST_MTIME], st.st_size))
1125 1125 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1126 1126
1127 1127 return tuple(state), maxmtime
1128 1128
1129 1129 def copy(self):
1130 1130 """Obtain a copy of this class instance.
1131 1131
1132 1132 A new localrepository instance is obtained. The new instance should be
1133 1133 completely independent of the original.
1134 1134 """
1135 1135 repo = repository(self._repo.baseui, self._repo.origroot)
1136 1136 if self._filtername:
1137 1137 repo = repo.filtered(self._filtername)
1138 1138 else:
1139 1139 repo = repo.unfiltered()
1140 1140 c = cachedlocalrepo(repo)
1141 1141 c._state = self._state
1142 1142 c.mtime = self.mtime
1143 1143 return c