local-clone: extract the listing of caches to copy...
marmoute
r32493:3c8a71a8 default
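
For readability, here is the net effect of the patch, condensed from the hunks below; it adds nothing beyond what the diff itself contains. The inline branch-cache copy loop in clone() is replaced by a new _cachetocopy() helper that returns the list of cache files worth carrying over during a local clone:

    from mercurial import repoview   # already imported at the top of hg.py

    # new helper added by this changeset (condensed from the diff below)
    def _cachetocopy(srcrepo):
        """return the list of cache files valuable to copy during a clone"""
        # In local clones we're copying all nodes, not just served
        # ones. Therefore copy all branch caches over.
        cachefiles = ['branch2']
        cachefiles.extend('branch2-%s' % f for f in repoview.filtertable)
        return cachefiles

    # its call site in clone(), once the store and bookmarks have been copied:
    #     for cache in _cachetocopy(srcrepo):
    #         _copycache(srcrepo, dstcachedir, cache)
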
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -1,1055 +1,1060 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 bookmarks,
21 21 bundlerepo,
22 22 cmdutil,
23 23 destutil,
24 24 discovery,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 httppeer,
29 29 localrepo,
30 30 lock,
31 31 merge as mergemod,
32 32 node,
33 33 phases,
34 34 repoview,
35 35 scmutil,
36 36 sshpeer,
37 37 statichttprepo,
38 38 ui as uimod,
39 39 unionrepo,
40 40 url,
41 41 util,
42 42 verify as verifymod,
43 43 vfs as vfsmod,
44 44 )
45 45
46 46 release = lock.release
47 47
48 48 # shared features
49 49 sharedbookmarks = 'bookmarks'
50 50
51 51 def _local(path):
52 52 path = util.expandpath(util.urllocalpath(path))
53 53 return (os.path.isfile(path) and bundlerepo or localrepo)
54 54
55 55 def addbranchrevs(lrepo, other, branches, revs):
56 56 peer = other.peer() # a courtesy to callers using a localrepo for other
57 57 hashbranch, branches = branches
58 58 if not hashbranch and not branches:
59 59 x = revs or None
60 60 if util.safehasattr(revs, 'first'):
61 61 y = revs.first()
62 62 elif revs:
63 63 y = revs[0]
64 64 else:
65 65 y = None
66 66 return x, y
67 67 if revs:
68 68 revs = list(revs)
69 69 else:
70 70 revs = []
71 71
72 72 if not peer.capable('branchmap'):
73 73 if branches:
74 74 raise error.Abort(_("remote branch lookup not supported"))
75 75 revs.append(hashbranch)
76 76 return revs, revs[0]
77 77 branchmap = peer.branchmap()
78 78
79 79 def primary(branch):
80 80 if branch == '.':
81 81 if not lrepo:
82 82 raise error.Abort(_("dirstate branch not accessible"))
83 83 branch = lrepo.dirstate.branch()
84 84 if branch in branchmap:
85 85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
86 86 return True
87 87 else:
88 88 return False
89 89
90 90 for branch in branches:
91 91 if not primary(branch):
92 92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
93 93 if hashbranch:
94 94 if not primary(hashbranch):
95 95 revs.append(hashbranch)
96 96 return revs, revs[0]
97 97
98 98 def parseurl(path, branches=None):
99 99 '''parse url#branch, returning (url, (branch, branches))'''
100 100
101 101 u = util.url(path)
102 102 branch = None
103 103 if u.fragment:
104 104 branch = u.fragment
105 105 u.fragment = None
106 106 return bytes(u), (branch, branches or [])
107 107
108 108 schemes = {
109 109 'bundle': bundlerepo,
110 110 'union': unionrepo,
111 111 'file': _local,
112 112 'http': httppeer,
113 113 'https': httppeer,
114 114 'ssh': sshpeer,
115 115 'static-http': statichttprepo,
116 116 }
117 117
118 118 def _peerlookup(path):
119 119 u = util.url(path)
120 120 scheme = u.scheme or 'file'
121 121 thing = schemes.get(scheme) or schemes['file']
122 122 try:
123 123 return thing(path)
124 124 except TypeError:
125 125 # we can't test callable(thing) because 'thing' can be an unloaded
126 126 # module that implements __call__
127 127 if not util.safehasattr(thing, 'instance'):
128 128 raise
129 129 return thing
130 130
131 131 def islocal(repo):
132 132 '''return true if repo (or path pointing to repo) is local'''
133 133 if isinstance(repo, str):
134 134 try:
135 135 return _peerlookup(repo).islocal(repo)
136 136 except AttributeError:
137 137 return False
138 138 return repo.local()
139 139
140 140 def openpath(ui, path):
141 141 '''open path with open if local, url.open if remote'''
142 142 pathurl = util.url(path, parsequery=False, parsefragment=False)
143 143 if pathurl.islocal():
144 144 return util.posixfile(pathurl.localpath(), 'rb')
145 145 else:
146 146 return url.open(ui, path)
147 147
148 148 # a list of (ui, repo) functions called for wire peer initialization
149 149 wirepeersetupfuncs = []
150 150
151 151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
152 152 """return a repository object for the specified path"""
153 153 obj = _peerlookup(path).instance(ui, path, create)
154 154 ui = getattr(obj, "ui", ui)
155 155 for f in presetupfuncs or []:
156 156 f(ui, obj)
157 157 for name, module in extensions.extensions(ui):
158 158 hook = getattr(module, 'reposetup', None)
159 159 if hook:
160 160 hook(ui, obj)
161 161 if not obj.local():
162 162 for f in wirepeersetupfuncs:
163 163 f(ui, obj)
164 164 return obj
165 165
166 166 def repository(ui, path='', create=False, presetupfuncs=None):
167 167 """return a repository object for the specified path"""
168 168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
169 169 repo = peer.local()
170 170 if not repo:
171 171 raise error.Abort(_("repository '%s' is not local") %
172 172 (path or peer.url()))
173 173 return repo.filtered('visible')
174 174
175 175 def peer(uiorrepo, opts, path, create=False):
176 176 '''return a repository peer for the specified path'''
177 177 rui = remoteui(uiorrepo, opts)
178 178 return _peerorrepo(rui, path, create).peer()
179 179
180 180 def defaultdest(source):
181 181 '''return default destination of clone if none is given
182 182
183 183 >>> defaultdest('foo')
184 184 'foo'
185 185 >>> defaultdest('/foo/bar')
186 186 'bar'
187 187 >>> defaultdest('/')
188 188 ''
189 189 >>> defaultdest('')
190 190 ''
191 191 >>> defaultdest('http://example.org/')
192 192 ''
193 193 >>> defaultdest('http://example.org/foo/')
194 194 'foo'
195 195 '''
196 196 path = util.url(source).path
197 197 if not path:
198 198 return ''
199 199 return os.path.basename(os.path.normpath(path))
200 200
201 201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
202 202 relative=False):
203 203 '''create a shared repository'''
204 204
205 205 if not islocal(source):
206 206 raise error.Abort(_('can only share local repositories'))
207 207
208 208 if not dest:
209 209 dest = defaultdest(source)
210 210 else:
211 211 dest = ui.expandpath(dest)
212 212
213 213 if isinstance(source, str):
214 214 origsource = ui.expandpath(source)
215 215 source, branches = parseurl(origsource)
216 216 srcrepo = repository(ui, source)
217 217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
218 218 else:
219 219 srcrepo = source.local()
220 220 origsource = source = srcrepo.url()
221 221 checkout = None
222 222
223 223 sharedpath = srcrepo.sharedpath # if our source is already sharing
224 224
225 225 destwvfs = vfsmod.vfs(dest, realpath=True)
226 226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
227 227
228 228 if destvfs.lexists():
229 229 raise error.Abort(_('destination already exists'))
230 230
231 231 if not destwvfs.isdir():
232 232 destwvfs.mkdir()
233 233 destvfs.makedir()
234 234
235 235 requirements = ''
236 236 try:
237 237 requirements = srcrepo.vfs.read('requires')
238 238 except IOError as inst:
239 239 if inst.errno != errno.ENOENT:
240 240 raise
241 241
242 242 if relative:
243 243 try:
244 244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
245 245 requirements += 'relshared\n'
246 246 except IOError as e:
247 247 raise error.Abort(_('cannot calculate relative path'),
248 248 hint=str(e))
249 249 else:
250 250 requirements += 'shared\n'
251 251
252 252 destvfs.write('requires', requirements)
253 253 destvfs.write('sharedpath', sharedpath)
254 254
255 255 r = repository(ui, destwvfs.base)
256 256 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
257 257 _postshareupdate(r, update, checkout=checkout)
258 258
259 259 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
260 260 """Called after a new shared repo is created.
261 261
262 262 The new repo only has a requirements file and pointer to the source.
263 263 This function configures additional shared data.
264 264
265 265 Extensions can wrap this function and write additional entries to
266 266 destrepo/.hg/shared to indicate additional pieces of data to be shared.
267 267 """
268 268 default = defaultpath or sourcerepo.ui.config('paths', 'default')
269 269 if default:
270 270 fp = destrepo.vfs("hgrc", "w", text=True)
271 271 fp.write("[paths]\n")
272 272 fp.write("default = %s\n" % default)
273 273 fp.close()
274 274
275 275 with destrepo.wlock():
276 276 if bookmarks:
277 277 fp = destrepo.vfs('shared', 'w')
278 278 fp.write(sharedbookmarks + '\n')
279 279 fp.close()
280 280
281 281 def _postshareupdate(repo, update, checkout=None):
282 282 """Maybe perform a working directory update after a shared repo is created.
283 283
284 284 ``update`` can be a boolean or a revision to update to.
285 285 """
286 286 if not update:
287 287 return
288 288
289 289 repo.ui.status(_("updating working directory\n"))
290 290 if update is not True:
291 291 checkout = update
292 292 for test in (checkout, 'default', 'tip'):
293 293 if test is None:
294 294 continue
295 295 try:
296 296 uprev = repo.lookup(test)
297 297 break
298 298 except error.RepoLookupError:
299 299 continue
300 300 _update(repo, uprev)
301 301
302 302 def copystore(ui, srcrepo, destpath):
303 303 '''copy files from store of srcrepo in destpath
304 304
305 305 returns destlock
306 306 '''
307 307 destlock = None
308 308 try:
309 309 hardlink = None
310 310 num = 0
311 311 closetopic = [None]
312 312 def prog(topic, pos):
313 313 if pos is None:
314 314 closetopic[0] = topic
315 315 else:
316 316 ui.progress(topic, pos + num)
317 317 srcpublishing = srcrepo.publishing()
318 318 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
319 319 dstvfs = vfsmod.vfs(destpath)
320 320 for f in srcrepo.store.copylist():
321 321 if srcpublishing and f.endswith('phaseroots'):
322 322 continue
323 323 dstbase = os.path.dirname(f)
324 324 if dstbase and not dstvfs.exists(dstbase):
325 325 dstvfs.mkdir(dstbase)
326 326 if srcvfs.exists(f):
327 327 if f.endswith('data'):
328 328 # 'dstbase' may be empty (e.g. revlog format 0)
329 329 lockfile = os.path.join(dstbase, "lock")
330 330 # lock to avoid premature writing to the target
331 331 destlock = lock.lock(dstvfs, lockfile)
332 332 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
333 333 hardlink, progress=prog)
334 334 num += n
335 335 if hardlink:
336 336 ui.debug("linked %d files\n" % num)
337 337 if closetopic[0]:
338 338 ui.progress(closetopic[0], None)
339 339 else:
340 340 ui.debug("copied %d files\n" % num)
341 341 if closetopic[0]:
342 342 ui.progress(closetopic[0], None)
343 343 return destlock
344 344 except: # re-raises
345 345 release(destlock)
346 346 raise
347 347
348 348 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
349 349 rev=None, update=True, stream=False):
350 350 """Perform a clone using a shared repo.
351 351
352 352 The store for the repository will be located at <sharepath>/.hg. The
353 353 specified revisions will be cloned or pulled from "source". A shared repo
354 354 will be created at "dest" and a working copy will be created if "update" is
355 355 True.
356 356 """
357 357 revs = None
358 358 if rev:
359 359 if not srcpeer.capable('lookup'):
360 360 raise error.Abort(_("src repository does not support "
361 361 "revision lookup and so doesn't "
362 362 "support clone by revision"))
363 363 revs = [srcpeer.lookup(r) for r in rev]
364 364
365 365 # Obtain a lock before checking for or cloning the pooled repo otherwise
366 366 # 2 clients may race creating or populating it.
367 367 pooldir = os.path.dirname(sharepath)
368 368 # lock class requires the directory to exist.
369 369 try:
370 370 util.makedir(pooldir, False)
371 371 except OSError as e:
372 372 if e.errno != errno.EEXIST:
373 373 raise
374 374
375 375 poolvfs = vfsmod.vfs(pooldir)
376 376 basename = os.path.basename(sharepath)
377 377
378 378 with lock.lock(poolvfs, '%s.lock' % basename):
379 379 if os.path.exists(sharepath):
380 380 ui.status(_('(sharing from existing pooled repository %s)\n') %
381 381 basename)
382 382 else:
383 383 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
384 384 # Always use pull mode because hardlinks in share mode don't work
385 385 # well. Never update because working copies aren't necessary in
386 386 # share mode.
387 387 clone(ui, peeropts, source, dest=sharepath, pull=True,
388 388 rev=rev, update=False, stream=stream)
389 389
390 390 # Resolve the value to put in [paths] section for the source.
391 391 if islocal(source):
392 392 defaultpath = os.path.abspath(util.urllocalpath(source))
393 393 else:
394 394 defaultpath = source
395 395
396 396 sharerepo = repository(ui, path=sharepath)
397 397 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
398 398 defaultpath=defaultpath)
399 399
400 400 # We need to perform a pull against the dest repo to fetch bookmarks
401 401 # and other non-store data that isn't shared by default. In the case of
402 402 # non-existing shared repo, this means we pull from the remote twice. This
403 403 # is a bit weird. But at the time it was implemented, there wasn't an easy
404 404 # way to pull just non-changegroup data.
405 405 destrepo = repository(ui, path=dest)
406 406 exchange.pull(destrepo, srcpeer, heads=revs)
407 407
408 408 _postshareupdate(destrepo, update)
409 409
410 410 return srcpeer, peer(ui, peeropts, dest)
411 411
412 412 # Recomputing branch cache might be slow on big repos,
413 413 # so just copy it
414 414 def _copycache(srcrepo, dstcachedir, fname):
415 415 """copy a cache from srcrepo to destcachedir (if it exists)"""
416 416 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
417 417 dstbranchcache = os.path.join(dstcachedir, fname)
418 418 if os.path.exists(srcbranchcache):
419 419 if not os.path.exists(dstcachedir):
420 420 os.mkdir(dstcachedir)
421 421 util.copyfile(srcbranchcache, dstbranchcache)
422 422
423 def _cachetocopy(srcrepo):
424 """return the list of cache file valuable to copy during a clone"""
425 # In local clones we're copying all nodes, not just served
426 # ones. Therefore copy all branch caches over.
427 cachefiles = ['branch2']
428 cachefiles.extend('branch2-%s' % f for f in repoview.filtertable)
429 return cachefiles
430
423 431 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
424 432 update=True, stream=False, branch=None, shareopts=None):
425 433 """Make a copy of an existing repository.
426 434
427 435 Create a copy of an existing repository in a new directory. The
428 436 source and destination are URLs, as passed to the repository
429 437 function. Returns a pair of repository peers, the source and
430 438 newly created destination.
431 439
432 440 The location of the source is added to the new repository's
433 441 .hg/hgrc file, as the default to be used for future pulls and
434 442 pushes.
435 443
436 444 If an exception is raised, the partly cloned/updated destination
437 445 repository will be deleted.
438 446
439 447 Arguments:
440 448
441 449 source: repository object or URL
442 450
443 451 dest: URL of destination repository to create (defaults to base
444 452 name of source repository)
445 453
446 454 pull: always pull from source repository, even in local case or if the
447 455 server prefers streaming
448 456
449 457 stream: stream raw data uncompressed from repository (fast over
450 458 LAN, slow over WAN)
451 459
452 460 rev: revision to clone up to (implies pull=True)
453 461
454 462 update: update working directory after clone completes, if
455 463 destination is local repository (True means update to default rev,
456 464 anything else is treated as a revision)
457 465
458 466 branch: branches to clone
459 467
460 468 shareopts: dict of options to control auto sharing behavior. The "pool" key
461 469 activates auto sharing mode and defines the directory for stores. The
462 470 "mode" key determines how to construct the directory name of the shared
463 471 repository. "identity" means the name is derived from the node of the first
464 472 changeset in the repository. "remote" means the name is derived from the
465 473 remote's path/URL. Defaults to "identity."
466 474 """
467 475
468 476 if isinstance(source, str):
469 477 origsource = ui.expandpath(source)
470 478 source, branch = parseurl(origsource, branch)
471 479 srcpeer = peer(ui, peeropts, source)
472 480 else:
473 481 srcpeer = source.peer() # in case we were called with a localrepo
474 482 branch = (None, branch or [])
475 483 origsource = source = srcpeer.url()
476 484 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
477 485
478 486 if dest is None:
479 487 dest = defaultdest(source)
480 488 if dest:
481 489 ui.status(_("destination directory: %s\n") % dest)
482 490 else:
483 491 dest = ui.expandpath(dest)
484 492
485 493 dest = util.urllocalpath(dest)
486 494 source = util.urllocalpath(source)
487 495
488 496 if not dest:
489 497 raise error.Abort(_("empty destination path is not valid"))
490 498
491 499 destvfs = vfsmod.vfs(dest, expandpath=True)
492 500 if destvfs.lexists():
493 501 if not destvfs.isdir():
494 502 raise error.Abort(_("destination '%s' already exists") % dest)
495 503 elif destvfs.listdir():
496 504 raise error.Abort(_("destination '%s' is not empty") % dest)
497 505
498 506 shareopts = shareopts or {}
499 507 sharepool = shareopts.get('pool')
500 508 sharenamemode = shareopts.get('mode')
501 509 if sharepool and islocal(dest):
502 510 sharepath = None
503 511 if sharenamemode == 'identity':
504 512 # Resolve the name from the initial changeset in the remote
505 513 # repository. This returns nullid when the remote is empty. It
506 514 # raises RepoLookupError if revision 0 is filtered or otherwise
507 515 # not available. If we fail to resolve, sharing is not enabled.
508 516 try:
509 517 rootnode = srcpeer.lookup('0')
510 518 if rootnode != node.nullid:
511 519 sharepath = os.path.join(sharepool, node.hex(rootnode))
512 520 else:
513 521 ui.status(_('(not using pooled storage: '
514 522 'remote appears to be empty)\n'))
515 523 except error.RepoLookupError:
516 524 ui.status(_('(not using pooled storage: '
517 525 'unable to resolve identity of remote)\n'))
518 526 elif sharenamemode == 'remote':
519 527 sharepath = os.path.join(
520 528 sharepool, hashlib.sha1(source).hexdigest())
521 529 else:
522 530 raise error.Abort(_('unknown share naming mode: %s') %
523 531 sharenamemode)
524 532
525 533 if sharepath:
526 534 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
527 535 dest, pull=pull, rev=rev, update=update,
528 536 stream=stream)
529 537
530 538 srclock = destlock = cleandir = None
531 539 srcrepo = srcpeer.local()
532 540 try:
533 541 abspath = origsource
534 542 if islocal(origsource):
535 543 abspath = os.path.abspath(util.urllocalpath(origsource))
536 544
537 545 if islocal(dest):
538 546 cleandir = dest
539 547
540 548 copy = False
541 549 if (srcrepo and srcrepo.cancopy() and islocal(dest)
542 550 and not phases.hassecret(srcrepo)):
543 551 copy = not pull and not rev
544 552
545 553 if copy:
546 554 try:
547 555 # we use a lock here because if we race with commit, we
548 556 # can end up with extra data in the cloned revlogs that's
549 557 # not pointed to by changesets, thus causing verify to
550 558 # fail
551 559 srclock = srcrepo.lock(wait=False)
552 560 except error.LockError:
553 561 copy = False
554 562
555 563 if copy:
556 564 srcrepo.hook('preoutgoing', throw=True, source='clone')
557 565 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
558 566 if not os.path.exists(dest):
559 567 os.mkdir(dest)
560 568 else:
561 569 # only clean up directories we create ourselves
562 570 cleandir = hgdir
563 571 try:
564 572 destpath = hgdir
565 573 util.makedir(destpath, notindexed=True)
566 574 except OSError as inst:
567 575 if inst.errno == errno.EEXIST:
568 576 cleandir = None
569 577 raise error.Abort(_("destination '%s' already exists")
570 578 % dest)
571 579 raise
572 580
573 581 destlock = copystore(ui, srcrepo, destpath)
574 582 # copy bookmarks over
575 583 srcbookmarks = srcrepo.vfs.join('bookmarks')
576 584 dstbookmarks = os.path.join(destpath, 'bookmarks')
577 585 if os.path.exists(srcbookmarks):
578 586 util.copyfile(srcbookmarks, dstbookmarks)
579 587
580 588 dstcachedir = os.path.join(destpath, 'cache')
581 # In local clones we're copying all nodes, not just served
582 # ones. Therefore copy all branch caches over.
583 _copycache(srcrepo, dstcachedir, 'branch2')
584 for cachename in repoview.filtertable:
585 _copycache(srcrepo, dstcachedir, 'branch2-%s' % cachename)
589 for cache in _cachetocopy(srcrepo):
590 _copycache(srcrepo, dstcachedir, cache)
586 591
587 592 # we need to re-init the repo after manually copying the data
588 593 # into it
589 594 destpeer = peer(srcrepo, peeropts, dest)
590 595 srcrepo.hook('outgoing', source='clone',
591 596 node=node.hex(node.nullid))
592 597 else:
593 598 try:
594 599 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
595 600 # only pass ui when no srcrepo
596 601 except OSError as inst:
597 602 if inst.errno == errno.EEXIST:
598 603 cleandir = None
599 604 raise error.Abort(_("destination '%s' already exists")
600 605 % dest)
601 606 raise
602 607
603 608 revs = None
604 609 if rev:
605 610 if not srcpeer.capable('lookup'):
606 611 raise error.Abort(_("src repository does not support "
607 612 "revision lookup and so doesn't "
608 613 "support clone by revision"))
609 614 revs = [srcpeer.lookup(r) for r in rev]
610 615 checkout = revs[0]
611 616 local = destpeer.local()
612 617 if local:
613 618 if not stream:
614 619 if pull:
615 620 stream = False
616 621 else:
617 622 stream = None
618 623 # internal config: ui.quietbookmarkmove
619 624 overrides = {('ui', 'quietbookmarkmove'): True}
620 625 with local.ui.configoverride(overrides, 'clone'):
621 626 exchange.pull(local, srcpeer, revs,
622 627 streamclonerequested=stream)
623 628 elif srcrepo:
624 629 exchange.push(srcrepo, destpeer, revs=revs,
625 630 bookmarks=srcrepo._bookmarks.keys())
626 631 else:
627 632 raise error.Abort(_("clone from remote to remote not supported")
628 633 )
629 634
630 635 cleandir = None
631 636
632 637 destrepo = destpeer.local()
633 638 if destrepo:
634 639 template = uimod.samplehgrcs['cloned']
635 640 fp = destrepo.vfs("hgrc", "w", text=True)
636 641 u = util.url(abspath)
637 642 u.passwd = None
638 643 defaulturl = str(u)
639 644 fp.write(template % defaulturl)
640 645 fp.close()
641 646
642 647 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
643 648
644 649 if update:
645 650 if update is not True:
646 651 checkout = srcpeer.lookup(update)
647 652 uprev = None
648 653 status = None
649 654 if checkout is not None:
650 655 try:
651 656 uprev = destrepo.lookup(checkout)
652 657 except error.RepoLookupError:
653 658 if update is not True:
654 659 try:
655 660 uprev = destrepo.lookup(update)
656 661 except error.RepoLookupError:
657 662 pass
658 663 if uprev is None:
659 664 try:
660 665 uprev = destrepo._bookmarks['@']
661 666 update = '@'
662 667 bn = destrepo[uprev].branch()
663 668 if bn == 'default':
664 669 status = _("updating to bookmark @\n")
665 670 else:
666 671 status = (_("updating to bookmark @ on branch %s\n")
667 672 % bn)
668 673 except KeyError:
669 674 try:
670 675 uprev = destrepo.branchtip('default')
671 676 except error.RepoLookupError:
672 677 uprev = destrepo.lookup('tip')
673 678 if not status:
674 679 bn = destrepo[uprev].branch()
675 680 status = _("updating to branch %s\n") % bn
676 681 destrepo.ui.status(status)
677 682 _update(destrepo, uprev)
678 683 if update in destrepo._bookmarks:
679 684 bookmarks.activate(destrepo, update)
680 685 finally:
681 686 release(srclock, destlock)
682 687 if cleandir is not None:
683 688 shutil.rmtree(cleandir, True)
684 689 if srcpeer is not None:
685 690 srcpeer.close()
686 691 return srcpeer, destpeer
687 692
688 693 def _showstats(repo, stats, quietempty=False):
689 694 if quietempty and not any(stats):
690 695 return
691 696 repo.ui.status(_("%d files updated, %d files merged, "
692 697 "%d files removed, %d files unresolved\n") % stats)
693 698
694 699 def updaterepo(repo, node, overwrite, updatecheck=None):
695 700 """Update the working directory to node.
696 701
697 702 When overwrite is set, changes are clobbered, merged else
698 703
699 704 returns stats (see pydoc mercurial.merge.applyupdates)"""
700 705 return mergemod.update(repo, node, False, overwrite,
701 706 labels=['working copy', 'destination'],
702 707 updatecheck=updatecheck)
703 708
704 709 def update(repo, node, quietempty=False, updatecheck=None):
705 710 """update the working directory to node"""
706 711 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
707 712 _showstats(repo, stats, quietempty)
708 713 if stats[3]:
709 714 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
710 715 return stats[3] > 0
711 716
712 717 # naming conflict in clone()
713 718 _update = update
714 719
715 720 def clean(repo, node, show_stats=True, quietempty=False):
716 721 """forcibly switch the working directory to node, clobbering changes"""
717 722 stats = updaterepo(repo, node, True)
718 723 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
719 724 if show_stats:
720 725 _showstats(repo, stats, quietempty)
721 726 return stats[3] > 0
722 727
723 728 # naming conflict in updatetotally()
724 729 _clean = clean
725 730
726 731 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
727 732 """Update the working directory with extra care for non-file components
728 733
729 734 This takes care of non-file components below:
730 735
731 736 :bookmark: might be advanced or (in)activated
732 737
733 738 This takes arguments below:
734 739
735 740 :checkout: to which revision the working directory is updated
736 741 :brev: a name, which might be a bookmark to be activated after updating
737 742 :clean: whether changes in the working directory can be discarded
738 743 :updatecheck: how to deal with a dirty working directory
739 744
740 745 Valid values for updatecheck are (None => linear):
741 746
742 747 * abort: abort if the working directory is dirty
743 748 * none: don't check (merge working directory changes into destination)
744 749 * linear: check that update is linear before merging working directory
745 750 changes into destination
746 751 * noconflict: check that the update does not result in file merges
747 752
748 753 This returns whether conflict is detected at updating or not.
749 754 """
750 755 if updatecheck is None:
751 756 updatecheck = ui.config('experimental', 'updatecheck')
752 757 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
753 758 # If not configured, or invalid value configured
754 759 updatecheck = 'linear'
755 760 with repo.wlock():
756 761 movemarkfrom = None
757 762 warndest = False
758 763 if checkout is None:
759 764 updata = destutil.destupdate(repo, clean=clean)
760 765 checkout, movemarkfrom, brev = updata
761 766 warndest = True
762 767
763 768 if clean:
764 769 ret = _clean(repo, checkout)
765 770 else:
766 771 if updatecheck == 'abort':
767 772 cmdutil.bailifchanged(repo, merge=False)
768 773 updatecheck = 'none'
769 774 ret = _update(repo, checkout, updatecheck=updatecheck)
770 775
771 776 if not ret and movemarkfrom:
772 777 if movemarkfrom == repo['.'].node():
773 778 pass # no-op update
774 779 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
775 780 b = ui.label(repo._activebookmark, 'bookmarks.active')
776 781 ui.status(_("updating bookmark %s\n") % b)
777 782 else:
778 783 # this can happen with a non-linear update
779 784 b = ui.label(repo._activebookmark, 'bookmarks')
780 785 ui.status(_("(leaving bookmark %s)\n") % b)
781 786 bookmarks.deactivate(repo)
782 787 elif brev in repo._bookmarks:
783 788 if brev != repo._activebookmark:
784 789 b = ui.label(brev, 'bookmarks.active')
785 790 ui.status(_("(activating bookmark %s)\n") % b)
786 791 bookmarks.activate(repo, brev)
787 792 elif brev:
788 793 if repo._activebookmark:
789 794 b = ui.label(repo._activebookmark, 'bookmarks')
790 795 ui.status(_("(leaving bookmark %s)\n") % b)
791 796 bookmarks.deactivate(repo)
792 797
793 798 if warndest:
794 799 destutil.statusotherdests(ui, repo)
795 800
796 801 return ret
797 802
798 803 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
799 804 """Branch merge with node, resolving changes. Return true if any
800 805 unresolved conflicts."""
801 806 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
802 807 labels=labels)
803 808 _showstats(repo, stats)
804 809 if stats[3]:
805 810 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
806 811 "or 'hg update -C .' to abandon\n"))
807 812 elif remind:
808 813 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
809 814 return stats[3] > 0
810 815
811 816 def _incoming(displaychlist, subreporecurse, ui, repo, source,
812 817 opts, buffered=False):
813 818 """
814 819 Helper for incoming / gincoming.
815 820 displaychlist gets called with
816 821 (remoterepo, incomingchangesetlist, displayer) parameters,
817 822 and is supposed to contain only code that can't be unified.
818 823 """
819 824 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
820 825 other = peer(repo, opts, source)
821 826 ui.status(_('comparing with %s\n') % util.hidepassword(source))
822 827 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
823 828
824 829 if revs:
825 830 revs = [other.lookup(rev) for rev in revs]
826 831 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
827 832 revs, opts["bundle"], opts["force"])
828 833 try:
829 834 if not chlist:
830 835 ui.status(_("no changes found\n"))
831 836 return subreporecurse()
832 837 ui.pager('incoming')
833 838 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
834 839 displaychlist(other, chlist, displayer)
835 840 displayer.close()
836 841 finally:
837 842 cleanupfn()
838 843 subreporecurse()
839 844 return 0 # exit code is zero since we found incoming changes
840 845
841 846 def incoming(ui, repo, source, opts):
842 847 def subreporecurse():
843 848 ret = 1
844 849 if opts.get('subrepos'):
845 850 ctx = repo[None]
846 851 for subpath in sorted(ctx.substate):
847 852 sub = ctx.sub(subpath)
848 853 ret = min(ret, sub.incoming(ui, source, opts))
849 854 return ret
850 855
851 856 def display(other, chlist, displayer):
852 857 limit = cmdutil.loglimit(opts)
853 858 if opts.get('newest_first'):
854 859 chlist.reverse()
855 860 count = 0
856 861 for n in chlist:
857 862 if limit is not None and count >= limit:
858 863 break
859 864 parents = [p for p in other.changelog.parents(n) if p != nullid]
860 865 if opts.get('no_merges') and len(parents) == 2:
861 866 continue
862 867 count += 1
863 868 displayer.show(other[n])
864 869 return _incoming(display, subreporecurse, ui, repo, source, opts)
865 870
866 871 def _outgoing(ui, repo, dest, opts):
867 872 dest = ui.expandpath(dest or 'default-push', dest or 'default')
868 873 dest, branches = parseurl(dest, opts.get('branch'))
869 874 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
870 875 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
871 876 if revs:
872 877 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
873 878
874 879 other = peer(repo, opts, dest)
875 880 outgoing = discovery.findcommonoutgoing(repo, other, revs,
876 881 force=opts.get('force'))
877 882 o = outgoing.missing
878 883 if not o:
879 884 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
880 885 return o, other
881 886
882 887 def outgoing(ui, repo, dest, opts):
883 888 def recurse():
884 889 ret = 1
885 890 if opts.get('subrepos'):
886 891 ctx = repo[None]
887 892 for subpath in sorted(ctx.substate):
888 893 sub = ctx.sub(subpath)
889 894 ret = min(ret, sub.outgoing(ui, dest, opts))
890 895 return ret
891 896
892 897 limit = cmdutil.loglimit(opts)
893 898 o, other = _outgoing(ui, repo, dest, opts)
894 899 if not o:
895 900 cmdutil.outgoinghooks(ui, repo, other, opts, o)
896 901 return recurse()
897 902
898 903 if opts.get('newest_first'):
899 904 o.reverse()
900 905 ui.pager('outgoing')
901 906 displayer = cmdutil.show_changeset(ui, repo, opts)
902 907 count = 0
903 908 for n in o:
904 909 if limit is not None and count >= limit:
905 910 break
906 911 parents = [p for p in repo.changelog.parents(n) if p != nullid]
907 912 if opts.get('no_merges') and len(parents) == 2:
908 913 continue
909 914 count += 1
910 915 displayer.show(repo[n])
911 916 displayer.close()
912 917 cmdutil.outgoinghooks(ui, repo, other, opts, o)
913 918 recurse()
914 919 return 0 # exit code is zero since we found outgoing changes
915 920
916 921 def verify(repo):
917 922 """verify the consistency of a repository"""
918 923 ret = verifymod.verify(repo)
919 924
920 925 # Broken subrepo references in hidden csets don't seem worth worrying about,
921 926 # since they can't be pushed/pulled, and --hidden can be used if they are a
922 927 # concern.
923 928
924 929 # pathto() is needed for -R case
925 930 revs = repo.revs("filelog(%s)",
926 931 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
927 932
928 933 if revs:
929 934 repo.ui.status(_('checking subrepo links\n'))
930 935 for rev in revs:
931 936 ctx = repo[rev]
932 937 try:
933 938 for subpath in ctx.substate:
934 939 try:
935 940 ret = (ctx.sub(subpath, allowcreate=False).verify()
936 941 or ret)
937 942 except error.RepoError as e:
938 943 repo.ui.warn(('%s: %s\n') % (rev, e))
939 944 except Exception:
940 945 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
941 946 node.short(ctx.node()))
942 947
943 948 return ret
944 949
945 950 def remoteui(src, opts):
946 951 'build a remote ui from ui or repo and opts'
947 952 if util.safehasattr(src, 'baseui'): # looks like a repository
948 953 dst = src.baseui.copy() # drop repo-specific config
949 954 src = src.ui # copy target options from repo
950 955 else: # assume it's a global ui object
951 956 dst = src.copy() # keep all global options
952 957
953 958 # copy ssh-specific options
954 959 for o in 'ssh', 'remotecmd':
955 960 v = opts.get(o) or src.config('ui', o)
956 961 if v:
957 962 dst.setconfig("ui", o, v, 'copied')
958 963
959 964 # copy bundle-specific options
960 965 r = src.config('bundle', 'mainreporoot')
961 966 if r:
962 967 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
963 968
964 969 # copy selected local settings to the remote ui
965 970 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
966 971 for key, val in src.configitems(sect):
967 972 dst.setconfig(sect, key, val, 'copied')
968 973 v = src.config('web', 'cacerts')
969 974 if v:
970 975 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
971 976
972 977 return dst
973 978
974 979 # Files of interest
975 980 # Used to check if the repository has changed looking at mtime and size of
976 981 # these files.
977 982 foi = [('spath', '00changelog.i'),
978 983 ('spath', 'phaseroots'), # ! phase can change content at the same size
979 984 ('spath', 'obsstore'),
980 985 ('path', 'bookmarks'), # ! bookmark can change content at the same size
981 986 ]
982 987
983 988 class cachedlocalrepo(object):
984 989 """Holds a localrepository that can be cached and reused."""
985 990
986 991 def __init__(self, repo):
987 992 """Create a new cached repo from an existing repo.
988 993
989 994 We assume the passed in repo was recently created. If the
990 995 repo has changed between when it was created and when it was
991 996 turned into a cache, it may not refresh properly.
992 997 """
993 998 assert isinstance(repo, localrepo.localrepository)
994 999 self._repo = repo
995 1000 self._state, self.mtime = self._repostate()
996 1001 self._filtername = repo.filtername
997 1002
998 1003 def fetch(self):
999 1004 """Refresh (if necessary) and return a repository.
1000 1005
1001 1006 If the cached instance is out of date, it will be recreated
1002 1007 automatically and returned.
1003 1008
1004 1009 Returns a tuple of the repo and a boolean indicating whether a new
1005 1010 repo instance was created.
1006 1011 """
1007 1012 # We compare the mtimes and sizes of some well-known files to
1008 1013 # determine if the repo changed. This is not precise, as mtimes
1009 1014 # are susceptible to clock skew and imprecise filesystems and
1010 1015 # file content can change while maintaining the same size.
1011 1016
1012 1017 state, mtime = self._repostate()
1013 1018 if state == self._state:
1014 1019 return self._repo, False
1015 1020
1016 1021 repo = repository(self._repo.baseui, self._repo.url())
1017 1022 if self._filtername:
1018 1023 self._repo = repo.filtered(self._filtername)
1019 1024 else:
1020 1025 self._repo = repo.unfiltered()
1021 1026 self._state = state
1022 1027 self.mtime = mtime
1023 1028
1024 1029 return self._repo, True
1025 1030
1026 1031 def _repostate(self):
1027 1032 state = []
1028 1033 maxmtime = -1
1029 1034 for attr, fname in foi:
1030 1035 prefix = getattr(self._repo, attr)
1031 1036 p = os.path.join(prefix, fname)
1032 1037 try:
1033 1038 st = os.stat(p)
1034 1039 except OSError:
1035 1040 st = os.stat(prefix)
1036 1041 state.append((st.st_mtime, st.st_size))
1037 1042 maxmtime = max(maxmtime, st.st_mtime)
1038 1043
1039 1044 return tuple(state), maxmtime
1040 1045
1041 1046 def copy(self):
1042 1047 """Obtain a copy of this class instance.
1043 1048
1044 1049 A new localrepository instance is obtained. The new instance should be
1045 1050 completely independent of the original.
1046 1051 """
1047 1052 repo = repository(self._repo.baseui, self._repo.origroot)
1048 1053 if self._filtername:
1049 1054 repo = repo.filtered(self._filtername)
1050 1055 else:
1051 1056 repo = repo.unfiltered()
1052 1057 c = cachedlocalrepo(repo)
1053 1058 c._state = self._state
1054 1059 c.mtime = self.mtime
1055 1060 return c
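
A side benefit of moving the cache list behind a helper (a hypothetical sketch, not part of this changeset): an extension can now wrap _cachetocopy so that local clones carry extra cache files over. The extension module and its 'mycache' file name below are assumptions for illustration; extensions.wrapfunction is Mercurial's standard wrapping API.

    # myext.py - hypothetical extension sketch
    from mercurial import extensions, hg

    def _mycachetocopy(orig, srcrepo):
        # start from the stock list and append our own cache file,
        # which would live under .hg/cache/mycache in the source repo
        caches = orig(srcrepo)
        caches.append('mycache')
        return caches

    def uisetup(ui):
        # have every local clone also copy .hg/cache/mycache
        extensions.wrapfunction(hg, '_cachetocopy', _mycachetocopy)
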