hg: make cachedlocalrepo cache appropriate repoview object...
Author: FUJIWARA Katsunori
Changeset: r28119:91a827e7 (branch: default)
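The substance of the change is small: `cachedlocalrepo` previously rebuilt its cached repository with `repository()`, which always returns the default 'visible' repoview, so a cache created from another view (for example 'served') silently switched filters after a refresh. The patch records `repo.filtername` in `__init__` and re-applies it in `fetch()` and `copy()`. The standalone sketch below is illustrative only, with simplified stand-in names (`ViewableRepo`, `CachedRepo`, `make_repo`) that are not part of Mercurial; it shows the pattern of remembering a view filter and restoring it when the underlying object is recreated.

```python
# Simplified, self-contained sketch of the caching pattern this patch adopts.
# All names here are hypothetical; the real code is class cachedlocalrepo
# in mercurial/hg.py.

class ViewableRepo(object):
    """Stands in for a localrepository that can expose filtered views."""
    def __init__(self, filtername=None):
        self.filtername = filtername

    def filtered(self, name):
        # Return a view of the repo restricted by the named filter.
        return ViewableRepo(filtername=name)

    def unfiltered(self):
        return ViewableRepo(filtername=None)


class CachedRepo(object):
    def __init__(self, repo):
        self._repo = repo
        # Remember which repoview filter was active when the cache was built.
        self._filtername = repo.filtername

    def refresh(self, make_repo):
        """Recreate the cached repo, restoring the original view filter."""
        repo = make_repo()  # fresh object, default view
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        return self._repo


cache = CachedRepo(ViewableRepo(filtername='served'))
refreshed = cache.refresh(lambda: ViewableRepo(filtername='visible'))
assert refreshed.filtername == 'served'  # the original view survives a refresh
```

This mirrors what the diff below does in `fetch()` and `copy()`: instead of keeping whatever view `repository()` hands back, the cache re-applies the filter it was constructed with, or explicitly drops to the unfiltered repo when no filter was recorded.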
@@ -1,918 +1,927 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17
18 18 from . import (
19 19 bookmarks,
20 20 bundlerepo,
21 21 cmdutil,
22 22 discovery,
23 23 error,
24 24 exchange,
25 25 extensions,
26 26 httppeer,
27 27 localrepo,
28 28 lock,
29 29 merge as mergemod,
30 30 node,
31 31 phases,
32 32 repoview,
33 33 scmutil,
34 34 sshpeer,
35 35 statichttprepo,
36 36 ui as uimod,
37 37 unionrepo,
38 38 url,
39 39 util,
40 40 verify as verifymod,
41 41 )
42 42
43 43 release = lock.release
44 44
45 45 def _local(path):
46 46 path = util.expandpath(util.urllocalpath(path))
47 47 return (os.path.isfile(path) and bundlerepo or localrepo)
48 48
49 49 def addbranchrevs(lrepo, other, branches, revs):
50 50 peer = other.peer() # a courtesy to callers using a localrepo for other
51 51 hashbranch, branches = branches
52 52 if not hashbranch and not branches:
53 53 x = revs or None
54 54 if util.safehasattr(revs, 'first'):
55 55 y = revs.first()
56 56 elif revs:
57 57 y = revs[0]
58 58 else:
59 59 y = None
60 60 return x, y
61 61 if revs:
62 62 revs = list(revs)
63 63 else:
64 64 revs = []
65 65
66 66 if not peer.capable('branchmap'):
67 67 if branches:
68 68 raise error.Abort(_("remote branch lookup not supported"))
69 69 revs.append(hashbranch)
70 70 return revs, revs[0]
71 71 branchmap = peer.branchmap()
72 72
73 73 def primary(branch):
74 74 if branch == '.':
75 75 if not lrepo:
76 76 raise error.Abort(_("dirstate branch not accessible"))
77 77 branch = lrepo.dirstate.branch()
78 78 if branch in branchmap:
79 79 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
80 80 return True
81 81 else:
82 82 return False
83 83
84 84 for branch in branches:
85 85 if not primary(branch):
86 86 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
87 87 if hashbranch:
88 88 if not primary(hashbranch):
89 89 revs.append(hashbranch)
90 90 return revs, revs[0]
91 91
92 92 def parseurl(path, branches=None):
93 93 '''parse url#branch, returning (url, (branch, branches))'''
94 94
95 95 u = util.url(path)
96 96 branch = None
97 97 if u.fragment:
98 98 branch = u.fragment
99 99 u.fragment = None
100 100 return str(u), (branch, branches or [])
101 101
102 102 schemes = {
103 103 'bundle': bundlerepo,
104 104 'union': unionrepo,
105 105 'file': _local,
106 106 'http': httppeer,
107 107 'https': httppeer,
108 108 'ssh': sshpeer,
109 109 'static-http': statichttprepo,
110 110 }
111 111
112 112 def _peerlookup(path):
113 113 u = util.url(path)
114 114 scheme = u.scheme or 'file'
115 115 thing = schemes.get(scheme) or schemes['file']
116 116 try:
117 117 return thing(path)
118 118 except TypeError:
119 119 # we can't test callable(thing) because 'thing' can be an unloaded
120 120 # module that implements __call__
121 121 if not util.safehasattr(thing, 'instance'):
122 122 raise
123 123 return thing
124 124
125 125 def islocal(repo):
126 126 '''return true if repo (or path pointing to repo) is local'''
127 127 if isinstance(repo, str):
128 128 try:
129 129 return _peerlookup(repo).islocal(repo)
130 130 except AttributeError:
131 131 return False
132 132 return repo.local()
133 133
134 134 def openpath(ui, path):
135 135 '''open path with open if local, url.open if remote'''
136 136 pathurl = util.url(path, parsequery=False, parsefragment=False)
137 137 if pathurl.islocal():
138 138 return util.posixfile(pathurl.localpath(), 'rb')
139 139 else:
140 140 return url.open(ui, path)
141 141
142 142 # a list of (ui, repo) functions called for wire peer initialization
143 143 wirepeersetupfuncs = []
144 144
145 145 def _peerorrepo(ui, path, create=False):
146 146 """return a repository object for the specified path"""
147 147 obj = _peerlookup(path).instance(ui, path, create)
148 148 ui = getattr(obj, "ui", ui)
149 149 for name, module in extensions.extensions(ui):
150 150 hook = getattr(module, 'reposetup', None)
151 151 if hook:
152 152 hook(ui, obj)
153 153 if not obj.local():
154 154 for f in wirepeersetupfuncs:
155 155 f(ui, obj)
156 156 return obj
157 157
158 158 def repository(ui, path='', create=False):
159 159 """return a repository object for the specified path"""
160 160 peer = _peerorrepo(ui, path, create)
161 161 repo = peer.local()
162 162 if not repo:
163 163 raise error.Abort(_("repository '%s' is not local") %
164 164 (path or peer.url()))
165 165 return repo.filtered('visible')
166 166
167 167 def peer(uiorrepo, opts, path, create=False):
168 168 '''return a repository peer for the specified path'''
169 169 rui = remoteui(uiorrepo, opts)
170 170 return _peerorrepo(rui, path, create).peer()
171 171
172 172 def defaultdest(source):
173 173 '''return default destination of clone if none is given
174 174
175 175 >>> defaultdest('foo')
176 176 'foo'
177 177 >>> defaultdest('/foo/bar')
178 178 'bar'
179 179 >>> defaultdest('/')
180 180 ''
181 181 >>> defaultdest('')
182 182 ''
183 183 >>> defaultdest('http://example.org/')
184 184 ''
185 185 >>> defaultdest('http://example.org/foo/')
186 186 'foo'
187 187 '''
188 188 path = util.url(source).path
189 189 if not path:
190 190 return ''
191 191 return os.path.basename(os.path.normpath(path))
192 192
193 193 def share(ui, source, dest=None, update=True, bookmarks=True):
194 194 '''create a shared repository'''
195 195
196 196 if not islocal(source):
197 197 raise error.Abort(_('can only share local repositories'))
198 198
199 199 if not dest:
200 200 dest = defaultdest(source)
201 201 else:
202 202 dest = ui.expandpath(dest)
203 203
204 204 if isinstance(source, str):
205 205 origsource = ui.expandpath(source)
206 206 source, branches = parseurl(origsource)
207 207 srcrepo = repository(ui, source)
208 208 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
209 209 else:
210 210 srcrepo = source.local()
211 211 origsource = source = srcrepo.url()
212 212 checkout = None
213 213
214 214 sharedpath = srcrepo.sharedpath # if our source is already sharing
215 215
216 216 destwvfs = scmutil.vfs(dest, realpath=True)
217 217 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
218 218
219 219 if destvfs.lexists():
220 220 raise error.Abort(_('destination already exists'))
221 221
222 222 if not destwvfs.isdir():
223 223 destwvfs.mkdir()
224 224 destvfs.makedir()
225 225
226 226 requirements = ''
227 227 try:
228 228 requirements = srcrepo.vfs.read('requires')
229 229 except IOError as inst:
230 230 if inst.errno != errno.ENOENT:
231 231 raise
232 232
233 233 requirements += 'shared\n'
234 234 destvfs.write('requires', requirements)
235 235 destvfs.write('sharedpath', sharedpath)
236 236
237 237 r = repository(ui, destwvfs.base)
238 238 postshare(srcrepo, r, bookmarks=bookmarks)
239 239
240 240 if update:
241 241 r.ui.status(_("updating working directory\n"))
242 242 if update is not True:
243 243 checkout = update
244 244 for test in (checkout, 'default', 'tip'):
245 245 if test is None:
246 246 continue
247 247 try:
248 248 uprev = r.lookup(test)
249 249 break
250 250 except error.RepoLookupError:
251 251 continue
252 252 _update(r, uprev)
253 253
254 254 def postshare(sourcerepo, destrepo, bookmarks=True):
255 255 """Called after a new shared repo is created.
256 256
257 257 The new repo only has a requirements file and pointer to the source.
258 258 This function configures additional shared data.
259 259
260 260 Extensions can wrap this function and write additional entries to
261 261 destrepo/.hg/shared to indicate additional pieces of data to be shared.
262 262 """
263 263 default = sourcerepo.ui.config('paths', 'default')
264 264 if default:
265 265 fp = destrepo.vfs("hgrc", "w", text=True)
266 266 fp.write("[paths]\n")
267 267 fp.write("default = %s\n" % default)
268 268 fp.close()
269 269
270 270 if bookmarks:
271 271 fp = destrepo.vfs('shared', 'w')
272 272 fp.write('bookmarks\n')
273 273 fp.close()
274 274
275 275 def copystore(ui, srcrepo, destpath):
276 276 '''copy files from store of srcrepo in destpath
277 277
278 278 returns destlock
279 279 '''
280 280 destlock = None
281 281 try:
282 282 hardlink = None
283 283 num = 0
284 284 closetopic = [None]
285 285 def prog(topic, pos):
286 286 if pos is None:
287 287 closetopic[0] = topic
288 288 else:
289 289 ui.progress(topic, pos + num)
290 290 srcpublishing = srcrepo.publishing()
291 291 srcvfs = scmutil.vfs(srcrepo.sharedpath)
292 292 dstvfs = scmutil.vfs(destpath)
293 293 for f in srcrepo.store.copylist():
294 294 if srcpublishing and f.endswith('phaseroots'):
295 295 continue
296 296 dstbase = os.path.dirname(f)
297 297 if dstbase and not dstvfs.exists(dstbase):
298 298 dstvfs.mkdir(dstbase)
299 299 if srcvfs.exists(f):
300 300 if f.endswith('data'):
301 301 # 'dstbase' may be empty (e.g. revlog format 0)
302 302 lockfile = os.path.join(dstbase, "lock")
303 303 # lock to avoid premature writing to the target
304 304 destlock = lock.lock(dstvfs, lockfile)
305 305 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
306 306 hardlink, progress=prog)
307 307 num += n
308 308 if hardlink:
309 309 ui.debug("linked %d files\n" % num)
310 310 if closetopic[0]:
311 311 ui.progress(closetopic[0], None)
312 312 else:
313 313 ui.debug("copied %d files\n" % num)
314 314 if closetopic[0]:
315 315 ui.progress(closetopic[0], None)
316 316 return destlock
317 317 except: # re-raises
318 318 release(destlock)
319 319 raise
320 320
321 321 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
322 322 rev=None, update=True, stream=False):
323 323 """Perform a clone using a shared repo.
324 324
325 325 The store for the repository will be located at <sharepath>/.hg. The
326 326 specified revisions will be cloned or pulled from "source". A shared repo
327 327 will be created at "dest" and a working copy will be created if "update" is
328 328 True.
329 329 """
330 330 revs = None
331 331 if rev:
332 332 if not srcpeer.capable('lookup'):
333 333 raise error.Abort(_("src repository does not support "
334 334 "revision lookup and so doesn't "
335 335 "support clone by revision"))
336 336 revs = [srcpeer.lookup(r) for r in rev]
337 337
338 338 basename = os.path.basename(sharepath)
339 339
340 340 if os.path.exists(sharepath):
341 341 ui.status(_('(sharing from existing pooled repository %s)\n') %
342 342 basename)
343 343 else:
344 344 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
345 345 # Always use pull mode because hardlinks in share mode don't work well.
346 346 # Never update because working copies aren't necessary in share mode.
347 347 clone(ui, peeropts, source, dest=sharepath, pull=True,
348 348 rev=rev, update=False, stream=stream)
349 349
350 350 sharerepo = repository(ui, path=sharepath)
351 351 share(ui, sharerepo, dest=dest, update=update, bookmarks=False)
352 352
353 353 # We need to perform a pull against the dest repo to fetch bookmarks
354 354 # and other non-store data that isn't shared by default. In the case of
355 355 # non-existing shared repo, this means we pull from the remote twice. This
356 356 # is a bit weird. But at the time it was implemented, there wasn't an easy
357 357 # way to pull just non-changegroup data.
358 358 destrepo = repository(ui, path=dest)
359 359 exchange.pull(destrepo, srcpeer, heads=revs)
360 360
361 361 return srcpeer, peer(ui, peeropts, dest)
362 362
363 363 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
364 364 update=True, stream=False, branch=None, shareopts=None):
365 365 """Make a copy of an existing repository.
366 366
367 367 Create a copy of an existing repository in a new directory. The
368 368 source and destination are URLs, as passed to the repository
369 369 function. Returns a pair of repository peers, the source and
370 370 newly created destination.
371 371
372 372 The location of the source is added to the new repository's
373 373 .hg/hgrc file, as the default to be used for future pulls and
374 374 pushes.
375 375
376 376 If an exception is raised, the partly cloned/updated destination
377 377 repository will be deleted.
378 378
379 379 Arguments:
380 380
381 381 source: repository object or URL
382 382
383 383 dest: URL of destination repository to create (defaults to base
384 384 name of source repository)
385 385
386 386 pull: always pull from source repository, even in local case or if the
387 387 server prefers streaming
388 388
389 389 stream: stream raw data uncompressed from repository (fast over
390 390 LAN, slow over WAN)
391 391
392 392 rev: revision to clone up to (implies pull=True)
393 393
394 394 update: update working directory after clone completes, if
395 395 destination is local repository (True means update to default rev,
396 396 anything else is treated as a revision)
397 397
398 398 branch: branches to clone
399 399
400 400 shareopts: dict of options to control auto sharing behavior. The "pool" key
401 401 activates auto sharing mode and defines the directory for stores. The
402 402 "mode" key determines how to construct the directory name of the shared
403 403 repository. "identity" means the name is derived from the node of the first
404 404 changeset in the repository. "remote" means the name is derived from the
405 405 remote's path/URL. Defaults to "identity."
406 406 """
407 407
408 408 if isinstance(source, str):
409 409 origsource = ui.expandpath(source)
410 410 source, branch = parseurl(origsource, branch)
411 411 srcpeer = peer(ui, peeropts, source)
412 412 else:
413 413 srcpeer = source.peer() # in case we were called with a localrepo
414 414 branch = (None, branch or [])
415 415 origsource = source = srcpeer.url()
416 416 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
417 417
418 418 if dest is None:
419 419 dest = defaultdest(source)
420 420 if dest:
421 421 ui.status(_("destination directory: %s\n") % dest)
422 422 else:
423 423 dest = ui.expandpath(dest)
424 424
425 425 dest = util.urllocalpath(dest)
426 426 source = util.urllocalpath(source)
427 427
428 428 if not dest:
429 429 raise error.Abort(_("empty destination path is not valid"))
430 430
431 431 destvfs = scmutil.vfs(dest, expandpath=True)
432 432 if destvfs.lexists():
433 433 if not destvfs.isdir():
434 434 raise error.Abort(_("destination '%s' already exists") % dest)
435 435 elif destvfs.listdir():
436 436 raise error.Abort(_("destination '%s' is not empty") % dest)
437 437
438 438 shareopts = shareopts or {}
439 439 sharepool = shareopts.get('pool')
440 440 sharenamemode = shareopts.get('mode')
441 441 if sharepool and islocal(dest):
442 442 sharepath = None
443 443 if sharenamemode == 'identity':
444 444 # Resolve the name from the initial changeset in the remote
445 445 # repository. This returns nullid when the remote is empty. It
446 446 # raises RepoLookupError if revision 0 is filtered or otherwise
447 447 # not available. If we fail to resolve, sharing is not enabled.
448 448 try:
449 449 rootnode = srcpeer.lookup('0')
450 450 if rootnode != node.nullid:
451 451 sharepath = os.path.join(sharepool, node.hex(rootnode))
452 452 else:
453 453 ui.status(_('(not using pooled storage: '
454 454 'remote appears to be empty)\n'))
455 455 except error.RepoLookupError:
456 456 ui.status(_('(not using pooled storage: '
457 457 'unable to resolve identity of remote)\n'))
458 458 elif sharenamemode == 'remote':
459 459 sharepath = os.path.join(sharepool, util.sha1(source).hexdigest())
460 460 else:
461 461 raise error.Abort('unknown share naming mode: %s' % sharenamemode)
462 462
463 463 if sharepath:
464 464 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
465 465 dest, pull=pull, rev=rev, update=update,
466 466 stream=stream)
467 467
468 468 srclock = destlock = cleandir = None
469 469 srcrepo = srcpeer.local()
470 470 try:
471 471 abspath = origsource
472 472 if islocal(origsource):
473 473 abspath = os.path.abspath(util.urllocalpath(origsource))
474 474
475 475 if islocal(dest):
476 476 cleandir = dest
477 477
478 478 copy = False
479 479 if (srcrepo and srcrepo.cancopy() and islocal(dest)
480 480 and not phases.hassecret(srcrepo)):
481 481 copy = not pull and not rev
482 482
483 483 if copy:
484 484 try:
485 485 # we use a lock here because if we race with commit, we
486 486 # can end up with extra data in the cloned revlogs that's
487 487 # not pointed to by changesets, thus causing verify to
488 488 # fail
489 489 srclock = srcrepo.lock(wait=False)
490 490 except error.LockError:
491 491 copy = False
492 492
493 493 if copy:
494 494 srcrepo.hook('preoutgoing', throw=True, source='clone')
495 495 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
496 496 if not os.path.exists(dest):
497 497 os.mkdir(dest)
498 498 else:
499 499 # only clean up directories we create ourselves
500 500 cleandir = hgdir
501 501 try:
502 502 destpath = hgdir
503 503 util.makedir(destpath, notindexed=True)
504 504 except OSError as inst:
505 505 if inst.errno == errno.EEXIST:
506 506 cleandir = None
507 507 raise error.Abort(_("destination '%s' already exists")
508 508 % dest)
509 509 raise
510 510
511 511 destlock = copystore(ui, srcrepo, destpath)
512 512 # copy bookmarks over
513 513 srcbookmarks = srcrepo.join('bookmarks')
514 514 dstbookmarks = os.path.join(destpath, 'bookmarks')
515 515 if os.path.exists(srcbookmarks):
516 516 util.copyfile(srcbookmarks, dstbookmarks)
517 517
518 518 # Recomputing branch cache might be slow on big repos,
519 519 # so just copy it
520 520 def copybranchcache(fname):
521 521 srcbranchcache = srcrepo.join('cache/%s' % fname)
522 522 dstbranchcache = os.path.join(dstcachedir, fname)
523 523 if os.path.exists(srcbranchcache):
524 524 if not os.path.exists(dstcachedir):
525 525 os.mkdir(dstcachedir)
526 526 util.copyfile(srcbranchcache, dstbranchcache)
527 527
528 528 dstcachedir = os.path.join(destpath, 'cache')
529 529 # In local clones we're copying all nodes, not just served
530 530 # ones. Therefore copy all branch caches over.
531 531 copybranchcache('branch2')
532 532 for cachename in repoview.filtertable:
533 533 copybranchcache('branch2-%s' % cachename)
534 534
535 535 # we need to re-init the repo after manually copying the data
536 536 # into it
537 537 destpeer = peer(srcrepo, peeropts, dest)
538 538 srcrepo.hook('outgoing', source='clone',
539 539 node=node.hex(node.nullid))
540 540 else:
541 541 try:
542 542 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
543 543 # only pass ui when no srcrepo
544 544 except OSError as inst:
545 545 if inst.errno == errno.EEXIST:
546 546 cleandir = None
547 547 raise error.Abort(_("destination '%s' already exists")
548 548 % dest)
549 549 raise
550 550
551 551 revs = None
552 552 if rev:
553 553 if not srcpeer.capable('lookup'):
554 554 raise error.Abort(_("src repository does not support "
555 555 "revision lookup and so doesn't "
556 556 "support clone by revision"))
557 557 revs = [srcpeer.lookup(r) for r in rev]
558 558 checkout = revs[0]
559 559 local = destpeer.local()
560 560 if local:
561 561 if not stream:
562 562 if pull:
563 563 stream = False
564 564 else:
565 565 stream = None
566 566 # internal config: ui.quietbookmarkmove
567 567 quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
568 568 try:
569 569 local.ui.setconfig(
570 570 'ui', 'quietbookmarkmove', True, 'clone')
571 571 exchange.pull(local, srcpeer, revs,
572 572 streamclonerequested=stream)
573 573 finally:
574 574 local.ui.restoreconfig(quiet)
575 575 elif srcrepo:
576 576 exchange.push(srcrepo, destpeer, revs=revs,
577 577 bookmarks=srcrepo._bookmarks.keys())
578 578 else:
579 579 raise error.Abort(_("clone from remote to remote not supported")
580 580 )
581 581
582 582 cleandir = None
583 583
584 584 destrepo = destpeer.local()
585 585 if destrepo:
586 586 template = uimod.samplehgrcs['cloned']
587 587 fp = destrepo.vfs("hgrc", "w", text=True)
588 588 u = util.url(abspath)
589 589 u.passwd = None
590 590 defaulturl = str(u)
591 591 fp.write(template % defaulturl)
592 592 fp.close()
593 593
594 594 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
595 595
596 596 if update:
597 597 if update is not True:
598 598 checkout = srcpeer.lookup(update)
599 599 uprev = None
600 600 status = None
601 601 if checkout is not None:
602 602 try:
603 603 uprev = destrepo.lookup(checkout)
604 604 except error.RepoLookupError:
605 605 if update is not True:
606 606 try:
607 607 uprev = destrepo.lookup(update)
608 608 except error.RepoLookupError:
609 609 pass
610 610 if uprev is None:
611 611 try:
612 612 uprev = destrepo._bookmarks['@']
613 613 update = '@'
614 614 bn = destrepo[uprev].branch()
615 615 if bn == 'default':
616 616 status = _("updating to bookmark @\n")
617 617 else:
618 618 status = (_("updating to bookmark @ on branch %s\n")
619 619 % bn)
620 620 except KeyError:
621 621 try:
622 622 uprev = destrepo.branchtip('default')
623 623 except error.RepoLookupError:
624 624 uprev = destrepo.lookup('tip')
625 625 if not status:
626 626 bn = destrepo[uprev].branch()
627 627 status = _("updating to branch %s\n") % bn
628 628 destrepo.ui.status(status)
629 629 _update(destrepo, uprev)
630 630 if update in destrepo._bookmarks:
631 631 bookmarks.activate(destrepo, update)
632 632 finally:
633 633 release(srclock, destlock)
634 634 if cleandir is not None:
635 635 shutil.rmtree(cleandir, True)
636 636 if srcpeer is not None:
637 637 srcpeer.close()
638 638 return srcpeer, destpeer
639 639
640 640 def _showstats(repo, stats, quietempty=False):
641 641 if quietempty and not any(stats):
642 642 return
643 643 repo.ui.status(_("%d files updated, %d files merged, "
644 644 "%d files removed, %d files unresolved\n") % stats)
645 645
646 646 def updaterepo(repo, node, overwrite):
647 647 """Update the working directory to node.
648 648
649 649 When overwrite is set, changes are clobbered, merged else
650 650
651 651 returns stats (see pydoc mercurial.merge.applyupdates)"""
652 652 return mergemod.update(repo, node, False, overwrite,
653 653 labels=['working copy', 'destination'])
654 654
655 655 def update(repo, node, quietempty=False):
656 656 """update the working directory to node, merging linear changes"""
657 657 stats = updaterepo(repo, node, False)
658 658 _showstats(repo, stats, quietempty)
659 659 if stats[3]:
660 660 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
661 661 return stats[3] > 0
662 662
663 663 # naming conflict in clone()
664 664 _update = update
665 665
666 666 def clean(repo, node, show_stats=True, quietempty=False):
667 667 """forcibly switch the working directory to node, clobbering changes"""
668 668 stats = updaterepo(repo, node, True)
669 669 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
670 670 if show_stats:
671 671 _showstats(repo, stats, quietempty)
672 672 return stats[3] > 0
673 673
674 674 def merge(repo, node, force=None, remind=True, mergeforce=False):
675 675 """Branch merge with node, resolving changes. Return true if any
676 676 unresolved conflicts."""
677 677 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
678 678 _showstats(repo, stats)
679 679 if stats[3]:
680 680 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
681 681 "or 'hg update -C .' to abandon\n"))
682 682 elif remind:
683 683 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
684 684 return stats[3] > 0
685 685
686 686 def _incoming(displaychlist, subreporecurse, ui, repo, source,
687 687 opts, buffered=False):
688 688 """
689 689 Helper for incoming / gincoming.
690 690 displaychlist gets called with
691 691 (remoterepo, incomingchangesetlist, displayer) parameters,
692 692 and is supposed to contain only code that can't be unified.
693 693 """
694 694 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
695 695 other = peer(repo, opts, source)
696 696 ui.status(_('comparing with %s\n') % util.hidepassword(source))
697 697 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
698 698
699 699 if revs:
700 700 revs = [other.lookup(rev) for rev in revs]
701 701 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
702 702 revs, opts["bundle"], opts["force"])
703 703 try:
704 704 if not chlist:
705 705 ui.status(_("no changes found\n"))
706 706 return subreporecurse()
707 707
708 708 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
709 709 displaychlist(other, chlist, displayer)
710 710 displayer.close()
711 711 finally:
712 712 cleanupfn()
713 713 subreporecurse()
714 714 return 0 # exit code is zero since we found incoming changes
715 715
716 716 def incoming(ui, repo, source, opts):
717 717 def subreporecurse():
718 718 ret = 1
719 719 if opts.get('subrepos'):
720 720 ctx = repo[None]
721 721 for subpath in sorted(ctx.substate):
722 722 sub = ctx.sub(subpath)
723 723 ret = min(ret, sub.incoming(ui, source, opts))
724 724 return ret
725 725
726 726 def display(other, chlist, displayer):
727 727 limit = cmdutil.loglimit(opts)
728 728 if opts.get('newest_first'):
729 729 chlist.reverse()
730 730 count = 0
731 731 for n in chlist:
732 732 if limit is not None and count >= limit:
733 733 break
734 734 parents = [p for p in other.changelog.parents(n) if p != nullid]
735 735 if opts.get('no_merges') and len(parents) == 2:
736 736 continue
737 737 count += 1
738 738 displayer.show(other[n])
739 739 return _incoming(display, subreporecurse, ui, repo, source, opts)
740 740
741 741 def _outgoing(ui, repo, dest, opts):
742 742 dest = ui.expandpath(dest or 'default-push', dest or 'default')
743 743 dest, branches = parseurl(dest, opts.get('branch'))
744 744 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
745 745 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
746 746 if revs:
747 747 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
748 748
749 749 other = peer(repo, opts, dest)
750 750 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
751 751 force=opts.get('force'))
752 752 o = outgoing.missing
753 753 if not o:
754 754 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
755 755 return o, other
756 756
757 757 def outgoing(ui, repo, dest, opts):
758 758 def recurse():
759 759 ret = 1
760 760 if opts.get('subrepos'):
761 761 ctx = repo[None]
762 762 for subpath in sorted(ctx.substate):
763 763 sub = ctx.sub(subpath)
764 764 ret = min(ret, sub.outgoing(ui, dest, opts))
765 765 return ret
766 766
767 767 limit = cmdutil.loglimit(opts)
768 768 o, other = _outgoing(ui, repo, dest, opts)
769 769 if not o:
770 770 cmdutil.outgoinghooks(ui, repo, other, opts, o)
771 771 return recurse()
772 772
773 773 if opts.get('newest_first'):
774 774 o.reverse()
775 775 displayer = cmdutil.show_changeset(ui, repo, opts)
776 776 count = 0
777 777 for n in o:
778 778 if limit is not None and count >= limit:
779 779 break
780 780 parents = [p for p in repo.changelog.parents(n) if p != nullid]
781 781 if opts.get('no_merges') and len(parents) == 2:
782 782 continue
783 783 count += 1
784 784 displayer.show(repo[n])
785 785 displayer.close()
786 786 cmdutil.outgoinghooks(ui, repo, other, opts, o)
787 787 recurse()
788 788 return 0 # exit code is zero since we found outgoing changes
789 789
790 790 def verify(repo):
791 791 """verify the consistency of a repository"""
792 792 ret = verifymod.verify(repo)
793 793
794 794 # Broken subrepo references in hidden csets don't seem worth worrying about,
795 795 # since they can't be pushed/pulled, and --hidden can be used if they are a
796 796 # concern.
797 797
798 798 # pathto() is needed for -R case
799 799 revs = repo.revs("filelog(%s)",
800 800 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
801 801
802 802 if revs:
803 803 repo.ui.status(_('checking subrepo links\n'))
804 804 for rev in revs:
805 805 ctx = repo[rev]
806 806 try:
807 807 for subpath in ctx.substate:
808 808 ret = ctx.sub(subpath).verify() or ret
809 809 except Exception:
810 810 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
811 811 node.short(ctx.node()))
812 812
813 813 return ret
814 814
815 815 def remoteui(src, opts):
816 816 'build a remote ui from ui or repo and opts'
817 817 if util.safehasattr(src, 'baseui'): # looks like a repository
818 818 dst = src.baseui.copy() # drop repo-specific config
819 819 src = src.ui # copy target options from repo
820 820 else: # assume it's a global ui object
821 821 dst = src.copy() # keep all global options
822 822
823 823 # copy ssh-specific options
824 824 for o in 'ssh', 'remotecmd':
825 825 v = opts.get(o) or src.config('ui', o)
826 826 if v:
827 827 dst.setconfig("ui", o, v, 'copied')
828 828
829 829 # copy bundle-specific options
830 830 r = src.config('bundle', 'mainreporoot')
831 831 if r:
832 832 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
833 833
834 834 # copy selected local settings to the remote ui
835 835 for sect in ('auth', 'hostfingerprints', 'http_proxy'):
836 836 for key, val in src.configitems(sect):
837 837 dst.setconfig(sect, key, val, 'copied')
838 838 v = src.config('web', 'cacerts')
839 839 if v == '!':
840 840 dst.setconfig('web', 'cacerts', v, 'copied')
841 841 elif v:
842 842 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
843 843
844 844 return dst
845 845
846 846 # Files of interest
847 847 # Used to check if the repository has changed looking at mtime and size of
848 848 # these files.
849 849 foi = [('spath', '00changelog.i'),
850 850 ('spath', 'phaseroots'), # ! phase can change content at the same size
851 851 ('spath', 'obsstore'),
852 852 ('path', 'bookmarks'), # ! bookmark can change content at the same size
853 853 ]
854 854
855 855 class cachedlocalrepo(object):
856 856 """Holds a localrepository that can be cached and reused."""
857 857
858 858 def __init__(self, repo):
859 859 """Create a new cached repo from an existing repo.
860 860
861 861 We assume the passed in repo was recently created. If the
862 862 repo has changed between when it was created and when it was
863 863 turned into a cache, it may not refresh properly.
864 864 """
865 865 assert isinstance(repo, localrepo.localrepository)
866 866 self._repo = repo
867 867 self._state, self.mtime = self._repostate()
868 self._filtername = repo.filtername
868 869
869 870 def fetch(self):
870 871 """Refresh (if necessary) and return a repository.
871 872
872 873 If the cached instance is out of date, it will be recreated
873 874 automatically and returned.
874 875
875 876 Returns a tuple of the repo and a boolean indicating whether a new
876 877 repo instance was created.
877 878 """
878 879 # We compare the mtimes and sizes of some well-known files to
879 880 # determine if the repo changed. This is not precise, as mtimes
880 881 # are susceptible to clock skew and imprecise filesystems and
881 882 # file content can change while maintaining the same size.
882 883
883 884 state, mtime = self._repostate()
884 885 if state == self._state:
885 886 return self._repo, False
886 887
887 self._repo = repository(self._repo.baseui, self._repo.url())
888 repo = repository(self._repo.baseui, self._repo.url())
889 if self._filtername:
890 self._repo = repo.filtered(self._filtername)
891 else:
892 self._repo = repo.unfiltered()
888 893 self._state = state
889 894 self.mtime = mtime
890 895
891 896 return self._repo, True
892 897
893 898 def _repostate(self):
894 899 state = []
895 900 maxmtime = -1
896 901 for attr, fname in foi:
897 902 prefix = getattr(self._repo, attr)
898 903 p = os.path.join(prefix, fname)
899 904 try:
900 905 st = os.stat(p)
901 906 except OSError:
902 907 st = os.stat(prefix)
903 908 state.append((st.st_mtime, st.st_size))
904 909 maxmtime = max(maxmtime, st.st_mtime)
905 910
906 911 return tuple(state), maxmtime
907 912
908 913 def copy(self):
909 914 """Obtain a copy of this class instance.
910 915
911 916 A new localrepository instance is obtained. The new instance should be
912 917 completely independent of the original.
913 918 """
914 919 repo = repository(self._repo.baseui, self._repo.origroot)
920 if self._filtername:
921 repo = repo.filtered(self._filtername)
922 else:
923 repo = repo.unfiltered()
915 924 c = cachedlocalrepo(repo)
916 925 c._state = self._state
917 926 c.mtime = self.mtime
918 927 return c