caches: make 'cachetocopy' available in scmutil...
Boris Feld
r35784:72fdd99e default
@@ -0,0 +1,21 @@
1 # scmutil.py - Mercurial core utility functions
2 #
3 # Copyright Matt Mackall <mpm@selenic.com> and others
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
8
9 from . import repoview
10
11 def cachetocopy(srcrepo):
12 """return the list of cache file valuable to copy during a clone"""
13 # In local clones we're copying all nodes, not just served
14 # ones. Therefore copy all branch caches over.
15 cachefiles = ['branch2']
16 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
17 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
18 cachefiles += ['tags2']
19 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
20 cachefiles += ['hgtagsfnodes1']
21 return cachefiles
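For reference, the new helper only computes a list of cache file names; copying them remains the caller's job. Below is a minimal, hypothetical sketch of how a caller could consume it, mirroring the _copycache() loop in hg.py further down. The function name copyrepocaches and the dstcachedir argument are illustrative assumptions, not part of this changeset:

import os

from mercurial import cacheutil, util

def copyrepocaches(srcrepo, dstcachedir):
    # Copy every cache file that cachetocopy() considers worth carrying
    # over into a fresh local clone, skipping files the source lacks.
    for name in cacheutil.cachetocopy(srcrepo):
        srccache = srcrepo.vfs.join('cache/%s' % name)
        if os.path.exists(srccache):
            if not os.path.exists(dstcachedir):
                os.mkdir(dstcachedir)
            util.copyfile(srccache, os.path.join(dstcachedir, name))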
@@ -1,1127 +1,1115 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 nullid,
19 19 )
20 20
21 21 from . import (
22 22 bookmarks,
23 23 bundlerepo,
24 cacheutil,
24 25 cmdutil,
25 26 destutil,
26 27 discovery,
27 28 error,
28 29 exchange,
29 30 extensions,
30 31 httppeer,
31 32 localrepo,
32 33 lock,
33 34 logexchange,
34 35 merge as mergemod,
35 36 node,
36 37 phases,
37 repoview,
38 38 scmutil,
39 39 sshpeer,
40 40 statichttprepo,
41 41 ui as uimod,
42 42 unionrepo,
43 43 url,
44 44 util,
45 45 verify as verifymod,
46 46 vfs as vfsmod,
47 47 )
48 48
49 49 release = lock.release
50 50
51 51 # shared features
52 52 sharedbookmarks = 'bookmarks'
53 53
54 54 def _local(path):
55 55 path = util.expandpath(util.urllocalpath(path))
56 56 return (os.path.isfile(path) and bundlerepo or localrepo)
57 57
58 58 def addbranchrevs(lrepo, other, branches, revs):
59 59 peer = other.peer() # a courtesy to callers using a localrepo for other
60 60 hashbranch, branches = branches
61 61 if not hashbranch and not branches:
62 62 x = revs or None
63 63 if util.safehasattr(revs, 'first'):
64 64 y = revs.first()
65 65 elif revs:
66 66 y = revs[0]
67 67 else:
68 68 y = None
69 69 return x, y
70 70 if revs:
71 71 revs = list(revs)
72 72 else:
73 73 revs = []
74 74
75 75 if not peer.capable('branchmap'):
76 76 if branches:
77 77 raise error.Abort(_("remote branch lookup not supported"))
78 78 revs.append(hashbranch)
79 79 return revs, revs[0]
80 80 branchmap = peer.branchmap()
81 81
82 82 def primary(branch):
83 83 if branch == '.':
84 84 if not lrepo:
85 85 raise error.Abort(_("dirstate branch not accessible"))
86 86 branch = lrepo.dirstate.branch()
87 87 if branch in branchmap:
88 88 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
89 89 return True
90 90 else:
91 91 return False
92 92
93 93 for branch in branches:
94 94 if not primary(branch):
95 95 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
96 96 if hashbranch:
97 97 if not primary(hashbranch):
98 98 revs.append(hashbranch)
99 99 return revs, revs[0]
100 100
101 101 def parseurl(path, branches=None):
102 102 '''parse url#branch, returning (url, (branch, branches))'''
103 103
104 104 u = util.url(path)
105 105 branch = None
106 106 if u.fragment:
107 107 branch = u.fragment
108 108 u.fragment = None
109 109 return bytes(u), (branch, branches or [])
110 110
111 111 schemes = {
112 112 'bundle': bundlerepo,
113 113 'union': unionrepo,
114 114 'file': _local,
115 115 'http': httppeer,
116 116 'https': httppeer,
117 117 'ssh': sshpeer,
118 118 'static-http': statichttprepo,
119 119 }
120 120
121 121 def _peerlookup(path):
122 122 u = util.url(path)
123 123 scheme = u.scheme or 'file'
124 124 thing = schemes.get(scheme) or schemes['file']
125 125 try:
126 126 return thing(path)
127 127 except TypeError:
128 128 # we can't test callable(thing) because 'thing' can be an unloaded
129 129 # module that implements __call__
130 130 if not util.safehasattr(thing, 'instance'):
131 131 raise
132 132 return thing
133 133
134 134 def islocal(repo):
135 135 '''return true if repo (or path pointing to repo) is local'''
136 136 if isinstance(repo, bytes):
137 137 try:
138 138 return _peerlookup(repo).islocal(repo)
139 139 except AttributeError:
140 140 return False
141 141 return repo.local()
142 142
143 143 def openpath(ui, path):
144 144 '''open path with open if local, url.open if remote'''
145 145 pathurl = util.url(path, parsequery=False, parsefragment=False)
146 146 if pathurl.islocal():
147 147 return util.posixfile(pathurl.localpath(), 'rb')
148 148 else:
149 149 return url.open(ui, path)
150 150
151 151 # a list of (ui, repo) functions called for wire peer initialization
152 152 wirepeersetupfuncs = []
153 153
154 154 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
155 155 """return a repository object for the specified path"""
156 156 obj = _peerlookup(path).instance(ui, path, create)
157 157 ui = getattr(obj, "ui", ui)
158 158 for f in presetupfuncs or []:
159 159 f(ui, obj)
160 160 for name, module in extensions.extensions(ui):
161 161 hook = getattr(module, 'reposetup', None)
162 162 if hook:
163 163 hook(ui, obj)
164 164 if not obj.local():
165 165 for f in wirepeersetupfuncs:
166 166 f(ui, obj)
167 167 return obj
168 168
169 169 def repository(ui, path='', create=False, presetupfuncs=None):
170 170 """return a repository object for the specified path"""
171 171 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
172 172 repo = peer.local()
173 173 if not repo:
174 174 raise error.Abort(_("repository '%s' is not local") %
175 175 (path or peer.url()))
176 176 return repo.filtered('visible')
177 177
178 178 def peer(uiorrepo, opts, path, create=False):
179 179 '''return a repository peer for the specified path'''
180 180 rui = remoteui(uiorrepo, opts)
181 181 return _peerorrepo(rui, path, create).peer()
182 182
183 183 def defaultdest(source):
184 184 '''return default destination of clone if none is given
185 185
186 186 >>> defaultdest(b'foo')
187 187 'foo'
188 188 >>> defaultdest(b'/foo/bar')
189 189 'bar'
190 190 >>> defaultdest(b'/')
191 191 ''
192 192 >>> defaultdest(b'')
193 193 ''
194 194 >>> defaultdest(b'http://example.org/')
195 195 ''
196 196 >>> defaultdest(b'http://example.org/foo/')
197 197 'foo'
198 198 '''
199 199 path = util.url(source).path
200 200 if not path:
201 201 return ''
202 202 return os.path.basename(os.path.normpath(path))
203 203
204 204 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
205 205 relative=False):
206 206 '''create a shared repository'''
207 207
208 208 if not islocal(source):
209 209 raise error.Abort(_('can only share local repositories'))
210 210
211 211 if not dest:
212 212 dest = defaultdest(source)
213 213 else:
214 214 dest = ui.expandpath(dest)
215 215
216 216 if isinstance(source, str):
217 217 origsource = ui.expandpath(source)
218 218 source, branches = parseurl(origsource)
219 219 srcrepo = repository(ui, source)
220 220 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
221 221 else:
222 222 srcrepo = source.local()
223 223 origsource = source = srcrepo.url()
224 224 checkout = None
225 225
226 226 sharedpath = srcrepo.sharedpath # if our source is already sharing
227 227
228 228 destwvfs = vfsmod.vfs(dest, realpath=True)
229 229 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
230 230
231 231 if destvfs.lexists():
232 232 raise error.Abort(_('destination already exists'))
233 233
234 234 if not destwvfs.isdir():
235 235 destwvfs.mkdir()
236 236 destvfs.makedir()
237 237
238 238 requirements = ''
239 239 try:
240 240 requirements = srcrepo.vfs.read('requires')
241 241 except IOError as inst:
242 242 if inst.errno != errno.ENOENT:
243 243 raise
244 244
245 245 if relative:
246 246 try:
247 247 sharedpath = os.path.relpath(sharedpath, destvfs.base)
248 248 requirements += 'relshared\n'
249 249 except (IOError, ValueError) as e:
250 250 # ValueError is raised on Windows if the drive letters differ on
251 251 # each path
252 252 raise error.Abort(_('cannot calculate relative path'),
253 253 hint=str(e))
254 254 else:
255 255 requirements += 'shared\n'
256 256
257 257 destvfs.write('requires', requirements)
258 258 destvfs.write('sharedpath', sharedpath)
259 259
260 260 r = repository(ui, destwvfs.base)
261 261 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
262 262 _postshareupdate(r, update, checkout=checkout)
263 263 return r
264 264
265 265 def unshare(ui, repo):
266 266 """convert a shared repository to a normal one
267 267
268 268 Copy the store data to the repo and remove the sharedpath data.
269 269 """
270 270
271 271 destlock = lock = None
272 272 lock = repo.lock()
273 273 try:
274 274 # we use locks here because if we race with commit, we
275 275 # can end up with extra data in the cloned revlogs that's
276 276 # not pointed to by changesets, thus causing verify to
277 277 # fail
278 278
279 279 destlock = copystore(ui, repo, repo.path)
280 280
281 281 sharefile = repo.vfs.join('sharedpath')
282 282 util.rename(sharefile, sharefile + '.old')
283 283
284 284 repo.requirements.discard('shared')
285 285 repo.requirements.discard('relshared')
286 286 repo._writerequirements()
287 287 finally:
288 288 destlock and destlock.release()
289 289 lock and lock.release()
290 290
291 291 # update store, spath, svfs and sjoin of repo
292 292 repo.unfiltered().__init__(repo.baseui, repo.root)
293 293
294 294 # TODO: figure out how to access subrepos that exist, but were previously
295 295 # removed from .hgsub
296 296 c = repo['.']
297 297 subs = c.substate
298 298 for s in sorted(subs):
299 299 c.sub(s).unshare()
300 300
301 301 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
302 302 """Called after a new shared repo is created.
303 303
304 304 The new repo only has a requirements file and pointer to the source.
305 305 This function configures additional shared data.
306 306
307 307 Extensions can wrap this function and write additional entries to
308 308 destrepo/.hg/shared to indicate additional pieces of data to be shared.
309 309 """
310 310 default = defaultpath or sourcerepo.ui.config('paths', 'default')
311 311 if default:
312 312 template = ('[paths]\n'
313 313 'default = %s\n')
314 314 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
315 315
316 316 with destrepo.wlock():
317 317 if bookmarks:
318 318 destrepo.vfs.write('shared', sharedbookmarks + '\n')
319 319
320 320 def _postshareupdate(repo, update, checkout=None):
321 321 """Maybe perform a working directory update after a shared repo is created.
322 322
323 323 ``update`` can be a boolean or a revision to update to.
324 324 """
325 325 if not update:
326 326 return
327 327
328 328 repo.ui.status(_("updating working directory\n"))
329 329 if update is not True:
330 330 checkout = update
331 331 for test in (checkout, 'default', 'tip'):
332 332 if test is None:
333 333 continue
334 334 try:
335 335 uprev = repo.lookup(test)
336 336 break
337 337 except error.RepoLookupError:
338 338 continue
339 339 _update(repo, uprev)
340 340
341 341 def copystore(ui, srcrepo, destpath):
342 342 '''copy files from store of srcrepo in destpath
343 343
344 344 returns destlock
345 345 '''
346 346 destlock = None
347 347 try:
348 348 hardlink = None
349 349 num = 0
350 350 closetopic = [None]
351 351 def prog(topic, pos):
352 352 if pos is None:
353 353 closetopic[0] = topic
354 354 else:
355 355 ui.progress(topic, pos + num)
356 356 srcpublishing = srcrepo.publishing()
357 357 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
358 358 dstvfs = vfsmod.vfs(destpath)
359 359 for f in srcrepo.store.copylist():
360 360 if srcpublishing and f.endswith('phaseroots'):
361 361 continue
362 362 dstbase = os.path.dirname(f)
363 363 if dstbase and not dstvfs.exists(dstbase):
364 364 dstvfs.mkdir(dstbase)
365 365 if srcvfs.exists(f):
366 366 if f.endswith('data'):
367 367 # 'dstbase' may be empty (e.g. revlog format 0)
368 368 lockfile = os.path.join(dstbase, "lock")
369 369 # lock to avoid premature writing to the target
370 370 destlock = lock.lock(dstvfs, lockfile)
371 371 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
372 372 hardlink, progress=prog)
373 373 num += n
374 374 if hardlink:
375 375 ui.debug("linked %d files\n" % num)
376 376 if closetopic[0]:
377 377 ui.progress(closetopic[0], None)
378 378 else:
379 379 ui.debug("copied %d files\n" % num)
380 380 if closetopic[0]:
381 381 ui.progress(closetopic[0], None)
382 382 return destlock
383 383 except: # re-raises
384 384 release(destlock)
385 385 raise
386 386
387 387 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
388 388 rev=None, update=True, stream=False):
389 389 """Perform a clone using a shared repo.
390 390
391 391 The store for the repository will be located at <sharepath>/.hg. The
392 392 specified revisions will be cloned or pulled from "source". A shared repo
393 393 will be created at "dest" and a working copy will be created if "update" is
394 394 True.
395 395 """
396 396 revs = None
397 397 if rev:
398 398 if not srcpeer.capable('lookup'):
399 399 raise error.Abort(_("src repository does not support "
400 400 "revision lookup and so doesn't "
401 401 "support clone by revision"))
402 402 revs = [srcpeer.lookup(r) for r in rev]
403 403
404 404 # Obtain a lock before checking for or cloning the pooled repo otherwise
405 405 # 2 clients may race creating or populating it.
406 406 pooldir = os.path.dirname(sharepath)
407 407 # lock class requires the directory to exist.
408 408 try:
409 409 util.makedir(pooldir, False)
410 410 except OSError as e:
411 411 if e.errno != errno.EEXIST:
412 412 raise
413 413
414 414 poolvfs = vfsmod.vfs(pooldir)
415 415 basename = os.path.basename(sharepath)
416 416
417 417 with lock.lock(poolvfs, '%s.lock' % basename):
418 418 if os.path.exists(sharepath):
419 419 ui.status(_('(sharing from existing pooled repository %s)\n') %
420 420 basename)
421 421 else:
422 422 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
423 423 # Always use pull mode because hardlinks in share mode don't work
424 424 # well. Never update because working copies aren't necessary in
425 425 # share mode.
426 426 clone(ui, peeropts, source, dest=sharepath, pull=True,
427 427 rev=rev, update=False, stream=stream)
428 428
429 429 # Resolve the value to put in [paths] section for the source.
430 430 if islocal(source):
431 431 defaultpath = os.path.abspath(util.urllocalpath(source))
432 432 else:
433 433 defaultpath = source
434 434
435 435 sharerepo = repository(ui, path=sharepath)
436 436 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
437 437 defaultpath=defaultpath)
438 438
439 439 # We need to perform a pull against the dest repo to fetch bookmarks
440 440 # and other non-store data that isn't shared by default. In the case of
441 441 # non-existing shared repo, this means we pull from the remote twice. This
442 442 # is a bit weird. But at the time it was implemented, there wasn't an easy
443 443 # way to pull just non-changegroup data.
444 444 destrepo = repository(ui, path=dest)
445 445 exchange.pull(destrepo, srcpeer, heads=revs)
446 446
447 447 _postshareupdate(destrepo, update)
448 448
449 449 return srcpeer, peer(ui, peeropts, dest)
450 450
451 451 # Recomputing branch cache might be slow on big repos,
452 452 # so just copy it
453 453 def _copycache(srcrepo, dstcachedir, fname):
454 454 """copy a cache from srcrepo to destcachedir (if it exists)"""
455 455 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
456 456 dstbranchcache = os.path.join(dstcachedir, fname)
457 457 if os.path.exists(srcbranchcache):
458 458 if not os.path.exists(dstcachedir):
459 459 os.mkdir(dstcachedir)
460 460 util.copyfile(srcbranchcache, dstbranchcache)
461 461
462 def _cachetocopy(srcrepo):
463 """return the list of cache file valuable to copy during a clone"""
464 # In local clones we're copying all nodes, not just served
465 # ones. Therefore copy all branch caches over.
466 cachefiles = ['branch2']
467 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
468 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
469 cachefiles += ['tags2']
470 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
471 cachefiles += ['hgtagsfnodes1']
472 return cachefiles
473
474 462 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
475 463 update=True, stream=False, branch=None, shareopts=None):
476 464 """Make a copy of an existing repository.
477 465
478 466 Create a copy of an existing repository in a new directory. The
479 467 source and destination are URLs, as passed to the repository
480 468 function. Returns a pair of repository peers, the source and
481 469 newly created destination.
482 470
483 471 The location of the source is added to the new repository's
484 472 .hg/hgrc file, as the default to be used for future pulls and
485 473 pushes.
486 474
487 475 If an exception is raised, the partly cloned/updated destination
488 476 repository will be deleted.
489 477
490 478 Arguments:
491 479
492 480 source: repository object or URL
493 481
494 482 dest: URL of destination repository to create (defaults to base
495 483 name of source repository)
496 484
497 485 pull: always pull from source repository, even in local case or if the
498 486 server prefers streaming
499 487
500 488 stream: stream raw data uncompressed from repository (fast over
501 489 LAN, slow over WAN)
502 490
503 491 rev: revision to clone up to (implies pull=True)
504 492
505 493 update: update working directory after clone completes, if
506 494 destination is local repository (True means update to default rev,
507 495 anything else is treated as a revision)
508 496
509 497 branch: branches to clone
510 498
511 499 shareopts: dict of options to control auto sharing behavior. The "pool" key
512 500 activates auto sharing mode and defines the directory for stores. The
513 501 "mode" key determines how to construct the directory name of the shared
514 502 repository. "identity" means the name is derived from the node of the first
515 503 changeset in the repository. "remote" means the name is derived from the
516 504 remote's path/URL. Defaults to "identity."
517 505 """
518 506
519 507 if isinstance(source, bytes):
520 508 origsource = ui.expandpath(source)
521 509 source, branch = parseurl(origsource, branch)
522 510 srcpeer = peer(ui, peeropts, source)
523 511 else:
524 512 srcpeer = source.peer() # in case we were called with a localrepo
525 513 branch = (None, branch or [])
526 514 origsource = source = srcpeer.url()
527 515 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
528 516
529 517 if dest is None:
530 518 dest = defaultdest(source)
531 519 if dest:
532 520 ui.status(_("destination directory: %s\n") % dest)
533 521 else:
534 522 dest = ui.expandpath(dest)
535 523
536 524 dest = util.urllocalpath(dest)
537 525 source = util.urllocalpath(source)
538 526
539 527 if not dest:
540 528 raise error.Abort(_("empty destination path is not valid"))
541 529
542 530 destvfs = vfsmod.vfs(dest, expandpath=True)
543 531 if destvfs.lexists():
544 532 if not destvfs.isdir():
545 533 raise error.Abort(_("destination '%s' already exists") % dest)
546 534 elif destvfs.listdir():
547 535 raise error.Abort(_("destination '%s' is not empty") % dest)
548 536
549 537 shareopts = shareopts or {}
550 538 sharepool = shareopts.get('pool')
551 539 sharenamemode = shareopts.get('mode')
552 540 if sharepool and islocal(dest):
553 541 sharepath = None
554 542 if sharenamemode == 'identity':
555 543 # Resolve the name from the initial changeset in the remote
556 544 # repository. This returns nullid when the remote is empty. It
557 545 # raises RepoLookupError if revision 0 is filtered or otherwise
558 546 # not available. If we fail to resolve, sharing is not enabled.
559 547 try:
560 548 rootnode = srcpeer.lookup('0')
561 549 if rootnode != node.nullid:
562 550 sharepath = os.path.join(sharepool, node.hex(rootnode))
563 551 else:
564 552 ui.status(_('(not using pooled storage: '
565 553 'remote appears to be empty)\n'))
566 554 except error.RepoLookupError:
567 555 ui.status(_('(not using pooled storage: '
568 556 'unable to resolve identity of remote)\n'))
569 557 elif sharenamemode == 'remote':
570 558 sharepath = os.path.join(
571 559 sharepool, node.hex(hashlib.sha1(source).digest()))
572 560 else:
573 561 raise error.Abort(_('unknown share naming mode: %s') %
574 562 sharenamemode)
575 563
576 564 if sharepath:
577 565 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
578 566 dest, pull=pull, rev=rev, update=update,
579 567 stream=stream)
580 568
581 569 srclock = destlock = cleandir = None
582 570 srcrepo = srcpeer.local()
583 571 try:
584 572 abspath = origsource
585 573 if islocal(origsource):
586 574 abspath = os.path.abspath(util.urllocalpath(origsource))
587 575
588 576 if islocal(dest):
589 577 cleandir = dest
590 578
591 579 copy = False
592 580 if (srcrepo and srcrepo.cancopy() and islocal(dest)
593 581 and not phases.hassecret(srcrepo)):
594 582 copy = not pull and not rev
595 583
596 584 if copy:
597 585 try:
598 586 # we use a lock here because if we race with commit, we
599 587 # can end up with extra data in the cloned revlogs that's
600 588 # not pointed to by changesets, thus causing verify to
601 589 # fail
602 590 srclock = srcrepo.lock(wait=False)
603 591 except error.LockError:
604 592 copy = False
605 593
606 594 if copy:
607 595 srcrepo.hook('preoutgoing', throw=True, source='clone')
608 596 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
609 597 if not os.path.exists(dest):
610 598 os.mkdir(dest)
611 599 else:
612 600 # only clean up directories we create ourselves
613 601 cleandir = hgdir
614 602 try:
615 603 destpath = hgdir
616 604 util.makedir(destpath, notindexed=True)
617 605 except OSError as inst:
618 606 if inst.errno == errno.EEXIST:
619 607 cleandir = None
620 608 raise error.Abort(_("destination '%s' already exists")
621 609 % dest)
622 610 raise
623 611
624 612 destlock = copystore(ui, srcrepo, destpath)
625 613 # copy bookmarks over
626 614 srcbookmarks = srcrepo.vfs.join('bookmarks')
627 615 dstbookmarks = os.path.join(destpath, 'bookmarks')
628 616 if os.path.exists(srcbookmarks):
629 617 util.copyfile(srcbookmarks, dstbookmarks)
630 618
631 619 dstcachedir = os.path.join(destpath, 'cache')
632 for cache in _cachetocopy(srcrepo):
620 for cache in cacheutil.cachetocopy(srcrepo):
633 621 _copycache(srcrepo, dstcachedir, cache)
634 622
635 623 # we need to re-init the repo after manually copying the data
636 624 # into it
637 625 destpeer = peer(srcrepo, peeropts, dest)
638 626 srcrepo.hook('outgoing', source='clone',
639 627 node=node.hex(node.nullid))
640 628 else:
641 629 try:
642 630 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
643 631 # only pass ui when no srcrepo
644 632 except OSError as inst:
645 633 if inst.errno == errno.EEXIST:
646 634 cleandir = None
647 635 raise error.Abort(_("destination '%s' already exists")
648 636 % dest)
649 637 raise
650 638
651 639 revs = None
652 640 if rev:
653 641 if not srcpeer.capable('lookup'):
654 642 raise error.Abort(_("src repository does not support "
655 643 "revision lookup and so doesn't "
656 644 "support clone by revision"))
657 645 revs = [srcpeer.lookup(r) for r in rev]
658 646 checkout = revs[0]
659 647 local = destpeer.local()
660 648 if local:
661 649 u = util.url(abspath)
662 650 defaulturl = bytes(u)
663 651 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
664 652 if not stream:
665 653 if pull:
666 654 stream = False
667 655 else:
668 656 stream = None
669 657 # internal config: ui.quietbookmarkmove
670 658 overrides = {('ui', 'quietbookmarkmove'): True}
671 659 with local.ui.configoverride(overrides, 'clone'):
672 660 exchange.pull(local, srcpeer, revs,
673 661 streamclonerequested=stream)
674 662 elif srcrepo:
675 663 exchange.push(srcrepo, destpeer, revs=revs,
676 664 bookmarks=srcrepo._bookmarks.keys())
677 665 else:
678 666 raise error.Abort(_("clone from remote to remote not supported")
679 667 )
680 668
681 669 cleandir = None
682 670
683 671 destrepo = destpeer.local()
684 672 if destrepo:
685 673 template = uimod.samplehgrcs['cloned']
686 674 u = util.url(abspath)
687 675 u.passwd = None
688 676 defaulturl = bytes(u)
689 677 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
690 678 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
691 679
692 680 if ui.configbool('experimental', 'remotenames'):
693 681 logexchange.pullremotenames(destrepo, srcpeer)
694 682
695 683 if update:
696 684 if update is not True:
697 685 checkout = srcpeer.lookup(update)
698 686 uprev = None
699 687 status = None
700 688 if checkout is not None:
701 689 try:
702 690 uprev = destrepo.lookup(checkout)
703 691 except error.RepoLookupError:
704 692 if update is not True:
705 693 try:
706 694 uprev = destrepo.lookup(update)
707 695 except error.RepoLookupError:
708 696 pass
709 697 if uprev is None:
710 698 try:
711 699 uprev = destrepo._bookmarks['@']
712 700 update = '@'
713 701 bn = destrepo[uprev].branch()
714 702 if bn == 'default':
715 703 status = _("updating to bookmark @\n")
716 704 else:
717 705 status = (_("updating to bookmark @ on branch %s\n")
718 706 % bn)
719 707 except KeyError:
720 708 try:
721 709 uprev = destrepo.branchtip('default')
722 710 except error.RepoLookupError:
723 711 uprev = destrepo.lookup('tip')
724 712 if not status:
725 713 bn = destrepo[uprev].branch()
726 714 status = _("updating to branch %s\n") % bn
727 715 destrepo.ui.status(status)
728 716 _update(destrepo, uprev)
729 717 if update in destrepo._bookmarks:
730 718 bookmarks.activate(destrepo, update)
731 719 finally:
732 720 release(srclock, destlock)
733 721 if cleandir is not None:
734 722 shutil.rmtree(cleandir, True)
735 723 if srcpeer is not None:
736 724 srcpeer.close()
737 725 return srcpeer, destpeer
738 726
739 727 def _showstats(repo, stats, quietempty=False):
740 728 if quietempty and not any(stats):
741 729 return
742 730 repo.ui.status(_("%d files updated, %d files merged, "
743 731 "%d files removed, %d files unresolved\n") % stats)
744 732
745 733 def updaterepo(repo, node, overwrite, updatecheck=None):
746 734 """Update the working directory to node.
747 735
748 736 When overwrite is set, changes are clobbered; otherwise they are merged
749 737
750 738 returns stats (see pydoc mercurial.merge.applyupdates)"""
751 739 return mergemod.update(repo, node, False, overwrite,
752 740 labels=['working copy', 'destination'],
753 741 updatecheck=updatecheck)
754 742
755 743 def update(repo, node, quietempty=False, updatecheck=None):
756 744 """update the working directory to node"""
757 745 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
758 746 _showstats(repo, stats, quietempty)
759 747 if stats[3]:
760 748 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
761 749 return stats[3] > 0
762 750
763 751 # naming conflict in clone()
764 752 _update = update
765 753
766 754 def clean(repo, node, show_stats=True, quietempty=False):
767 755 """forcibly switch the working directory to node, clobbering changes"""
768 756 stats = updaterepo(repo, node, True)
769 757 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
770 758 if show_stats:
771 759 _showstats(repo, stats, quietempty)
772 760 return stats[3] > 0
773 761
774 762 # naming conflict in updatetotally()
775 763 _clean = clean
776 764
777 765 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
778 766 """Update the working directory with extra care for non-file components
779 767
780 768 This takes care of non-file components below:
781 769
782 770 :bookmark: might be advanced or (in)activated
783 771
784 772 This takes arguments below:
785 773
786 774 :checkout: to which revision the working directory is updated
787 775 :brev: a name, which might be a bookmark to be activated after updating
788 776 :clean: whether changes in the working directory can be discarded
789 777 :updatecheck: how to deal with a dirty working directory
790 778
791 779 Valid values for updatecheck are (None => linear):
792 780
793 781 * abort: abort if the working directory is dirty
794 782 * none: don't check (merge working directory changes into destination)
795 783 * linear: check that update is linear before merging working directory
796 784 changes into destination
797 785 * noconflict: check that the update does not result in file merges
798 786
799 787 This returns whether conflict is detected at updating or not.
800 788 """
801 789 if updatecheck is None:
802 790 updatecheck = ui.config('commands', 'update.check')
803 791 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
804 792 # If not configured, or invalid value configured
805 793 updatecheck = 'linear'
806 794 with repo.wlock():
807 795 movemarkfrom = None
808 796 warndest = False
809 797 if checkout is None:
810 798 updata = destutil.destupdate(repo, clean=clean)
811 799 checkout, movemarkfrom, brev = updata
812 800 warndest = True
813 801
814 802 if clean:
815 803 ret = _clean(repo, checkout)
816 804 else:
817 805 if updatecheck == 'abort':
818 806 cmdutil.bailifchanged(repo, merge=False)
819 807 updatecheck = 'none'
820 808 ret = _update(repo, checkout, updatecheck=updatecheck)
821 809
822 810 if not ret and movemarkfrom:
823 811 if movemarkfrom == repo['.'].node():
824 812 pass # no-op update
825 813 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
826 814 b = ui.label(repo._activebookmark, 'bookmarks.active')
827 815 ui.status(_("updating bookmark %s\n") % b)
828 816 else:
829 817 # this can happen with a non-linear update
830 818 b = ui.label(repo._activebookmark, 'bookmarks')
831 819 ui.status(_("(leaving bookmark %s)\n") % b)
832 820 bookmarks.deactivate(repo)
833 821 elif brev in repo._bookmarks:
834 822 if brev != repo._activebookmark:
835 823 b = ui.label(brev, 'bookmarks.active')
836 824 ui.status(_("(activating bookmark %s)\n") % b)
837 825 bookmarks.activate(repo, brev)
838 826 elif brev:
839 827 if repo._activebookmark:
840 828 b = ui.label(repo._activebookmark, 'bookmarks')
841 829 ui.status(_("(leaving bookmark %s)\n") % b)
842 830 bookmarks.deactivate(repo)
843 831
844 832 if warndest:
845 833 destutil.statusotherdests(ui, repo)
846 834
847 835 return ret
848 836
849 837 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
850 838 abort=False):
851 839 """Branch merge with node, resolving changes. Return true if any
852 840 unresolved conflicts."""
853 841 if not abort:
854 842 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
855 843 labels=labels)
856 844 else:
857 845 ms = mergemod.mergestate.read(repo)
858 846 if ms.active():
859 847 # there were conflicts
860 848 node = ms.localctx.hex()
861 849 else:
862 850 # there were no conflicts, mergestate was not stored
863 851 node = repo['.'].hex()
864 852
865 853 repo.ui.status(_("aborting the merge, updating back to"
866 854 " %s\n") % node[:12])
867 855 stats = mergemod.update(repo, node, branchmerge=False, force=True,
868 856 labels=labels)
869 857
870 858 _showstats(repo, stats)
871 859 if stats[3]:
872 860 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
873 861 "or 'hg merge --abort' to abandon\n"))
874 862 elif remind and not abort:
875 863 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
876 864 return stats[3] > 0
877 865
878 866 def _incoming(displaychlist, subreporecurse, ui, repo, source,
879 867 opts, buffered=False):
880 868 """
881 869 Helper for incoming / gincoming.
882 870 displaychlist gets called with
883 871 (remoterepo, incomingchangesetlist, displayer) parameters,
884 872 and is supposed to contain only code that can't be unified.
885 873 """
886 874 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
887 875 other = peer(repo, opts, source)
888 876 ui.status(_('comparing with %s\n') % util.hidepassword(source))
889 877 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
890 878
891 879 if revs:
892 880 revs = [other.lookup(rev) for rev in revs]
893 881 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
894 882 revs, opts["bundle"], opts["force"])
895 883 try:
896 884 if not chlist:
897 885 ui.status(_("no changes found\n"))
898 886 return subreporecurse()
899 887 ui.pager('incoming')
900 888 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
901 889 displaychlist(other, chlist, displayer)
902 890 displayer.close()
903 891 finally:
904 892 cleanupfn()
905 893 subreporecurse()
906 894 return 0 # exit code is zero since we found incoming changes
907 895
908 896 def incoming(ui, repo, source, opts):
909 897 def subreporecurse():
910 898 ret = 1
911 899 if opts.get('subrepos'):
912 900 ctx = repo[None]
913 901 for subpath in sorted(ctx.substate):
914 902 sub = ctx.sub(subpath)
915 903 ret = min(ret, sub.incoming(ui, source, opts))
916 904 return ret
917 905
918 906 def display(other, chlist, displayer):
919 907 limit = cmdutil.loglimit(opts)
920 908 if opts.get('newest_first'):
921 909 chlist.reverse()
922 910 count = 0
923 911 for n in chlist:
924 912 if limit is not None and count >= limit:
925 913 break
926 914 parents = [p for p in other.changelog.parents(n) if p != nullid]
927 915 if opts.get('no_merges') and len(parents) == 2:
928 916 continue
929 917 count += 1
930 918 displayer.show(other[n])
931 919 return _incoming(display, subreporecurse, ui, repo, source, opts)
932 920
933 921 def _outgoing(ui, repo, dest, opts):
934 922 path = ui.paths.getpath(dest, default=('default-push', 'default'))
935 923 if not path:
936 924 raise error.Abort(_('default repository not configured!'),
937 925 hint=_("see 'hg help config.paths'"))
938 926 dest = path.pushloc or path.loc
939 927 branches = path.branch, opts.get('branch') or []
940 928
941 929 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
942 930 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
943 931 if revs:
944 932 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
945 933
946 934 other = peer(repo, opts, dest)
947 935 outgoing = discovery.findcommonoutgoing(repo, other, revs,
948 936 force=opts.get('force'))
949 937 o = outgoing.missing
950 938 if not o:
951 939 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
952 940 return o, other
953 941
954 942 def outgoing(ui, repo, dest, opts):
955 943 def recurse():
956 944 ret = 1
957 945 if opts.get('subrepos'):
958 946 ctx = repo[None]
959 947 for subpath in sorted(ctx.substate):
960 948 sub = ctx.sub(subpath)
961 949 ret = min(ret, sub.outgoing(ui, dest, opts))
962 950 return ret
963 951
964 952 limit = cmdutil.loglimit(opts)
965 953 o, other = _outgoing(ui, repo, dest, opts)
966 954 if not o:
967 955 cmdutil.outgoinghooks(ui, repo, other, opts, o)
968 956 return recurse()
969 957
970 958 if opts.get('newest_first'):
971 959 o.reverse()
972 960 ui.pager('outgoing')
973 961 displayer = cmdutil.show_changeset(ui, repo, opts)
974 962 count = 0
975 963 for n in o:
976 964 if limit is not None and count >= limit:
977 965 break
978 966 parents = [p for p in repo.changelog.parents(n) if p != nullid]
979 967 if opts.get('no_merges') and len(parents) == 2:
980 968 continue
981 969 count += 1
982 970 displayer.show(repo[n])
983 971 displayer.close()
984 972 cmdutil.outgoinghooks(ui, repo, other, opts, o)
985 973 recurse()
986 974 return 0 # exit code is zero since we found outgoing changes
987 975
988 976 def verify(repo):
989 977 """verify the consistency of a repository"""
990 978 ret = verifymod.verify(repo)
991 979
992 980 # Broken subrepo references in hidden csets don't seem worth worrying about,
993 981 # since they can't be pushed/pulled, and --hidden can be used if they are a
994 982 # concern.
995 983
996 984 # pathto() is needed for -R case
997 985 revs = repo.revs("filelog(%s)",
998 986 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
999 987
1000 988 if revs:
1001 989 repo.ui.status(_('checking subrepo links\n'))
1002 990 for rev in revs:
1003 991 ctx = repo[rev]
1004 992 try:
1005 993 for subpath in ctx.substate:
1006 994 try:
1007 995 ret = (ctx.sub(subpath, allowcreate=False).verify()
1008 996 or ret)
1009 997 except error.RepoError as e:
1010 998 repo.ui.warn(('%s: %s\n') % (rev, e))
1011 999 except Exception:
1012 1000 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1013 1001 node.short(ctx.node()))
1014 1002
1015 1003 return ret
1016 1004
1017 1005 def remoteui(src, opts):
1018 1006 'build a remote ui from ui or repo and opts'
1019 1007 if util.safehasattr(src, 'baseui'): # looks like a repository
1020 1008 dst = src.baseui.copy() # drop repo-specific config
1021 1009 src = src.ui # copy target options from repo
1022 1010 else: # assume it's a global ui object
1023 1011 dst = src.copy() # keep all global options
1024 1012
1025 1013 # copy ssh-specific options
1026 1014 for o in 'ssh', 'remotecmd':
1027 1015 v = opts.get(o) or src.config('ui', o)
1028 1016 if v:
1029 1017 dst.setconfig("ui", o, v, 'copied')
1030 1018
1031 1019 # copy bundle-specific options
1032 1020 r = src.config('bundle', 'mainreporoot')
1033 1021 if r:
1034 1022 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1035 1023
1036 1024 # copy selected local settings to the remote ui
1037 1025 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1038 1026 for key, val in src.configitems(sect):
1039 1027 dst.setconfig(sect, key, val, 'copied')
1040 1028 v = src.config('web', 'cacerts')
1041 1029 if v:
1042 1030 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1043 1031
1044 1032 return dst
1045 1033
1046 1034 # Files of interest
1047 1035 # Used to check if the repository has changed looking at mtime and size of
1048 1036 # these files.
1049 1037 foi = [('spath', '00changelog.i'),
1050 1038 ('spath', 'phaseroots'), # ! phase can change content at the same size
1051 1039 ('spath', 'obsstore'),
1052 1040 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1053 1041 ]
1054 1042
1055 1043 class cachedlocalrepo(object):
1056 1044 """Holds a localrepository that can be cached and reused."""
1057 1045
1058 1046 def __init__(self, repo):
1059 1047 """Create a new cached repo from an existing repo.
1060 1048
1061 1049 We assume the passed in repo was recently created. If the
1062 1050 repo has changed between when it was created and when it was
1063 1051 turned into a cache, it may not refresh properly.
1064 1052 """
1065 1053 assert isinstance(repo, localrepo.localrepository)
1066 1054 self._repo = repo
1067 1055 self._state, self.mtime = self._repostate()
1068 1056 self._filtername = repo.filtername
1069 1057
1070 1058 def fetch(self):
1071 1059 """Refresh (if necessary) and return a repository.
1072 1060
1073 1061 If the cached instance is out of date, it will be recreated
1074 1062 automatically and returned.
1075 1063
1076 1064 Returns a tuple of the repo and a boolean indicating whether a new
1077 1065 repo instance was created.
1078 1066 """
1079 1067 # We compare the mtimes and sizes of some well-known files to
1080 1068 # determine if the repo changed. This is not precise, as mtimes
1081 1069 # are susceptible to clock skew and imprecise filesystems and
1082 1070 # file content can change while maintaining the same size.
1083 1071
1084 1072 state, mtime = self._repostate()
1085 1073 if state == self._state:
1086 1074 return self._repo, False
1087 1075
1088 1076 repo = repository(self._repo.baseui, self._repo.url())
1089 1077 if self._filtername:
1090 1078 self._repo = repo.filtered(self._filtername)
1091 1079 else:
1092 1080 self._repo = repo.unfiltered()
1093 1081 self._state = state
1094 1082 self.mtime = mtime
1095 1083
1096 1084 return self._repo, True
1097 1085
1098 1086 def _repostate(self):
1099 1087 state = []
1100 1088 maxmtime = -1
1101 1089 for attr, fname in foi:
1102 1090 prefix = getattr(self._repo, attr)
1103 1091 p = os.path.join(prefix, fname)
1104 1092 try:
1105 1093 st = os.stat(p)
1106 1094 except OSError:
1107 1095 st = os.stat(prefix)
1108 1096 state.append((st.st_mtime, st.st_size))
1109 1097 maxmtime = max(maxmtime, st.st_mtime)
1110 1098
1111 1099 return tuple(state), maxmtime
1112 1100
1113 1101 def copy(self):
1114 1102 """Obtain a copy of this class instance.
1115 1103
1116 1104 A new localrepository instance is obtained. The new instance should be
1117 1105 completely independent of the original.
1118 1106 """
1119 1107 repo = repository(self._repo.baseui, self._repo.origroot)
1120 1108 if self._filtername:
1121 1109 repo = repo.filtered(self._filtername)
1122 1110 else:
1123 1111 repo = repo.unfiltered()
1124 1112 c = cachedlocalrepo(repo)
1125 1113 c._state = self._state
1126 1114 c.mtime = self.mtime
1127 1115 return c
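The rest of the hunk above is unchanged context; the functional change in hg.py is only the import swap (repoview out, cacheutil in) and the call-site switch from the removed private _cachetocopy() to cacheutil.cachetocopy(). Out-of-tree code that reached into the old private helper would migrate roughly as follows (hypothetical caller; repo is assumed to be an existing localrepository object):

from mercurial import cacheutil

# Before this changeset the list came from a private helper in hg.py:
#     from mercurial import hg
#     cachefiles = hg._cachetocopy(repo)
# After it, the public entry point is the new module:
cachefiles = cacheutil.cachetocopy(repo)
# -> ['branch2', 'branch2-<filter>', 'rbc-names-v1', 'rbc-revs-v1',
#     'tags2', 'tags2-<filter>', 'hgtagsfnodes1'] for each repoview filter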