py3: check for bytes instead of str in isinstance
Pulkit Goyal
r33018:071732d9 default
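
The change below swaps the isinstance() check in islocal() from str to bytes. A minimal sketch of why this matters (the b'/tmp/repo' value is illustrative, not part of the patch): Mercurial keeps repository paths as bytes internally, and on Python 3 a bytes value never matches str, so the old check always fell through to repo.local(), which raises AttributeError on a plain bytes object.

    # Hypothetical path value; hg handles paths as bytes internally.
    path = b'/tmp/repo'

    isinstance(path, str)    # Python 2: True (str is bytes there), Python 3: False
    isinstance(path, bytes)  # True on both, which is what islocal() needs
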
@@ -1,1064 +1,1064 @@ mercurial/hg.py
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 bookmarks,
21 21 bundlerepo,
22 22 cmdutil,
23 23 destutil,
24 24 discovery,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 httppeer,
29 29 localrepo,
30 30 lock,
31 31 merge as mergemod,
32 32 node,
33 33 phases,
34 34 repoview,
35 35 scmutil,
36 36 sshpeer,
37 37 statichttprepo,
38 38 ui as uimod,
39 39 unionrepo,
40 40 url,
41 41 util,
42 42 verify as verifymod,
43 43 vfs as vfsmod,
44 44 )
45 45
46 46 release = lock.release
47 47
48 48 # shared features
49 49 sharedbookmarks = 'bookmarks'
50 50
51 51 def _local(path):
52 52 path = util.expandpath(util.urllocalpath(path))
53 53 return (os.path.isfile(path) and bundlerepo or localrepo)
54 54
55 55 def addbranchrevs(lrepo, other, branches, revs):
56 56 peer = other.peer() # a courtesy to callers using a localrepo for other
57 57 hashbranch, branches = branches
58 58 if not hashbranch and not branches:
59 59 x = revs or None
60 60 if util.safehasattr(revs, 'first'):
61 61 y = revs.first()
62 62 elif revs:
63 63 y = revs[0]
64 64 else:
65 65 y = None
66 66 return x, y
67 67 if revs:
68 68 revs = list(revs)
69 69 else:
70 70 revs = []
71 71
72 72 if not peer.capable('branchmap'):
73 73 if branches:
74 74 raise error.Abort(_("remote branch lookup not supported"))
75 75 revs.append(hashbranch)
76 76 return revs, revs[0]
77 77 branchmap = peer.branchmap()
78 78
79 79 def primary(branch):
80 80 if branch == '.':
81 81 if not lrepo:
82 82 raise error.Abort(_("dirstate branch not accessible"))
83 83 branch = lrepo.dirstate.branch()
84 84 if branch in branchmap:
85 85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
86 86 return True
87 87 else:
88 88 return False
89 89
90 90 for branch in branches:
91 91 if not primary(branch):
92 92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
93 93 if hashbranch:
94 94 if not primary(hashbranch):
95 95 revs.append(hashbranch)
96 96 return revs, revs[0]
97 97
98 98 def parseurl(path, branches=None):
99 99 '''parse url#branch, returning (url, (branch, branches))'''
100 100
101 101 u = util.url(path)
102 102 branch = None
103 103 if u.fragment:
104 104 branch = u.fragment
105 105 u.fragment = None
106 106 return bytes(u), (branch, branches or [])
107 107
108 108 schemes = {
109 109 'bundle': bundlerepo,
110 110 'union': unionrepo,
111 111 'file': _local,
112 112 'http': httppeer,
113 113 'https': httppeer,
114 114 'ssh': sshpeer,
115 115 'static-http': statichttprepo,
116 116 }
117 117
118 118 def _peerlookup(path):
119 119 u = util.url(path)
120 120 scheme = u.scheme or 'file'
121 121 thing = schemes.get(scheme) or schemes['file']
122 122 try:
123 123 return thing(path)
124 124 except TypeError:
125 125 # we can't test callable(thing) because 'thing' can be an unloaded
126 126 # module that implements __call__
127 127 if not util.safehasattr(thing, 'instance'):
128 128 raise
129 129 return thing
130 130
131 131 def islocal(repo):
132 132 '''return true if repo (or path pointing to repo) is local'''
133 - if isinstance(repo, str):
133 + if isinstance(repo, bytes):
134 134 try:
135 135 return _peerlookup(repo).islocal(repo)
136 136 except AttributeError:
137 137 return False
138 138 return repo.local()
139 139
140 140 def openpath(ui, path):
141 141 '''open path with open if local, url.open if remote'''
142 142 pathurl = util.url(path, parsequery=False, parsefragment=False)
143 143 if pathurl.islocal():
144 144 return util.posixfile(pathurl.localpath(), 'rb')
145 145 else:
146 146 return url.open(ui, path)
147 147
148 148 # a list of (ui, repo) functions called for wire peer initialization
149 149 wirepeersetupfuncs = []
150 150
151 151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
152 152 """return a repository object for the specified path"""
153 153 obj = _peerlookup(path).instance(ui, path, create)
154 154 ui = getattr(obj, "ui", ui)
155 155 for f in presetupfuncs or []:
156 156 f(ui, obj)
157 157 for name, module in extensions.extensions(ui):
158 158 hook = getattr(module, 'reposetup', None)
159 159 if hook:
160 160 hook(ui, obj)
161 161 if not obj.local():
162 162 for f in wirepeersetupfuncs:
163 163 f(ui, obj)
164 164 return obj
165 165
166 166 def repository(ui, path='', create=False, presetupfuncs=None):
167 167 """return a repository object for the specified path"""
168 168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
169 169 repo = peer.local()
170 170 if not repo:
171 171 raise error.Abort(_("repository '%s' is not local") %
172 172 (path or peer.url()))
173 173 return repo.filtered('visible')
174 174
175 175 def peer(uiorrepo, opts, path, create=False):
176 176 '''return a repository peer for the specified path'''
177 177 rui = remoteui(uiorrepo, opts)
178 178 return _peerorrepo(rui, path, create).peer()
179 179
180 180 def defaultdest(source):
181 181 '''return default destination of clone if none is given
182 182
183 183 >>> defaultdest('foo')
184 184 'foo'
185 185 >>> defaultdest('/foo/bar')
186 186 'bar'
187 187 >>> defaultdest('/')
188 188 ''
189 189 >>> defaultdest('')
190 190 ''
191 191 >>> defaultdest('http://example.org/')
192 192 ''
193 193 >>> defaultdest('http://example.org/foo/')
194 194 'foo'
195 195 '''
196 196 path = util.url(source).path
197 197 if not path:
198 198 return ''
199 199 return os.path.basename(os.path.normpath(path))
200 200
201 201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
202 202 relative=False):
203 203 '''create a shared repository'''
204 204
205 205 if not islocal(source):
206 206 raise error.Abort(_('can only share local repositories'))
207 207
208 208 if not dest:
209 209 dest = defaultdest(source)
210 210 else:
211 211 dest = ui.expandpath(dest)
212 212
213 213 if isinstance(source, str):
214 214 origsource = ui.expandpath(source)
215 215 source, branches = parseurl(origsource)
216 216 srcrepo = repository(ui, source)
217 217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
218 218 else:
219 219 srcrepo = source.local()
220 220 origsource = source = srcrepo.url()
221 221 checkout = None
222 222
223 223 sharedpath = srcrepo.sharedpath # if our source is already sharing
224 224
225 225 destwvfs = vfsmod.vfs(dest, realpath=True)
226 226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
227 227
228 228 if destvfs.lexists():
229 229 raise error.Abort(_('destination already exists'))
230 230
231 231 if not destwvfs.isdir():
232 232 destwvfs.mkdir()
233 233 destvfs.makedir()
234 234
235 235 requirements = ''
236 236 try:
237 237 requirements = srcrepo.vfs.read('requires')
238 238 except IOError as inst:
239 239 if inst.errno != errno.ENOENT:
240 240 raise
241 241
242 242 if relative:
243 243 try:
244 244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
245 245 requirements += 'relshared\n'
246 246 except IOError as e:
247 247 raise error.Abort(_('cannot calculate relative path'),
248 248 hint=str(e))
249 249 else:
250 250 requirements += 'shared\n'
251 251
252 252 destvfs.write('requires', requirements)
253 253 destvfs.write('sharedpath', sharedpath)
254 254
255 255 r = repository(ui, destwvfs.base)
256 256 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
257 257 _postshareupdate(r, update, checkout=checkout)
258 258
259 259 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
260 260 """Called after a new shared repo is created.
261 261
262 262 The new repo only has a requirements file and pointer to the source.
263 263 This function configures additional shared data.
264 264
265 265 Extensions can wrap this function and write additional entries to
266 266 destrepo/.hg/shared to indicate additional pieces of data to be shared.
267 267 """
268 268 default = defaultpath or sourcerepo.ui.config('paths', 'default')
269 269 if default:
270 270 fp = destrepo.vfs("hgrc", "w", text=True)
271 271 fp.write("[paths]\n")
272 272 fp.write("default = %s\n" % default)
273 273 fp.close()
274 274
275 275 with destrepo.wlock():
276 276 if bookmarks:
277 277 fp = destrepo.vfs('shared', 'w')
278 278 fp.write(sharedbookmarks + '\n')
279 279 fp.close()
280 280
281 281 def _postshareupdate(repo, update, checkout=None):
282 282 """Maybe perform a working directory update after a shared repo is created.
283 283
284 284 ``update`` can be a boolean or a revision to update to.
285 285 """
286 286 if not update:
287 287 return
288 288
289 289 repo.ui.status(_("updating working directory\n"))
290 290 if update is not True:
291 291 checkout = update
292 292 for test in (checkout, 'default', 'tip'):
293 293 if test is None:
294 294 continue
295 295 try:
296 296 uprev = repo.lookup(test)
297 297 break
298 298 except error.RepoLookupError:
299 299 continue
300 300 _update(repo, uprev)
301 301
302 302 def copystore(ui, srcrepo, destpath):
303 303 '''copy files from store of srcrepo in destpath
304 304
305 305 returns destlock
306 306 '''
307 307 destlock = None
308 308 try:
309 309 hardlink = None
310 310 num = 0
311 311 closetopic = [None]
312 312 def prog(topic, pos):
313 313 if pos is None:
314 314 closetopic[0] = topic
315 315 else:
316 316 ui.progress(topic, pos + num)
317 317 srcpublishing = srcrepo.publishing()
318 318 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
319 319 dstvfs = vfsmod.vfs(destpath)
320 320 for f in srcrepo.store.copylist():
321 321 if srcpublishing and f.endswith('phaseroots'):
322 322 continue
323 323 dstbase = os.path.dirname(f)
324 324 if dstbase and not dstvfs.exists(dstbase):
325 325 dstvfs.mkdir(dstbase)
326 326 if srcvfs.exists(f):
327 327 if f.endswith('data'):
328 328 # 'dstbase' may be empty (e.g. revlog format 0)
329 329 lockfile = os.path.join(dstbase, "lock")
330 330 # lock to avoid premature writing to the target
331 331 destlock = lock.lock(dstvfs, lockfile)
332 332 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
333 333 hardlink, progress=prog)
334 334 num += n
335 335 if hardlink:
336 336 ui.debug("linked %d files\n" % num)
337 337 if closetopic[0]:
338 338 ui.progress(closetopic[0], None)
339 339 else:
340 340 ui.debug("copied %d files\n" % num)
341 341 if closetopic[0]:
342 342 ui.progress(closetopic[0], None)
343 343 return destlock
344 344 except: # re-raises
345 345 release(destlock)
346 346 raise
347 347
348 348 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
349 349 rev=None, update=True, stream=False):
350 350 """Perform a clone using a shared repo.
351 351
352 352 The store for the repository will be located at <sharepath>/.hg. The
353 353 specified revisions will be cloned or pulled from "source". A shared repo
354 354 will be created at "dest" and a working copy will be created if "update" is
355 355 True.
356 356 """
357 357 revs = None
358 358 if rev:
359 359 if not srcpeer.capable('lookup'):
360 360 raise error.Abort(_("src repository does not support "
361 361 "revision lookup and so doesn't "
362 362 "support clone by revision"))
363 363 revs = [srcpeer.lookup(r) for r in rev]
364 364
365 365 # Obtain a lock before checking for or cloning the pooled repo otherwise
366 366 # 2 clients may race creating or populating it.
367 367 pooldir = os.path.dirname(sharepath)
368 368 # lock class requires the directory to exist.
369 369 try:
370 370 util.makedir(pooldir, False)
371 371 except OSError as e:
372 372 if e.errno != errno.EEXIST:
373 373 raise
374 374
375 375 poolvfs = vfsmod.vfs(pooldir)
376 376 basename = os.path.basename(sharepath)
377 377
378 378 with lock.lock(poolvfs, '%s.lock' % basename):
379 379 if os.path.exists(sharepath):
380 380 ui.status(_('(sharing from existing pooled repository %s)\n') %
381 381 basename)
382 382 else:
383 383 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
384 384 # Always use pull mode because hardlinks in share mode don't work
385 385 # well. Never update because working copies aren't necessary in
386 386 # share mode.
387 387 clone(ui, peeropts, source, dest=sharepath, pull=True,
388 388 rev=rev, update=False, stream=stream)
389 389
390 390 # Resolve the value to put in [paths] section for the source.
391 391 if islocal(source):
392 392 defaultpath = os.path.abspath(util.urllocalpath(source))
393 393 else:
394 394 defaultpath = source
395 395
396 396 sharerepo = repository(ui, path=sharepath)
397 397 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
398 398 defaultpath=defaultpath)
399 399
400 400 # We need to perform a pull against the dest repo to fetch bookmarks
401 401 # and other non-store data that isn't shared by default. In the case of
402 402 # non-existing shared repo, this means we pull from the remote twice. This
403 403 # is a bit weird. But at the time it was implemented, there wasn't an easy
404 404 # way to pull just non-changegroup data.
405 405 destrepo = repository(ui, path=dest)
406 406 exchange.pull(destrepo, srcpeer, heads=revs)
407 407
408 408 _postshareupdate(destrepo, update)
409 409
410 410 return srcpeer, peer(ui, peeropts, dest)
411 411
412 412 # Recomputing branch cache might be slow on big repos,
413 413 # so just copy it
414 414 def _copycache(srcrepo, dstcachedir, fname):
415 415 """copy a cache from srcrepo to destcachedir (if it exists)"""
416 416 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
417 417 dstbranchcache = os.path.join(dstcachedir, fname)
418 418 if os.path.exists(srcbranchcache):
419 419 if not os.path.exists(dstcachedir):
420 420 os.mkdir(dstcachedir)
421 421 util.copyfile(srcbranchcache, dstbranchcache)
422 422
423 423 def _cachetocopy(srcrepo):
424 424 """return the list of cache file valuable to copy during a clone"""
425 425 # In local clones we're copying all nodes, not just served
426 426 # ones. Therefore copy all branch caches over.
427 427 cachefiles = ['branch2']
428 428 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
429 429 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
430 430 cachefiles += ['tags2']
431 431 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
432 432 cachefiles += ['hgtagsfnodes1']
433 433 return cachefiles
434 434
435 435 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
436 436 update=True, stream=False, branch=None, shareopts=None):
437 437 """Make a copy of an existing repository.
438 438
439 439 Create a copy of an existing repository in a new directory. The
440 440 source and destination are URLs, as passed to the repository
441 441 function. Returns a pair of repository peers, the source and
442 442 newly created destination.
443 443
444 444 The location of the source is added to the new repository's
445 445 .hg/hgrc file, as the default to be used for future pulls and
446 446 pushes.
447 447
448 448 If an exception is raised, the partly cloned/updated destination
449 449 repository will be deleted.
450 450
451 451 Arguments:
452 452
453 453 source: repository object or URL
454 454
455 455 dest: URL of destination repository to create (defaults to base
456 456 name of source repository)
457 457
458 458 pull: always pull from source repository, even in local case or if the
459 459 server prefers streaming
460 460
461 461 stream: stream raw data uncompressed from repository (fast over
462 462 LAN, slow over WAN)
463 463
464 464 rev: revision to clone up to (implies pull=True)
465 465
466 466 update: update working directory after clone completes, if
467 467 destination is local repository (True means update to default rev,
468 468 anything else is treated as a revision)
469 469
470 470 branch: branches to clone
471 471
472 472 shareopts: dict of options to control auto sharing behavior. The "pool" key
473 473 activates auto sharing mode and defines the directory for stores. The
474 474 "mode" key determines how to construct the directory name of the shared
475 475 repository. "identity" means the name is derived from the node of the first
476 476 changeset in the repository. "remote" means the name is derived from the
477 477 remote's path/URL. Defaults to "identity."
478 478 """
479 479
480 480 if isinstance(source, bytes):
481 481 origsource = ui.expandpath(source)
482 482 source, branch = parseurl(origsource, branch)
483 483 srcpeer = peer(ui, peeropts, source)
484 484 else:
485 485 srcpeer = source.peer() # in case we were called with a localrepo
486 486 branch = (None, branch or [])
487 487 origsource = source = srcpeer.url()
488 488 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
489 489
490 490 if dest is None:
491 491 dest = defaultdest(source)
492 492 if dest:
493 493 ui.status(_("destination directory: %s\n") % dest)
494 494 else:
495 495 dest = ui.expandpath(dest)
496 496
497 497 dest = util.urllocalpath(dest)
498 498 source = util.urllocalpath(source)
499 499
500 500 if not dest:
501 501 raise error.Abort(_("empty destination path is not valid"))
502 502
503 503 destvfs = vfsmod.vfs(dest, expandpath=True)
504 504 if destvfs.lexists():
505 505 if not destvfs.isdir():
506 506 raise error.Abort(_("destination '%s' already exists") % dest)
507 507 elif destvfs.listdir():
508 508 raise error.Abort(_("destination '%s' is not empty") % dest)
509 509
510 510 shareopts = shareopts or {}
511 511 sharepool = shareopts.get('pool')
512 512 sharenamemode = shareopts.get('mode')
513 513 if sharepool and islocal(dest):
514 514 sharepath = None
515 515 if sharenamemode == 'identity':
516 516 # Resolve the name from the initial changeset in the remote
517 517 # repository. This returns nullid when the remote is empty. It
518 518 # raises RepoLookupError if revision 0 is filtered or otherwise
519 519 # not available. If we fail to resolve, sharing is not enabled.
520 520 try:
521 521 rootnode = srcpeer.lookup('0')
522 522 if rootnode != node.nullid:
523 523 sharepath = os.path.join(sharepool, node.hex(rootnode))
524 524 else:
525 525 ui.status(_('(not using pooled storage: '
526 526 'remote appears to be empty)\n'))
527 527 except error.RepoLookupError:
528 528 ui.status(_('(not using pooled storage: '
529 529 'unable to resolve identity of remote)\n'))
530 530 elif sharenamemode == 'remote':
531 531 sharepath = os.path.join(
532 532 sharepool, hashlib.sha1(source).hexdigest())
533 533 else:
534 534 raise error.Abort(_('unknown share naming mode: %s') %
535 535 sharenamemode)
536 536
537 537 if sharepath:
538 538 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
539 539 dest, pull=pull, rev=rev, update=update,
540 540 stream=stream)
541 541
542 542 srclock = destlock = cleandir = None
543 543 srcrepo = srcpeer.local()
544 544 try:
545 545 abspath = origsource
546 546 if islocal(origsource):
547 547 abspath = os.path.abspath(util.urllocalpath(origsource))
548 548
549 549 if islocal(dest):
550 550 cleandir = dest
551 551
552 552 copy = False
553 553 if (srcrepo and srcrepo.cancopy() and islocal(dest)
554 554 and not phases.hassecret(srcrepo)):
555 555 copy = not pull and not rev
556 556
557 557 if copy:
558 558 try:
559 559 # we use a lock here because if we race with commit, we
560 560 # can end up with extra data in the cloned revlogs that's
561 561 # not pointed to by changesets, thus causing verify to
562 562 # fail
563 563 srclock = srcrepo.lock(wait=False)
564 564 except error.LockError:
565 565 copy = False
566 566
567 567 if copy:
568 568 srcrepo.hook('preoutgoing', throw=True, source='clone')
569 569 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
570 570 if not os.path.exists(dest):
571 571 os.mkdir(dest)
572 572 else:
573 573 # only clean up directories we create ourselves
574 574 cleandir = hgdir
575 575 try:
576 576 destpath = hgdir
577 577 util.makedir(destpath, notindexed=True)
578 578 except OSError as inst:
579 579 if inst.errno == errno.EEXIST:
580 580 cleandir = None
581 581 raise error.Abort(_("destination '%s' already exists")
582 582 % dest)
583 583 raise
584 584
585 585 destlock = copystore(ui, srcrepo, destpath)
586 586 # copy bookmarks over
587 587 srcbookmarks = srcrepo.vfs.join('bookmarks')
588 588 dstbookmarks = os.path.join(destpath, 'bookmarks')
589 589 if os.path.exists(srcbookmarks):
590 590 util.copyfile(srcbookmarks, dstbookmarks)
591 591
592 592 dstcachedir = os.path.join(destpath, 'cache')
593 593 for cache in _cachetocopy(srcrepo):
594 594 _copycache(srcrepo, dstcachedir, cache)
595 595
596 596 # we need to re-init the repo after manually copying the data
597 597 # into it
598 598 destpeer = peer(srcrepo, peeropts, dest)
599 599 srcrepo.hook('outgoing', source='clone',
600 600 node=node.hex(node.nullid))
601 601 else:
602 602 try:
603 603 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
604 604 # only pass ui when no srcrepo
605 605 except OSError as inst:
606 606 if inst.errno == errno.EEXIST:
607 607 cleandir = None
608 608 raise error.Abort(_("destination '%s' already exists")
609 609 % dest)
610 610 raise
611 611
612 612 revs = None
613 613 if rev:
614 614 if not srcpeer.capable('lookup'):
615 615 raise error.Abort(_("src repository does not support "
616 616 "revision lookup and so doesn't "
617 617 "support clone by revision"))
618 618 revs = [srcpeer.lookup(r) for r in rev]
619 619 checkout = revs[0]
620 620 local = destpeer.local()
621 621 if local:
622 622 if not stream:
623 623 if pull:
624 624 stream = False
625 625 else:
626 626 stream = None
627 627 # internal config: ui.quietbookmarkmove
628 628 overrides = {('ui', 'quietbookmarkmove'): True}
629 629 with local.ui.configoverride(overrides, 'clone'):
630 630 exchange.pull(local, srcpeer, revs,
631 631 streamclonerequested=stream)
632 632 elif srcrepo:
633 633 exchange.push(srcrepo, destpeer, revs=revs,
634 634 bookmarks=srcrepo._bookmarks.keys())
635 635 else:
636 636 raise error.Abort(_("clone from remote to remote not supported")
637 637 )
638 638
639 639 cleandir = None
640 640
641 641 destrepo = destpeer.local()
642 642 if destrepo:
643 643 template = uimod.samplehgrcs['cloned']
644 644 fp = destrepo.vfs("hgrc", "w", text=True)
645 645 u = util.url(abspath)
646 646 u.passwd = None
647 647 defaulturl = str(u)
648 648 fp.write(template % defaulturl)
649 649 fp.close()
650 650
651 651 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
652 652
653 653 if update:
654 654 if update is not True:
655 655 checkout = srcpeer.lookup(update)
656 656 uprev = None
657 657 status = None
658 658 if checkout is not None:
659 659 try:
660 660 uprev = destrepo.lookup(checkout)
661 661 except error.RepoLookupError:
662 662 if update is not True:
663 663 try:
664 664 uprev = destrepo.lookup(update)
665 665 except error.RepoLookupError:
666 666 pass
667 667 if uprev is None:
668 668 try:
669 669 uprev = destrepo._bookmarks['@']
670 670 update = '@'
671 671 bn = destrepo[uprev].branch()
672 672 if bn == 'default':
673 673 status = _("updating to bookmark @\n")
674 674 else:
675 675 status = (_("updating to bookmark @ on branch %s\n")
676 676 % bn)
677 677 except KeyError:
678 678 try:
679 679 uprev = destrepo.branchtip('default')
680 680 except error.RepoLookupError:
681 681 uprev = destrepo.lookup('tip')
682 682 if not status:
683 683 bn = destrepo[uprev].branch()
684 684 status = _("updating to branch %s\n") % bn
685 685 destrepo.ui.status(status)
686 686 _update(destrepo, uprev)
687 687 if update in destrepo._bookmarks:
688 688 bookmarks.activate(destrepo, update)
689 689 finally:
690 690 release(srclock, destlock)
691 691 if cleandir is not None:
692 692 shutil.rmtree(cleandir, True)
693 693 if srcpeer is not None:
694 694 srcpeer.close()
695 695 return srcpeer, destpeer
696 696
697 697 def _showstats(repo, stats, quietempty=False):
698 698 if quietempty and not any(stats):
699 699 return
700 700 repo.ui.status(_("%d files updated, %d files merged, "
701 701 "%d files removed, %d files unresolved\n") % stats)
702 702
703 703 def updaterepo(repo, node, overwrite, updatecheck=None):
704 704 """Update the working directory to node.
705 705
706 706 When overwrite is set, changes are clobbered, merged else
707 707
708 708 returns stats (see pydoc mercurial.merge.applyupdates)"""
709 709 return mergemod.update(repo, node, False, overwrite,
710 710 labels=['working copy', 'destination'],
711 711 updatecheck=updatecheck)
712 712
713 713 def update(repo, node, quietempty=False, updatecheck=None):
714 714 """update the working directory to node"""
715 715 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
716 716 _showstats(repo, stats, quietempty)
717 717 if stats[3]:
718 718 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
719 719 return stats[3] > 0
720 720
721 721 # naming conflict in clone()
722 722 _update = update
723 723
724 724 def clean(repo, node, show_stats=True, quietempty=False):
725 725 """forcibly switch the working directory to node, clobbering changes"""
726 726 stats = updaterepo(repo, node, True)
727 727 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
728 728 if show_stats:
729 729 _showstats(repo, stats, quietempty)
730 730 return stats[3] > 0
731 731
732 732 # naming conflict in updatetotally()
733 733 _clean = clean
734 734
735 735 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
736 736 """Update the working directory with extra care for non-file components
737 737
738 738 This takes care of non-file components below:
739 739
740 740 :bookmark: might be advanced or (in)activated
741 741
742 742 This takes arguments below:
743 743
744 744 :checkout: to which revision the working directory is updated
745 745 :brev: a name, which might be a bookmark to be activated after updating
746 746 :clean: whether changes in the working directory can be discarded
747 747 :updatecheck: how to deal with a dirty working directory
748 748
749 749 Valid values for updatecheck are (None => linear):
750 750
751 751 * abort: abort if the working directory is dirty
752 752 * none: don't check (merge working directory changes into destination)
753 753 * linear: check that update is linear before merging working directory
754 754 changes into destination
755 755 * noconflict: check that the update does not result in file merges
756 756
757 757 This returns whether conflict is detected at updating or not.
758 758 """
759 759 if updatecheck is None:
760 760 updatecheck = ui.config('experimental', 'updatecheck')
761 761 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
762 762 # If not configured, or invalid value configured
763 763 updatecheck = 'linear'
764 764 with repo.wlock():
765 765 movemarkfrom = None
766 766 warndest = False
767 767 if checkout is None:
768 768 updata = destutil.destupdate(repo, clean=clean)
769 769 checkout, movemarkfrom, brev = updata
770 770 warndest = True
771 771
772 772 if clean:
773 773 ret = _clean(repo, checkout)
774 774 else:
775 775 if updatecheck == 'abort':
776 776 cmdutil.bailifchanged(repo, merge=False)
777 777 updatecheck = 'none'
778 778 ret = _update(repo, checkout, updatecheck=updatecheck)
779 779
780 780 if not ret and movemarkfrom:
781 781 if movemarkfrom == repo['.'].node():
782 782 pass # no-op update
783 783 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
784 784 b = ui.label(repo._activebookmark, 'bookmarks.active')
785 785 ui.status(_("updating bookmark %s\n") % b)
786 786 else:
787 787 # this can happen with a non-linear update
788 788 b = ui.label(repo._activebookmark, 'bookmarks')
789 789 ui.status(_("(leaving bookmark %s)\n") % b)
790 790 bookmarks.deactivate(repo)
791 791 elif brev in repo._bookmarks:
792 792 if brev != repo._activebookmark:
793 793 b = ui.label(brev, 'bookmarks.active')
794 794 ui.status(_("(activating bookmark %s)\n") % b)
795 795 bookmarks.activate(repo, brev)
796 796 elif brev:
797 797 if repo._activebookmark:
798 798 b = ui.label(repo._activebookmark, 'bookmarks')
799 799 ui.status(_("(leaving bookmark %s)\n") % b)
800 800 bookmarks.deactivate(repo)
801 801
802 802 if warndest:
803 803 destutil.statusotherdests(ui, repo)
804 804
805 805 return ret
806 806
807 807 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
808 808 """Branch merge with node, resolving changes. Return true if any
809 809 unresolved conflicts."""
810 810 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
811 811 labels=labels)
812 812 _showstats(repo, stats)
813 813 if stats[3]:
814 814 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
815 815 "or 'hg update -C .' to abandon\n"))
816 816 elif remind:
817 817 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
818 818 return stats[3] > 0
819 819
820 820 def _incoming(displaychlist, subreporecurse, ui, repo, source,
821 821 opts, buffered=False):
822 822 """
823 823 Helper for incoming / gincoming.
824 824 displaychlist gets called with
825 825 (remoterepo, incomingchangesetlist, displayer) parameters,
826 826 and is supposed to contain only code that can't be unified.
827 827 """
828 828 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
829 829 other = peer(repo, opts, source)
830 830 ui.status(_('comparing with %s\n') % util.hidepassword(source))
831 831 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
832 832
833 833 if revs:
834 834 revs = [other.lookup(rev) for rev in revs]
835 835 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
836 836 revs, opts["bundle"], opts["force"])
837 837 try:
838 838 if not chlist:
839 839 ui.status(_("no changes found\n"))
840 840 return subreporecurse()
841 841 ui.pager('incoming')
842 842 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
843 843 displaychlist(other, chlist, displayer)
844 844 displayer.close()
845 845 finally:
846 846 cleanupfn()
847 847 subreporecurse()
848 848 return 0 # exit code is zero since we found incoming changes
849 849
850 850 def incoming(ui, repo, source, opts):
851 851 def subreporecurse():
852 852 ret = 1
853 853 if opts.get('subrepos'):
854 854 ctx = repo[None]
855 855 for subpath in sorted(ctx.substate):
856 856 sub = ctx.sub(subpath)
857 857 ret = min(ret, sub.incoming(ui, source, opts))
858 858 return ret
859 859
860 860 def display(other, chlist, displayer):
861 861 limit = cmdutil.loglimit(opts)
862 862 if opts.get('newest_first'):
863 863 chlist.reverse()
864 864 count = 0
865 865 for n in chlist:
866 866 if limit is not None and count >= limit:
867 867 break
868 868 parents = [p for p in other.changelog.parents(n) if p != nullid]
869 869 if opts.get('no_merges') and len(parents) == 2:
870 870 continue
871 871 count += 1
872 872 displayer.show(other[n])
873 873 return _incoming(display, subreporecurse, ui, repo, source, opts)
874 874
875 875 def _outgoing(ui, repo, dest, opts):
876 876 dest = ui.expandpath(dest or 'default-push', dest or 'default')
877 877 dest, branches = parseurl(dest, opts.get('branch'))
878 878 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
879 879 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
880 880 if revs:
881 881 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
882 882
883 883 other = peer(repo, opts, dest)
884 884 outgoing = discovery.findcommonoutgoing(repo, other, revs,
885 885 force=opts.get('force'))
886 886 o = outgoing.missing
887 887 if not o:
888 888 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
889 889 return o, other
890 890
891 891 def outgoing(ui, repo, dest, opts):
892 892 def recurse():
893 893 ret = 1
894 894 if opts.get('subrepos'):
895 895 ctx = repo[None]
896 896 for subpath in sorted(ctx.substate):
897 897 sub = ctx.sub(subpath)
898 898 ret = min(ret, sub.outgoing(ui, dest, opts))
899 899 return ret
900 900
901 901 limit = cmdutil.loglimit(opts)
902 902 o, other = _outgoing(ui, repo, dest, opts)
903 903 if not o:
904 904 cmdutil.outgoinghooks(ui, repo, other, opts, o)
905 905 return recurse()
906 906
907 907 if opts.get('newest_first'):
908 908 o.reverse()
909 909 ui.pager('outgoing')
910 910 displayer = cmdutil.show_changeset(ui, repo, opts)
911 911 count = 0
912 912 for n in o:
913 913 if limit is not None and count >= limit:
914 914 break
915 915 parents = [p for p in repo.changelog.parents(n) if p != nullid]
916 916 if opts.get('no_merges') and len(parents) == 2:
917 917 continue
918 918 count += 1
919 919 displayer.show(repo[n])
920 920 displayer.close()
921 921 cmdutil.outgoinghooks(ui, repo, other, opts, o)
922 922 recurse()
923 923 return 0 # exit code is zero since we found outgoing changes
924 924
925 925 def verify(repo):
926 926 """verify the consistency of a repository"""
927 927 ret = verifymod.verify(repo)
928 928
929 929 # Broken subrepo references in hidden csets don't seem worth worrying about,
930 930 # since they can't be pushed/pulled, and --hidden can be used if they are a
931 931 # concern.
932 932
933 933 # pathto() is needed for -R case
934 934 revs = repo.revs("filelog(%s)",
935 935 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
936 936
937 937 if revs:
938 938 repo.ui.status(_('checking subrepo links\n'))
939 939 for rev in revs:
940 940 ctx = repo[rev]
941 941 try:
942 942 for subpath in ctx.substate:
943 943 try:
944 944 ret = (ctx.sub(subpath, allowcreate=False).verify()
945 945 or ret)
946 946 except error.RepoError as e:
947 947 repo.ui.warn(('%s: %s\n') % (rev, e))
948 948 except Exception:
949 949 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
950 950 node.short(ctx.node()))
951 951
952 952 return ret
953 953
954 954 def remoteui(src, opts):
955 955 'build a remote ui from ui or repo and opts'
956 956 if util.safehasattr(src, 'baseui'): # looks like a repository
957 957 dst = src.baseui.copy() # drop repo-specific config
958 958 src = src.ui # copy target options from repo
959 959 else: # assume it's a global ui object
960 960 dst = src.copy() # keep all global options
961 961
962 962 # copy ssh-specific options
963 963 for o in 'ssh', 'remotecmd':
964 964 v = opts.get(o) or src.config('ui', o)
965 965 if v:
966 966 dst.setconfig("ui", o, v, 'copied')
967 967
968 968 # copy bundle-specific options
969 969 r = src.config('bundle', 'mainreporoot')
970 970 if r:
971 971 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
972 972
973 973 # copy selected local settings to the remote ui
974 974 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
975 975 for key, val in src.configitems(sect):
976 976 dst.setconfig(sect, key, val, 'copied')
977 977 v = src.config('web', 'cacerts')
978 978 if v:
979 979 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
980 980
981 981 return dst
982 982
983 983 # Files of interest
984 984 # Used to check if the repository has changed looking at mtime and size of
985 985 # these files.
986 986 foi = [('spath', '00changelog.i'),
987 987 ('spath', 'phaseroots'), # ! phase can change content at the same size
988 988 ('spath', 'obsstore'),
989 989 ('path', 'bookmarks'), # ! bookmark can change content at the same size
990 990 ]
991 991
992 992 class cachedlocalrepo(object):
993 993 """Holds a localrepository that can be cached and reused."""
994 994
995 995 def __init__(self, repo):
996 996 """Create a new cached repo from an existing repo.
997 997
998 998 We assume the passed in repo was recently created. If the
999 999 repo has changed between when it was created and when it was
1000 1000 turned into a cache, it may not refresh properly.
1001 1001 """
1002 1002 assert isinstance(repo, localrepo.localrepository)
1003 1003 self._repo = repo
1004 1004 self._state, self.mtime = self._repostate()
1005 1005 self._filtername = repo.filtername
1006 1006
1007 1007 def fetch(self):
1008 1008 """Refresh (if necessary) and return a repository.
1009 1009
1010 1010 If the cached instance is out of date, it will be recreated
1011 1011 automatically and returned.
1012 1012
1013 1013 Returns a tuple of the repo and a boolean indicating whether a new
1014 1014 repo instance was created.
1015 1015 """
1016 1016 # We compare the mtimes and sizes of some well-known files to
1017 1017 # determine if the repo changed. This is not precise, as mtimes
1018 1018 # are susceptible to clock skew and imprecise filesystems and
1019 1019 # file content can change while maintaining the same size.
1020 1020
1021 1021 state, mtime = self._repostate()
1022 1022 if state == self._state:
1023 1023 return self._repo, False
1024 1024
1025 1025 repo = repository(self._repo.baseui, self._repo.url())
1026 1026 if self._filtername:
1027 1027 self._repo = repo.filtered(self._filtername)
1028 1028 else:
1029 1029 self._repo = repo.unfiltered()
1030 1030 self._state = state
1031 1031 self.mtime = mtime
1032 1032
1033 1033 return self._repo, True
1034 1034
1035 1035 def _repostate(self):
1036 1036 state = []
1037 1037 maxmtime = -1
1038 1038 for attr, fname in foi:
1039 1039 prefix = getattr(self._repo, attr)
1040 1040 p = os.path.join(prefix, fname)
1041 1041 try:
1042 1042 st = os.stat(p)
1043 1043 except OSError:
1044 1044 st = os.stat(prefix)
1045 1045 state.append((st.st_mtime, st.st_size))
1046 1046 maxmtime = max(maxmtime, st.st_mtime)
1047 1047
1048 1048 return tuple(state), maxmtime
1049 1049
1050 1050 def copy(self):
1051 1051 """Obtain a copy of this class instance.
1052 1052
1053 1053 A new localrepository instance is obtained. The new instance should be
1054 1054 completely independent of the original.
1055 1055 """
1056 1056 repo = repository(self._repo.baseui, self._repo.origroot)
1057 1057 if self._filtername:
1058 1058 repo = repo.filtered(self._filtername)
1059 1059 else:
1060 1060 repo = repo.unfiltered()
1061 1061 c = cachedlocalrepo(repo)
1062 1062 c._state = self._state
1063 1063 c.mtime = self.mtime
1064 1064 return c
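
For context, a hedged sketch of how the fixed helper behaves after this change; the paths and the top-level import are assumed for illustration and are not part of the patch:

    from mercurial import hg

    # A plain filesystem path resolves through _peerlookup() to the localrepo
    # module, whose module-level islocal() reports True.
    hg.islocal(b'/tmp/somerepo')        # True

    # An http URL resolves to httppeer, which does not expose islocal(), so
    # the AttributeError branch above should return False.
    hg.islocal(b'http://example.org/')  # False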