share: allow more than one level of directories to be created
Matt Harbison
r39222:e8deaa77 default
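The change replaces destwvfs.mkdir() with destwvfs.makedirs() in share(), so a share destination whose parent directories do not exist yet (e.g. "hg share repo3 missingdir/repo5", exercised by the test update below) no longer fails. As a minimal sketch of the difference, using plain os calls to stand in for the vfs wrappers (illustration only, not part of the changeset):

import os
import tempfile

root = tempfile.mkdtemp()
nested = os.path.join(root, 'missingdir', 'repo5')

# os.mkdir (like vfs.mkdir) creates only the final path component and
# fails when intermediate directories are missing.
try:
    os.mkdir(nested)
except OSError as exc:
    print('mkdir failed: %s' % exc)

# os.makedirs (like vfs.makedirs) creates every missing level of the path.
os.makedirs(nested)
print(os.path.isdir(nested))  # True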
@@ -1,1177 +1,1177 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import functools
13 13 import hashlib
14 14 import os
15 15 import shutil
16 16 import stat
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 nullid,
21 21 )
22 22
23 23 from . import (
24 24 bookmarks,
25 25 bundlerepo,
26 26 cacheutil,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 httppeer,
34 34 localrepo,
35 35 lock,
36 36 logcmdutil,
37 37 logexchange,
38 38 merge as mergemod,
39 39 node,
40 40 phases,
41 41 scmutil,
42 42 sshpeer,
43 43 statichttprepo,
44 44 ui as uimod,
45 45 unionrepo,
46 46 url,
47 47 util,
48 48 verify as verifymod,
49 49 vfs as vfsmod,
50 50 )
51 51
52 52 from .utils import (
53 53 stringutil,
54 54 )
55 55
56 56 release = lock.release
57 57
58 58 # shared features
59 59 sharedbookmarks = 'bookmarks'
60 60
61 61 def _local(path):
62 62 path = util.expandpath(util.urllocalpath(path))
63 63 return (os.path.isfile(path) and bundlerepo or localrepo)
64 64
65 65 def addbranchrevs(lrepo, other, branches, revs):
66 66 peer = other.peer() # a courtesy to callers using a localrepo for other
67 67 hashbranch, branches = branches
68 68 if not hashbranch and not branches:
69 69 x = revs or None
70 70 if revs:
71 71 y = revs[0]
72 72 else:
73 73 y = None
74 74 return x, y
75 75 if revs:
76 76 revs = list(revs)
77 77 else:
78 78 revs = []
79 79
80 80 if not peer.capable('branchmap'):
81 81 if branches:
82 82 raise error.Abort(_("remote branch lookup not supported"))
83 83 revs.append(hashbranch)
84 84 return revs, revs[0]
85 85
86 86 with peer.commandexecutor() as e:
87 87 branchmap = e.callcommand('branchmap', {}).result()
88 88
89 89 def primary(branch):
90 90 if branch == '.':
91 91 if not lrepo:
92 92 raise error.Abort(_("dirstate branch not accessible"))
93 93 branch = lrepo.dirstate.branch()
94 94 if branch in branchmap:
95 95 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
96 96 return True
97 97 else:
98 98 return False
99 99
100 100 for branch in branches:
101 101 if not primary(branch):
102 102 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
103 103 if hashbranch:
104 104 if not primary(hashbranch):
105 105 revs.append(hashbranch)
106 106 return revs, revs[0]
107 107
108 108 def parseurl(path, branches=None):
109 109 '''parse url#branch, returning (url, (branch, branches))'''
110 110
111 111 u = util.url(path)
112 112 branch = None
113 113 if u.fragment:
114 114 branch = u.fragment
115 115 u.fragment = None
116 116 return bytes(u), (branch, branches or [])
117 117
118 118 schemes = {
119 119 'bundle': bundlerepo,
120 120 'union': unionrepo,
121 121 'file': _local,
122 122 'http': httppeer,
123 123 'https': httppeer,
124 124 'ssh': sshpeer,
125 125 'static-http': statichttprepo,
126 126 }
127 127
128 128 def _peerlookup(path):
129 129 u = util.url(path)
130 130 scheme = u.scheme or 'file'
131 131 thing = schemes.get(scheme) or schemes['file']
132 132 try:
133 133 return thing(path)
134 134 except TypeError:
135 135 # we can't test callable(thing) because 'thing' can be an unloaded
136 136 # module that implements __call__
137 137 if not util.safehasattr(thing, 'instance'):
138 138 raise
139 139 return thing
140 140
141 141 def islocal(repo):
142 142 '''return true if repo (or path pointing to repo) is local'''
143 143 if isinstance(repo, bytes):
144 144 try:
145 145 return _peerlookup(repo).islocal(repo)
146 146 except AttributeError:
147 147 return False
148 148 return repo.local()
149 149
150 150 def openpath(ui, path):
151 151 '''open path with open if local, url.open if remote'''
152 152 pathurl = util.url(path, parsequery=False, parsefragment=False)
153 153 if pathurl.islocal():
154 154 return util.posixfile(pathurl.localpath(), 'rb')
155 155 else:
156 156 return url.open(ui, path)
157 157
158 158 # a list of (ui, repo) functions called for wire peer initialization
159 159 wirepeersetupfuncs = []
160 160
161 161 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
162 162 intents=None):
163 163 """return a repository object for the specified path"""
164 164 obj = _peerlookup(path).instance(ui, path, create, intents=intents)
165 165 ui = getattr(obj, "ui", ui)
166 166 if ui.configbool('devel', 'debug.extensions'):
167 167 log = functools.partial(
168 168 ui.debug, 'debug.extensions: ', label='debug.extensions')
169 169 else:
170 170 log = lambda *a, **kw: None
171 171 for f in presetupfuncs or []:
172 172 f(ui, obj)
173 173 log('- executing reposetup hooks\n')
174 174 for name, module in extensions.extensions(ui):
175 175 log(' - running reposetup for %s\n' % (name,))
176 176 hook = getattr(module, 'reposetup', None)
177 177 if hook:
178 178 hook(ui, obj)
179 179 if not obj.local():
180 180 for f in wirepeersetupfuncs:
181 181 f(ui, obj)
182 182 return obj
183 183
184 184 def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
185 185 """return a repository object for the specified path"""
186 186 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
187 187 intents=intents)
188 188 repo = peer.local()
189 189 if not repo:
190 190 raise error.Abort(_("repository '%s' is not local") %
191 191 (path or peer.url()))
192 192 return repo.filtered('visible')
193 193
194 194 def peer(uiorrepo, opts, path, create=False, intents=None):
195 195 '''return a repository peer for the specified path'''
196 196 rui = remoteui(uiorrepo, opts)
197 197 return _peerorrepo(rui, path, create, intents=intents).peer()
198 198
199 199 def defaultdest(source):
200 200 '''return default destination of clone if none is given
201 201
202 202 >>> defaultdest(b'foo')
203 203 'foo'
204 204 >>> defaultdest(b'/foo/bar')
205 205 'bar'
206 206 >>> defaultdest(b'/')
207 207 ''
208 208 >>> defaultdest(b'')
209 209 ''
210 210 >>> defaultdest(b'http://example.org/')
211 211 ''
212 212 >>> defaultdest(b'http://example.org/foo/')
213 213 'foo'
214 214 '''
215 215 path = util.url(source).path
216 216 if not path:
217 217 return ''
218 218 return os.path.basename(os.path.normpath(path))
219 219
220 220 def sharedreposource(repo):
221 221 """Returns repository object for source repository of a shared repo.
222 222
223 223 If repo is not a shared repository, returns None.
224 224 """
225 225 if repo.sharedpath == repo.path:
226 226 return None
227 227
228 228 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
229 229 return repo.srcrepo
230 230
231 231 # the sharedpath always ends in the .hg; we want the path to the repo
232 232 source = repo.vfs.split(repo.sharedpath)[0]
233 233 srcurl, branches = parseurl(source)
234 234 srcrepo = repository(repo.ui, srcurl)
235 235 repo.srcrepo = srcrepo
236 236 return srcrepo
237 237
238 238 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
239 239 relative=False):
240 240 '''create a shared repository'''
241 241
242 242 if not islocal(source):
243 243 raise error.Abort(_('can only share local repositories'))
244 244
245 245 if not dest:
246 246 dest = defaultdest(source)
247 247 else:
248 248 dest = ui.expandpath(dest)
249 249
250 250 if isinstance(source, bytes):
251 251 origsource = ui.expandpath(source)
252 252 source, branches = parseurl(origsource)
253 253 srcrepo = repository(ui, source)
254 254 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
255 255 else:
256 256 srcrepo = source.local()
257 257 origsource = source = srcrepo.url()
258 258 checkout = None
259 259
260 260 sharedpath = srcrepo.sharedpath # if our source is already sharing
261 261
262 262 destwvfs = vfsmod.vfs(dest, realpath=True)
263 263 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
264 264
265 265 if destvfs.lexists():
266 266 raise error.Abort(_('destination already exists'))
267 267
268 268 if not destwvfs.isdir():
269 destwvfs.mkdir()
269 destwvfs.makedirs()
270 270 destvfs.makedir()
271 271
272 272 requirements = ''
273 273 try:
274 274 requirements = srcrepo.vfs.read('requires')
275 275 except IOError as inst:
276 276 if inst.errno != errno.ENOENT:
277 277 raise
278 278
279 279 if relative:
280 280 try:
281 281 sharedpath = os.path.relpath(sharedpath, destvfs.base)
282 282 requirements += 'relshared\n'
283 283 except (IOError, ValueError) as e:
284 284 # ValueError is raised on Windows if the drive letters differ on
285 285 # each path
286 286 raise error.Abort(_('cannot calculate relative path'),
287 287 hint=stringutil.forcebytestr(e))
288 288 else:
289 289 requirements += 'shared\n'
290 290
291 291 destvfs.write('requires', requirements)
292 292 destvfs.write('sharedpath', sharedpath)
293 293
294 294 r = repository(ui, destwvfs.base)
295 295 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
296 296 _postshareupdate(r, update, checkout=checkout)
297 297 return r
298 298
299 299 def unshare(ui, repo):
300 300 """convert a shared repository to a normal one
301 301
302 302 Copy the store data to the repo and remove the sharedpath data.
303 303 """
304 304
305 305 destlock = lock = None
306 306 lock = repo.lock()
307 307 try:
308 308 # we use locks here because if we race with commit, we
309 309 # can end up with extra data in the cloned revlogs that's
310 310 # not pointed to by changesets, thus causing verify to
311 311 # fail
312 312
313 313 destlock = copystore(ui, repo, repo.path)
314 314
315 315 sharefile = repo.vfs.join('sharedpath')
316 316 util.rename(sharefile, sharefile + '.old')
317 317
318 318 repo.requirements.discard('shared')
319 319 repo.requirements.discard('relshared')
320 320 repo._writerequirements()
321 321 finally:
322 322 destlock and destlock.release()
323 323 lock and lock.release()
324 324
325 325 # update store, spath, svfs and sjoin of repo
326 326 repo.unfiltered().__init__(repo.baseui, repo.root)
327 327
328 328 # TODO: figure out how to access subrepos that exist, but were previously
329 329 # removed from .hgsub
330 330 c = repo['.']
331 331 subs = c.substate
332 332 for s in sorted(subs):
333 333 c.sub(s).unshare()
334 334
335 335 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
336 336 """Called after a new shared repo is created.
337 337
338 338 The new repo only has a requirements file and pointer to the source.
339 339 This function configures additional shared data.
340 340
341 341 Extensions can wrap this function and write additional entries to
342 342 destrepo/.hg/shared to indicate additional pieces of data to be shared.
343 343 """
344 344 default = defaultpath or sourcerepo.ui.config('paths', 'default')
345 345 if default:
346 346 template = ('[paths]\n'
347 347 'default = %s\n')
348 348 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
349 349
350 350 with destrepo.wlock():
351 351 if bookmarks:
352 352 destrepo.vfs.write('shared', sharedbookmarks + '\n')
353 353
354 354 def _postshareupdate(repo, update, checkout=None):
355 355 """Maybe perform a working directory update after a shared repo is created.
356 356
357 357 ``update`` can be a boolean or a revision to update to.
358 358 """
359 359 if not update:
360 360 return
361 361
362 362 repo.ui.status(_("updating working directory\n"))
363 363 if update is not True:
364 364 checkout = update
365 365 for test in (checkout, 'default', 'tip'):
366 366 if test is None:
367 367 continue
368 368 try:
369 369 uprev = repo.lookup(test)
370 370 break
371 371 except error.RepoLookupError:
372 372 continue
373 373 _update(repo, uprev)
374 374
375 375 def copystore(ui, srcrepo, destpath):
376 376 '''copy files from store of srcrepo in destpath
377 377
378 378 returns destlock
379 379 '''
380 380 destlock = None
381 381 try:
382 382 hardlink = None
383 383 topic = _('linking') if hardlink else _('copying')
384 384 progress = ui.makeprogress(topic)
385 385 num = 0
386 386 srcpublishing = srcrepo.publishing()
387 387 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
388 388 dstvfs = vfsmod.vfs(destpath)
389 389 for f in srcrepo.store.copylist():
390 390 if srcpublishing and f.endswith('phaseroots'):
391 391 continue
392 392 dstbase = os.path.dirname(f)
393 393 if dstbase and not dstvfs.exists(dstbase):
394 394 dstvfs.mkdir(dstbase)
395 395 if srcvfs.exists(f):
396 396 if f.endswith('data'):
397 397 # 'dstbase' may be empty (e.g. revlog format 0)
398 398 lockfile = os.path.join(dstbase, "lock")
399 399 # lock to avoid premature writing to the target
400 400 destlock = lock.lock(dstvfs, lockfile)
401 401 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
402 402 hardlink, progress)
403 403 num += n
404 404 if hardlink:
405 405 ui.debug("linked %d files\n" % num)
406 406 else:
407 407 ui.debug("copied %d files\n" % num)
408 408 progress.complete()
409 409 return destlock
410 410 except: # re-raises
411 411 release(destlock)
412 412 raise
413 413
414 414 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
415 415 rev=None, update=True, stream=False):
416 416 """Perform a clone using a shared repo.
417 417
418 418 The store for the repository will be located at <sharepath>/.hg. The
419 419 specified revisions will be cloned or pulled from "source". A shared repo
420 420 will be created at "dest" and a working copy will be created if "update" is
421 421 True.
422 422 """
423 423 revs = None
424 424 if rev:
425 425 if not srcpeer.capable('lookup'):
426 426 raise error.Abort(_("src repository does not support "
427 427 "revision lookup and so doesn't "
428 428 "support clone by revision"))
429 429
430 430 # TODO this is batchable.
431 431 remoterevs = []
432 432 for r in rev:
433 433 with srcpeer.commandexecutor() as e:
434 434 remoterevs.append(e.callcommand('lookup', {
435 435 'key': r,
436 436 }).result())
437 437 revs = remoterevs
438 438
439 439 # Obtain a lock before checking for or cloning the pooled repo otherwise
440 440 # 2 clients may race creating or populating it.
441 441 pooldir = os.path.dirname(sharepath)
442 442 # lock class requires the directory to exist.
443 443 try:
444 444 util.makedir(pooldir, False)
445 445 except OSError as e:
446 446 if e.errno != errno.EEXIST:
447 447 raise
448 448
449 449 poolvfs = vfsmod.vfs(pooldir)
450 450 basename = os.path.basename(sharepath)
451 451
452 452 with lock.lock(poolvfs, '%s.lock' % basename):
453 453 if os.path.exists(sharepath):
454 454 ui.status(_('(sharing from existing pooled repository %s)\n') %
455 455 basename)
456 456 else:
457 457 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
458 458 # Always use pull mode because hardlinks in share mode don't work
459 459 # well. Never update because working copies aren't necessary in
460 460 # share mode.
461 461 clone(ui, peeropts, source, dest=sharepath, pull=True,
462 462 revs=rev, update=False, stream=stream)
463 463
464 464 # Resolve the value to put in [paths] section for the source.
465 465 if islocal(source):
466 466 defaultpath = os.path.abspath(util.urllocalpath(source))
467 467 else:
468 468 defaultpath = source
469 469
470 470 sharerepo = repository(ui, path=sharepath)
471 471 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
472 472 defaultpath=defaultpath)
473 473
474 474 # We need to perform a pull against the dest repo to fetch bookmarks
475 475 # and other non-store data that isn't shared by default. In the case of
476 476 # non-existing shared repo, this means we pull from the remote twice. This
477 477 # is a bit weird. But at the time it was implemented, there wasn't an easy
478 478 # way to pull just non-changegroup data.
479 479 destrepo = repository(ui, path=dest)
480 480 exchange.pull(destrepo, srcpeer, heads=revs)
481 481
482 482 _postshareupdate(destrepo, update)
483 483
484 484 return srcpeer, peer(ui, peeropts, dest)
485 485
486 486 # Recomputing branch cache might be slow on big repos,
487 487 # so just copy it
488 488 def _copycache(srcrepo, dstcachedir, fname):
489 489 """copy a cache from srcrepo to destcachedir (if it exists)"""
490 490 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
491 491 dstbranchcache = os.path.join(dstcachedir, fname)
492 492 if os.path.exists(srcbranchcache):
493 493 if not os.path.exists(dstcachedir):
494 494 os.mkdir(dstcachedir)
495 495 util.copyfile(srcbranchcache, dstbranchcache)
496 496
497 497 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
498 498 update=True, stream=False, branch=None, shareopts=None):
499 499 """Make a copy of an existing repository.
500 500
501 501 Create a copy of an existing repository in a new directory. The
502 502 source and destination are URLs, as passed to the repository
503 503 function. Returns a pair of repository peers, the source and
504 504 newly created destination.
505 505
506 506 The location of the source is added to the new repository's
507 507 .hg/hgrc file, as the default to be used for future pulls and
508 508 pushes.
509 509
510 510 If an exception is raised, the partly cloned/updated destination
511 511 repository will be deleted.
512 512
513 513 Arguments:
514 514
515 515 source: repository object or URL
516 516
517 517 dest: URL of destination repository to create (defaults to base
518 518 name of source repository)
519 519
520 520 pull: always pull from source repository, even in local case or if the
521 521 server prefers streaming
522 522
523 523 stream: stream raw data uncompressed from repository (fast over
524 524 LAN, slow over WAN)
525 525
526 526 revs: revision to clone up to (implies pull=True)
527 527
528 528 update: update working directory after clone completes, if
529 529 destination is local repository (True means update to default rev,
530 530 anything else is treated as a revision)
531 531
532 532 branch: branches to clone
533 533
534 534 shareopts: dict of options to control auto sharing behavior. The "pool" key
535 535 activates auto sharing mode and defines the directory for stores. The
536 536 "mode" key determines how to construct the directory name of the shared
537 537 repository. "identity" means the name is derived from the node of the first
538 538 changeset in the repository. "remote" means the name is derived from the
539 539 remote's path/URL. Defaults to "identity."
540 540 """
541 541
542 542 if isinstance(source, bytes):
543 543 origsource = ui.expandpath(source)
544 544 source, branches = parseurl(origsource, branch)
545 545 srcpeer = peer(ui, peeropts, source)
546 546 else:
547 547 srcpeer = source.peer() # in case we were called with a localrepo
548 548 branches = (None, branch or [])
549 549 origsource = source = srcpeer.url()
550 550 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
551 551
552 552 if dest is None:
553 553 dest = defaultdest(source)
554 554 if dest:
555 555 ui.status(_("destination directory: %s\n") % dest)
556 556 else:
557 557 dest = ui.expandpath(dest)
558 558
559 559 dest = util.urllocalpath(dest)
560 560 source = util.urllocalpath(source)
561 561
562 562 if not dest:
563 563 raise error.Abort(_("empty destination path is not valid"))
564 564
565 565 destvfs = vfsmod.vfs(dest, expandpath=True)
566 566 if destvfs.lexists():
567 567 if not destvfs.isdir():
568 568 raise error.Abort(_("destination '%s' already exists") % dest)
569 569 elif destvfs.listdir():
570 570 raise error.Abort(_("destination '%s' is not empty") % dest)
571 571
572 572 shareopts = shareopts or {}
573 573 sharepool = shareopts.get('pool')
574 574 sharenamemode = shareopts.get('mode')
575 575 if sharepool and islocal(dest):
576 576 sharepath = None
577 577 if sharenamemode == 'identity':
578 578 # Resolve the name from the initial changeset in the remote
579 579 # repository. This returns nullid when the remote is empty. It
580 580 # raises RepoLookupError if revision 0 is filtered or otherwise
581 581 # not available. If we fail to resolve, sharing is not enabled.
582 582 try:
583 583 with srcpeer.commandexecutor() as e:
584 584 rootnode = e.callcommand('lookup', {
585 585 'key': '0',
586 586 }).result()
587 587
588 588 if rootnode != node.nullid:
589 589 sharepath = os.path.join(sharepool, node.hex(rootnode))
590 590 else:
591 591 ui.status(_('(not using pooled storage: '
592 592 'remote appears to be empty)\n'))
593 593 except error.RepoLookupError:
594 594 ui.status(_('(not using pooled storage: '
595 595 'unable to resolve identity of remote)\n'))
596 596 elif sharenamemode == 'remote':
597 597 sharepath = os.path.join(
598 598 sharepool, node.hex(hashlib.sha1(source).digest()))
599 599 else:
600 600 raise error.Abort(_('unknown share naming mode: %s') %
601 601 sharenamemode)
602 602
603 603 if sharepath:
604 604 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
605 605 dest, pull=pull, rev=revs, update=update,
606 606 stream=stream)
607 607
608 608 srclock = destlock = cleandir = None
609 609 srcrepo = srcpeer.local()
610 610 try:
611 611 abspath = origsource
612 612 if islocal(origsource):
613 613 abspath = os.path.abspath(util.urllocalpath(origsource))
614 614
615 615 if islocal(dest):
616 616 cleandir = dest
617 617
618 618 copy = False
619 619 if (srcrepo and srcrepo.cancopy() and islocal(dest)
620 620 and not phases.hassecret(srcrepo)):
621 621 copy = not pull and not revs
622 622
623 623 if copy:
624 624 try:
625 625 # we use a lock here because if we race with commit, we
626 626 # can end up with extra data in the cloned revlogs that's
627 627 # not pointed to by changesets, thus causing verify to
628 628 # fail
629 629 srclock = srcrepo.lock(wait=False)
630 630 except error.LockError:
631 631 copy = False
632 632
633 633 if copy:
634 634 srcrepo.hook('preoutgoing', throw=True, source='clone')
635 635 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
636 636 if not os.path.exists(dest):
637 637 util.makedirs(dest)
638 638 else:
639 639 # only clean up directories we create ourselves
640 640 cleandir = hgdir
641 641 try:
642 642 destpath = hgdir
643 643 util.makedir(destpath, notindexed=True)
644 644 except OSError as inst:
645 645 if inst.errno == errno.EEXIST:
646 646 cleandir = None
647 647 raise error.Abort(_("destination '%s' already exists")
648 648 % dest)
649 649 raise
650 650
651 651 destlock = copystore(ui, srcrepo, destpath)
652 652 # copy bookmarks over
653 653 srcbookmarks = srcrepo.vfs.join('bookmarks')
654 654 dstbookmarks = os.path.join(destpath, 'bookmarks')
655 655 if os.path.exists(srcbookmarks):
656 656 util.copyfile(srcbookmarks, dstbookmarks)
657 657
658 658 dstcachedir = os.path.join(destpath, 'cache')
659 659 for cache in cacheutil.cachetocopy(srcrepo):
660 660 _copycache(srcrepo, dstcachedir, cache)
661 661
662 662 # we need to re-init the repo after manually copying the data
663 663 # into it
664 664 destpeer = peer(srcrepo, peeropts, dest)
665 665 srcrepo.hook('outgoing', source='clone',
666 666 node=node.hex(node.nullid))
667 667 else:
668 668 try:
669 669 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
670 670 # only pass ui when no srcrepo
671 671 except OSError as inst:
672 672 if inst.errno == errno.EEXIST:
673 673 cleandir = None
674 674 raise error.Abort(_("destination '%s' already exists")
675 675 % dest)
676 676 raise
677 677
678 678 if revs:
679 679 if not srcpeer.capable('lookup'):
680 680 raise error.Abort(_("src repository does not support "
681 681 "revision lookup and so doesn't "
682 682 "support clone by revision"))
683 683
684 684 # TODO this is batchable.
685 685 remoterevs = []
686 686 for rev in revs:
687 687 with srcpeer.commandexecutor() as e:
688 688 remoterevs.append(e.callcommand('lookup', {
689 689 'key': rev,
690 690 }).result())
691 691 revs = remoterevs
692 692
693 693 checkout = revs[0]
694 694 else:
695 695 revs = None
696 696 local = destpeer.local()
697 697 if local:
698 698 u = util.url(abspath)
699 699 defaulturl = bytes(u)
700 700 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
701 701 if not stream:
702 702 if pull:
703 703 stream = False
704 704 else:
705 705 stream = None
706 706 # internal config: ui.quietbookmarkmove
707 707 overrides = {('ui', 'quietbookmarkmove'): True}
708 708 with local.ui.configoverride(overrides, 'clone'):
709 709 exchange.pull(local, srcpeer, revs,
710 710 streamclonerequested=stream)
711 711 elif srcrepo:
712 712 exchange.push(srcrepo, destpeer, revs=revs,
713 713 bookmarks=srcrepo._bookmarks.keys())
714 714 else:
715 715 raise error.Abort(_("clone from remote to remote not supported")
716 716 )
717 717
718 718 cleandir = None
719 719
720 720 destrepo = destpeer.local()
721 721 if destrepo:
722 722 template = uimod.samplehgrcs['cloned']
723 723 u = util.url(abspath)
724 724 u.passwd = None
725 725 defaulturl = bytes(u)
726 726 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
727 727 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
728 728
729 729 if ui.configbool('experimental', 'remotenames'):
730 730 logexchange.pullremotenames(destrepo, srcpeer)
731 731
732 732 if update:
733 733 if update is not True:
734 734 with srcpeer.commandexecutor() as e:
735 735 checkout = e.callcommand('lookup', {
736 736 'key': update,
737 737 }).result()
738 738
739 739 uprev = None
740 740 status = None
741 741 if checkout is not None:
742 742 # Some extensions (at least hg-git and hg-subversion) have
743 743 # a peer.lookup() implementation that returns a name instead
744 744 # of a nodeid. We work around it here until we've figured
745 745 # out a better solution.
746 746 if len(checkout) == 20 and checkout in destrepo:
747 747 uprev = checkout
748 748 elif scmutil.isrevsymbol(destrepo, checkout):
749 749 uprev = scmutil.revsymbol(destrepo, checkout).node()
750 750 else:
751 751 if update is not True:
752 752 try:
753 753 uprev = destrepo.lookup(update)
754 754 except error.RepoLookupError:
755 755 pass
756 756 if uprev is None:
757 757 try:
758 758 uprev = destrepo._bookmarks['@']
759 759 update = '@'
760 760 bn = destrepo[uprev].branch()
761 761 if bn == 'default':
762 762 status = _("updating to bookmark @\n")
763 763 else:
764 764 status = (_("updating to bookmark @ on branch %s\n")
765 765 % bn)
766 766 except KeyError:
767 767 try:
768 768 uprev = destrepo.branchtip('default')
769 769 except error.RepoLookupError:
770 770 uprev = destrepo.lookup('tip')
771 771 if not status:
772 772 bn = destrepo[uprev].branch()
773 773 status = _("updating to branch %s\n") % bn
774 774 destrepo.ui.status(status)
775 775 _update(destrepo, uprev)
776 776 if update in destrepo._bookmarks:
777 777 bookmarks.activate(destrepo, update)
778 778 finally:
779 779 release(srclock, destlock)
780 780 if cleandir is not None:
781 781 shutil.rmtree(cleandir, True)
782 782 if srcpeer is not None:
783 783 srcpeer.close()
784 784 return srcpeer, destpeer
785 785
786 786 def _showstats(repo, stats, quietempty=False):
787 787 if quietempty and stats.isempty():
788 788 return
789 789 repo.ui.status(_("%d files updated, %d files merged, "
790 790 "%d files removed, %d files unresolved\n") % (
791 791 stats.updatedcount, stats.mergedcount,
792 792 stats.removedcount, stats.unresolvedcount))
793 793
794 794 def updaterepo(repo, node, overwrite, updatecheck=None):
795 795 """Update the working directory to node.
796 796
797 797 When overwrite is set, changes are clobbered; otherwise they are merged
798 798
799 799 returns stats (see pydoc mercurial.merge.applyupdates)"""
800 800 return mergemod.update(repo, node, False, overwrite,
801 801 labels=['working copy', 'destination'],
802 802 updatecheck=updatecheck)
803 803
804 804 def update(repo, node, quietempty=False, updatecheck=None):
805 805 """update the working directory to node"""
806 806 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
807 807 _showstats(repo, stats, quietempty)
808 808 if stats.unresolvedcount:
809 809 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
810 810 return stats.unresolvedcount > 0
811 811
812 812 # naming conflict in clone()
813 813 _update = update
814 814
815 815 def clean(repo, node, show_stats=True, quietempty=False):
816 816 """forcibly switch the working directory to node, clobbering changes"""
817 817 stats = updaterepo(repo, node, True)
818 818 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
819 819 if show_stats:
820 820 _showstats(repo, stats, quietempty)
821 821 return stats.unresolvedcount > 0
822 822
823 823 # naming conflict in updatetotally()
824 824 _clean = clean
825 825
826 826 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
827 827 """Update the working directory with extra care for non-file components
828 828
829 829 This takes care of non-file components below:
830 830
831 831 :bookmark: might be advanced or (in)activated
832 832
833 833 This takes arguments below:
834 834
835 835 :checkout: to which revision the working directory is updated
836 836 :brev: a name, which might be a bookmark to be activated after updating
837 837 :clean: whether changes in the working directory can be discarded
838 838 :updatecheck: how to deal with a dirty working directory
839 839
840 840 Valid values for updatecheck are (None => linear):
841 841
842 842 * abort: abort if the working directory is dirty
843 843 * none: don't check (merge working directory changes into destination)
844 844 * linear: check that update is linear before merging working directory
845 845 changes into destination
846 846 * noconflict: check that the update does not result in file merges
847 847
848 848 This returns whether conflict is detected at updating or not.
849 849 """
850 850 if updatecheck is None:
851 851 updatecheck = ui.config('commands', 'update.check')
852 852 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
853 853 # If not configured, or invalid value configured
854 854 updatecheck = 'linear'
855 855 with repo.wlock():
856 856 movemarkfrom = None
857 857 warndest = False
858 858 if checkout is None:
859 859 updata = destutil.destupdate(repo, clean=clean)
860 860 checkout, movemarkfrom, brev = updata
861 861 warndest = True
862 862
863 863 if clean:
864 864 ret = _clean(repo, checkout)
865 865 else:
866 866 if updatecheck == 'abort':
867 867 cmdutil.bailifchanged(repo, merge=False)
868 868 updatecheck = 'none'
869 869 ret = _update(repo, checkout, updatecheck=updatecheck)
870 870
871 871 if not ret and movemarkfrom:
872 872 if movemarkfrom == repo['.'].node():
873 873 pass # no-op update
874 874 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
875 875 b = ui.label(repo._activebookmark, 'bookmarks.active')
876 876 ui.status(_("updating bookmark %s\n") % b)
877 877 else:
878 878 # this can happen with a non-linear update
879 879 b = ui.label(repo._activebookmark, 'bookmarks')
880 880 ui.status(_("(leaving bookmark %s)\n") % b)
881 881 bookmarks.deactivate(repo)
882 882 elif brev in repo._bookmarks:
883 883 if brev != repo._activebookmark:
884 884 b = ui.label(brev, 'bookmarks.active')
885 885 ui.status(_("(activating bookmark %s)\n") % b)
886 886 bookmarks.activate(repo, brev)
887 887 elif brev:
888 888 if repo._activebookmark:
889 889 b = ui.label(repo._activebookmark, 'bookmarks')
890 890 ui.status(_("(leaving bookmark %s)\n") % b)
891 891 bookmarks.deactivate(repo)
892 892
893 893 if warndest:
894 894 destutil.statusotherdests(ui, repo)
895 895
896 896 return ret
897 897
898 898 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
899 899 abort=False):
900 900 """Branch merge with node, resolving changes. Return true if any
901 901 unresolved conflicts."""
902 902 if not abort:
903 903 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
904 904 labels=labels)
905 905 else:
906 906 ms = mergemod.mergestate.read(repo)
907 907 if ms.active():
908 908 # there were conflicts
909 909 node = ms.localctx.hex()
910 910 else:
911 911 # there were no conflicts, mergestate was not stored
912 912 node = repo['.'].hex()
913 913
914 914 repo.ui.status(_("aborting the merge, updating back to"
915 915 " %s\n") % node[:12])
916 916 stats = mergemod.update(repo, node, branchmerge=False, force=True,
917 917 labels=labels)
918 918
919 919 _showstats(repo, stats)
920 920 if stats.unresolvedcount:
921 921 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
922 922 "or 'hg merge --abort' to abandon\n"))
923 923 elif remind and not abort:
924 924 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
925 925 return stats.unresolvedcount > 0
926 926
927 927 def _incoming(displaychlist, subreporecurse, ui, repo, source,
928 928 opts, buffered=False):
929 929 """
930 930 Helper for incoming / gincoming.
931 931 displaychlist gets called with
932 932 (remoterepo, incomingchangesetlist, displayer) parameters,
933 933 and is supposed to contain only code that can't be unified.
934 934 """
935 935 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
936 936 other = peer(repo, opts, source)
937 937 ui.status(_('comparing with %s\n') % util.hidepassword(source))
938 938 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
939 939
940 940 if revs:
941 941 revs = [other.lookup(rev) for rev in revs]
942 942 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
943 943 revs, opts["bundle"], opts["force"])
944 944 try:
945 945 if not chlist:
946 946 ui.status(_("no changes found\n"))
947 947 return subreporecurse()
948 948 ui.pager('incoming')
949 949 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
950 950 buffered=buffered)
951 951 displaychlist(other, chlist, displayer)
952 952 displayer.close()
953 953 finally:
954 954 cleanupfn()
955 955 subreporecurse()
956 956 return 0 # exit code is zero since we found incoming changes
957 957
958 958 def incoming(ui, repo, source, opts):
959 959 def subreporecurse():
960 960 ret = 1
961 961 if opts.get('subrepos'):
962 962 ctx = repo[None]
963 963 for subpath in sorted(ctx.substate):
964 964 sub = ctx.sub(subpath)
965 965 ret = min(ret, sub.incoming(ui, source, opts))
966 966 return ret
967 967
968 968 def display(other, chlist, displayer):
969 969 limit = logcmdutil.getlimit(opts)
970 970 if opts.get('newest_first'):
971 971 chlist.reverse()
972 972 count = 0
973 973 for n in chlist:
974 974 if limit is not None and count >= limit:
975 975 break
976 976 parents = [p for p in other.changelog.parents(n) if p != nullid]
977 977 if opts.get('no_merges') and len(parents) == 2:
978 978 continue
979 979 count += 1
980 980 displayer.show(other[n])
981 981 return _incoming(display, subreporecurse, ui, repo, source, opts)
982 982
983 983 def _outgoing(ui, repo, dest, opts):
984 984 path = ui.paths.getpath(dest, default=('default-push', 'default'))
985 985 if not path:
986 986 raise error.Abort(_('default repository not configured!'),
987 987 hint=_("see 'hg help config.paths'"))
988 988 dest = path.pushloc or path.loc
989 989 branches = path.branch, opts.get('branch') or []
990 990
991 991 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
992 992 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
993 993 if revs:
994 994 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
995 995
996 996 other = peer(repo, opts, dest)
997 997 outgoing = discovery.findcommonoutgoing(repo, other, revs,
998 998 force=opts.get('force'))
999 999 o = outgoing.missing
1000 1000 if not o:
1001 1001 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1002 1002 return o, other
1003 1003
1004 1004 def outgoing(ui, repo, dest, opts):
1005 1005 def recurse():
1006 1006 ret = 1
1007 1007 if opts.get('subrepos'):
1008 1008 ctx = repo[None]
1009 1009 for subpath in sorted(ctx.substate):
1010 1010 sub = ctx.sub(subpath)
1011 1011 ret = min(ret, sub.outgoing(ui, dest, opts))
1012 1012 return ret
1013 1013
1014 1014 limit = logcmdutil.getlimit(opts)
1015 1015 o, other = _outgoing(ui, repo, dest, opts)
1016 1016 if not o:
1017 1017 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1018 1018 return recurse()
1019 1019
1020 1020 if opts.get('newest_first'):
1021 1021 o.reverse()
1022 1022 ui.pager('outgoing')
1023 1023 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1024 1024 count = 0
1025 1025 for n in o:
1026 1026 if limit is not None and count >= limit:
1027 1027 break
1028 1028 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1029 1029 if opts.get('no_merges') and len(parents) == 2:
1030 1030 continue
1031 1031 count += 1
1032 1032 displayer.show(repo[n])
1033 1033 displayer.close()
1034 1034 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1035 1035 recurse()
1036 1036 return 0 # exit code is zero since we found outgoing changes
1037 1037
1038 1038 def verify(repo):
1039 1039 """verify the consistency of a repository"""
1040 1040 ret = verifymod.verify(repo)
1041 1041
1042 1042 # Broken subrepo references in hidden csets don't seem worth worrying about,
1043 1043 # since they can't be pushed/pulled, and --hidden can be used if they are a
1044 1044 # concern.
1045 1045
1046 1046 # pathto() is needed for -R case
1047 1047 revs = repo.revs("filelog(%s)",
1048 1048 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1049 1049
1050 1050 if revs:
1051 1051 repo.ui.status(_('checking subrepo links\n'))
1052 1052 for rev in revs:
1053 1053 ctx = repo[rev]
1054 1054 try:
1055 1055 for subpath in ctx.substate:
1056 1056 try:
1057 1057 ret = (ctx.sub(subpath, allowcreate=False).verify()
1058 1058 or ret)
1059 1059 except error.RepoError as e:
1060 1060 repo.ui.warn(('%d: %s\n') % (rev, e))
1061 1061 except Exception:
1062 1062 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1063 1063 node.short(ctx.node()))
1064 1064
1065 1065 return ret
1066 1066
1067 1067 def remoteui(src, opts):
1068 1068 'build a remote ui from ui or repo and opts'
1069 1069 if util.safehasattr(src, 'baseui'): # looks like a repository
1070 1070 dst = src.baseui.copy() # drop repo-specific config
1071 1071 src = src.ui # copy target options from repo
1072 1072 else: # assume it's a global ui object
1073 1073 dst = src.copy() # keep all global options
1074 1074
1075 1075 # copy ssh-specific options
1076 1076 for o in 'ssh', 'remotecmd':
1077 1077 v = opts.get(o) or src.config('ui', o)
1078 1078 if v:
1079 1079 dst.setconfig("ui", o, v, 'copied')
1080 1080
1081 1081 # copy bundle-specific options
1082 1082 r = src.config('bundle', 'mainreporoot')
1083 1083 if r:
1084 1084 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1085 1085
1086 1086 # copy selected local settings to the remote ui
1087 1087 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1088 1088 for key, val in src.configitems(sect):
1089 1089 dst.setconfig(sect, key, val, 'copied')
1090 1090 v = src.config('web', 'cacerts')
1091 1091 if v:
1092 1092 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1093 1093
1094 1094 return dst
1095 1095
1096 1096 # Files of interest
1097 1097 # Used to check if the repository has changed looking at mtime and size of
1098 1098 # these files.
1099 1099 foi = [('spath', '00changelog.i'),
1100 1100 ('spath', 'phaseroots'), # ! phase can change content at the same size
1101 1101 ('spath', 'obsstore'),
1102 1102 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1103 1103 ]
1104 1104
1105 1105 class cachedlocalrepo(object):
1106 1106 """Holds a localrepository that can be cached and reused."""
1107 1107
1108 1108 def __init__(self, repo):
1109 1109 """Create a new cached repo from an existing repo.
1110 1110
1111 1111 We assume the passed in repo was recently created. If the
1112 1112 repo has changed between when it was created and when it was
1113 1113 turned into a cache, it may not refresh properly.
1114 1114 """
1115 1115 assert isinstance(repo, localrepo.localrepository)
1116 1116 self._repo = repo
1117 1117 self._state, self.mtime = self._repostate()
1118 1118 self._filtername = repo.filtername
1119 1119
1120 1120 def fetch(self):
1121 1121 """Refresh (if necessary) and return a repository.
1122 1122
1123 1123 If the cached instance is out of date, it will be recreated
1124 1124 automatically and returned.
1125 1125
1126 1126 Returns a tuple of the repo and a boolean indicating whether a new
1127 1127 repo instance was created.
1128 1128 """
1129 1129 # We compare the mtimes and sizes of some well-known files to
1130 1130 # determine if the repo changed. This is not precise, as mtimes
1131 1131 # are susceptible to clock skew and imprecise filesystems and
1132 1132 # file content can change while maintaining the same size.
1133 1133
1134 1134 state, mtime = self._repostate()
1135 1135 if state == self._state:
1136 1136 return self._repo, False
1137 1137
1138 1138 repo = repository(self._repo.baseui, self._repo.url())
1139 1139 if self._filtername:
1140 1140 self._repo = repo.filtered(self._filtername)
1141 1141 else:
1142 1142 self._repo = repo.unfiltered()
1143 1143 self._state = state
1144 1144 self.mtime = mtime
1145 1145
1146 1146 return self._repo, True
1147 1147
1148 1148 def _repostate(self):
1149 1149 state = []
1150 1150 maxmtime = -1
1151 1151 for attr, fname in foi:
1152 1152 prefix = getattr(self._repo, attr)
1153 1153 p = os.path.join(prefix, fname)
1154 1154 try:
1155 1155 st = os.stat(p)
1156 1156 except OSError:
1157 1157 st = os.stat(prefix)
1158 1158 state.append((st[stat.ST_MTIME], st.st_size))
1159 1159 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1160 1160
1161 1161 return tuple(state), maxmtime
1162 1162
1163 1163 def copy(self):
1164 1164 """Obtain a copy of this class instance.
1165 1165
1166 1166 A new localrepository instance is obtained. The new instance should be
1167 1167 completely independent of the original.
1168 1168 """
1169 1169 repo = repository(self._repo.baseui, self._repo.origroot)
1170 1170 if self._filtername:
1171 1171 repo = repo.filtered(self._filtername)
1172 1172 else:
1173 1173 repo = repo.unfiltered()
1174 1174 c = cachedlocalrepo(repo)
1175 1175 c._state = self._state
1176 1176 c.mtime = self.mtime
1177 1177 return c
@@ -1,445 +1,445 b''
1 1 $ echo "[extensions]" >> $HGRCPATH
2 2 $ echo "share = " >> $HGRCPATH
3 3
4 4 prepare repo1
5 5
6 6 $ hg init repo1
7 7 $ cd repo1
8 8 $ echo a > a
9 9 $ hg commit -A -m'init'
10 10 adding a
11 11
12 12 share it
13 13
14 14 $ cd ..
15 15 $ hg share repo1 repo2
16 16 updating working directory
17 17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 18
19 19 share shouldn't have a store dir
20 20
21 21 $ cd repo2
22 22 $ test -d .hg/store
23 23 [1]
24 24
25 25 share shouldn't have a cache dir, original repo should
26 26
27 27 $ hg branches
28 28 default 0:d3873e73d99e
29 29 $ hg tags
30 30 tip 0:d3873e73d99e
31 31 $ test -d .hg/cache
32 32 [1]
33 33 $ ls -1 ../repo1/.hg/cache
34 34 branch2-served
35 35 manifestfulltextcache
36 36 rbc-names-v1
37 37 rbc-revs-v1
38 38 tags2-visible
39 39
40 40 Some sed versions append a newline, some don't, and some just fail
41 41
42 42 $ cat .hg/sharedpath; echo
43 43 $TESTTMP/repo1/.hg
44 44
45 45 trailing newline on .hg/sharedpath is ok
46 46 $ hg tip -q
47 47 0:d3873e73d99e
48 48 $ echo '' >> .hg/sharedpath
49 49 $ cat .hg/sharedpath
50 50 $TESTTMP/repo1/.hg
51 51 $ hg tip -q
52 52 0:d3873e73d99e
53 53
54 54 commit in shared clone
55 55
56 56 $ echo a >> a
57 57 $ hg commit -m'change in shared clone'
58 58
59 59 check original
60 60
61 61 $ cd ../repo1
62 62 $ hg log
63 63 changeset: 1:8af4dc49db9e
64 64 tag: tip
65 65 user: test
66 66 date: Thu Jan 01 00:00:00 1970 +0000
67 67 summary: change in shared clone
68 68
69 69 changeset: 0:d3873e73d99e
70 70 user: test
71 71 date: Thu Jan 01 00:00:00 1970 +0000
72 72 summary: init
73 73
74 74 $ hg update
75 75 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 76 $ cat a # should be two lines of "a"
77 77 a
78 78 a
79 79
80 80 commit in original
81 81
82 82 $ echo b > b
83 83 $ hg commit -A -m'another file'
84 84 adding b
85 85
86 86 check in shared clone
87 87
88 88 $ cd ../repo2
89 89 $ hg log
90 90 changeset: 2:c2e0ac586386
91 91 tag: tip
92 92 user: test
93 93 date: Thu Jan 01 00:00:00 1970 +0000
94 94 summary: another file
95 95
96 96 changeset: 1:8af4dc49db9e
97 97 user: test
98 98 date: Thu Jan 01 00:00:00 1970 +0000
99 99 summary: change in shared clone
100 100
101 101 changeset: 0:d3873e73d99e
102 102 user: test
103 103 date: Thu Jan 01 00:00:00 1970 +0000
104 104 summary: init
105 105
106 106 $ hg update
107 107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 108 $ cat b # should exist with one "b"
109 109 b
110 110
111 111 hg serve shared clone
112 112
113 113 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
114 114 $ cat hg.pid >> $DAEMON_PIDS
115 115 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
116 116 200 Script output follows
117 117
118 118
119 119 -rw-r--r-- 4 a
120 120 -rw-r--r-- 2 b
121 121
122 122
123 123
124 124 test unshare command
125 125
126 126 $ hg unshare
127 127 $ test -d .hg/store
128 128 $ test -f .hg/sharedpath
129 129 [1]
130 130 $ grep shared .hg/requires
131 131 [1]
132 132 $ hg unshare
133 133 abort: this is not a shared repo
134 134 [255]
135 135
136 136 check that a change does not propagate
137 137
138 138 $ echo b >> b
139 139 $ hg commit -m'change in unshared'
140 140 $ cd ../repo1
141 141 $ hg id -r tip
142 142 c2e0ac586386 tip
143 143
144 144 $ cd ..
145 145
146 146
147 147 test sharing bookmarks
148 148
149 149 $ hg share -B repo1 repo3
150 150 updating working directory
151 151 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 152 $ cd repo1
153 153 $ hg bookmark bm1
154 154 $ hg bookmarks
155 155 * bm1 2:c2e0ac586386
156 156 $ cd ../repo2
157 157 $ hg book bm2
158 158 $ hg bookmarks
159 159 * bm2 3:0e6e70d1d5f1
160 160 $ cd ../repo3
161 161 $ hg bookmarks
162 162 bm1 2:c2e0ac586386
163 163 $ hg book bm3
164 164 $ hg bookmarks
165 165 bm1 2:c2e0ac586386
166 166 * bm3 2:c2e0ac586386
167 167 $ cd ../repo1
168 168 $ hg bookmarks
169 169 * bm1 2:c2e0ac586386
170 170 bm3 2:c2e0ac586386
171 171
172 172 check whether HG_PENDING makes pending changes only in related
173 173 repositories visible to an external hook.
174 174
175 175 In "hg share" case, another transaction can't run in other
176 176 repositories sharing same source repository, because starting
177 177 transaction requires locking store of source repository.
178 178
179 179 Therefore, this test scenario ignores checking visibility of
180 180 .hg/bookmarks.pending in repo2, which shares repo1 without bookmarks.
181 181
182 182 $ cat > $TESTTMP/checkbookmarks.sh <<EOF
183 183 > echo "@repo1"
184 184 > hg -R "$TESTTMP/repo1" bookmarks
185 185 > echo "@repo2"
186 186 > hg -R "$TESTTMP/repo2" bookmarks
187 187 > echo "@repo3"
188 188 > hg -R "$TESTTMP/repo3" bookmarks
189 189 > exit 1 # to avoid adding new bookmark for subsequent tests
190 190 > EOF
191 191
192 192 $ cd ../repo1
193 193 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
194 194 @repo1
195 195 bm1 2:c2e0ac586386
196 196 bm3 2:c2e0ac586386
197 197 * bmX 2:c2e0ac586386
198 198 @repo2
199 199 * bm2 3:0e6e70d1d5f1
200 200 @repo3
201 201 bm1 2:c2e0ac586386
202 202 * bm3 2:c2e0ac586386
203 203 bmX 2:c2e0ac586386
204 204 transaction abort!
205 205 rollback completed
206 206 abort: pretxnclose hook exited with status 1
207 207 [255]
208 208 $ hg book bm1
209 209
210 210 FYI, in contrast to above test, bmX is invisible in repo1 (= shared
211 211 src), because (1) HG_PENDING refers only repo3 and (2)
212 212 "bookmarks.pending" is written only into repo3.
213 213
214 214 $ cd ../repo3
215 215 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
216 216 @repo1
217 217 * bm1 2:c2e0ac586386
218 218 bm3 2:c2e0ac586386
219 219 @repo2
220 220 * bm2 3:0e6e70d1d5f1
221 221 @repo3
222 222 bm1 2:c2e0ac586386
223 223 bm3 2:c2e0ac586386
224 224 * bmX 2:c2e0ac586386
225 225 transaction abort!
226 226 rollback completed
227 227 abort: pretxnclose hook exited with status 1
228 228 [255]
229 229 $ hg book bm3
230 230
231 231 $ cd ../repo1
232 232
233 233 test that commits work
234 234
235 235 $ echo 'shared bookmarks' > a
236 236 $ hg commit -m 'testing shared bookmarks'
237 237 $ hg bookmarks
238 238 * bm1 3:b87954705719
239 239 bm3 2:c2e0ac586386
240 240 $ cd ../repo3
241 241 $ hg bookmarks
242 242 bm1 3:b87954705719
243 243 * bm3 2:c2e0ac586386
244 244 $ echo 'more shared bookmarks' > a
245 245 $ hg commit -m 'testing shared bookmarks'
246 246 created new head
247 247 $ hg bookmarks
248 248 bm1 3:b87954705719
249 249 * bm3 4:62f4ded848e4
250 250 $ cd ../repo1
251 251 $ hg bookmarks
252 252 * bm1 3:b87954705719
253 253 bm3 4:62f4ded848e4
254 254 $ cd ..
255 255
256 256 non largefiles repos won't enable largefiles
257 257
258 258 $ hg share --config extensions.largefiles= repo3 sharedrepo
259 259 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
260 260 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
261 261 updating working directory
262 262 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
263 263 $ [ -f sharedrepo/.hg/hgrc ]
264 264 [1]
265 265
266 266 test pushing bookmarks works
267 267
268 268 $ hg clone repo3 repo4
269 269 updating to branch default
270 270 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
271 271 $ cd repo4
272 272 $ hg boo bm4
273 273 $ echo foo > b
274 274 $ hg commit -m 'foo in b'
275 275 $ hg boo
276 276 bm1 3:b87954705719
277 277 bm3 4:62f4ded848e4
278 278 * bm4 5:92793bfc8cad
279 279 $ hg push -B bm4
280 280 pushing to $TESTTMP/repo3
281 281 searching for changes
282 282 adding changesets
283 283 adding manifests
284 284 adding file changes
285 285 added 1 changesets with 1 changes to 1 files
286 286 exporting bookmark bm4
287 287 $ cd ../repo1
288 288 $ hg bookmarks
289 289 * bm1 3:b87954705719
290 290 bm3 4:62f4ded848e4
291 291 bm4 5:92793bfc8cad
292 292 $ cd ../repo3
293 293 $ hg bookmarks
294 294 bm1 3:b87954705719
295 295 * bm3 4:62f4ded848e4
296 296 bm4 5:92793bfc8cad
297 297 $ cd ..
298 298
299 299 test behavior when sharing a shared repo
300 300
301 $ hg share -B repo3 repo5
301 $ hg share -B repo3 missingdir/repo5
302 302 updating working directory
303 303 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
304 $ cd repo5
304 $ cd missingdir/repo5
305 305 $ hg book
306 306 bm1 3:b87954705719
307 307 bm3 4:62f4ded848e4
308 308 bm4 5:92793bfc8cad
309 $ cd ..
309 $ cd ../..
310 310
311 311 test what happens when an active bookmark is deleted
312 312
313 313 $ cd repo1
314 314 $ hg boo -d bm3
315 315 $ hg boo
316 316 * bm1 3:b87954705719
317 317 bm4 5:92793bfc8cad
318 318 $ cd ../repo3
319 319 $ hg boo
320 320 bm1 3:b87954705719
321 321 bm4 5:92793bfc8cad
322 322 $ cd ..
323 323
324 324 verify that bookmarks are not written on failed transaction
325 325
326 326 $ cat > failpullbookmarks.py << EOF
327 327 > """A small extension that makes bookmark pulls fail, for testing"""
328 328 > from __future__ import absolute_import
329 329 > from mercurial import (
330 330 > error,
331 331 > exchange,
332 332 > extensions,
333 333 > )
334 334 > def _pullbookmarks(orig, pullop):
335 335 > orig(pullop)
336 336 > raise error.HookAbort('forced failure by extension')
337 337 > def extsetup(ui):
338 338 > extensions.wrapfunction(exchange, '_pullbookmarks', _pullbookmarks)
339 339 > EOF
340 340 $ cd repo4
341 341 $ hg boo
342 342 bm1 3:b87954705719
343 343 bm3 4:62f4ded848e4
344 344 * bm4 5:92793bfc8cad
345 345 $ cd ../repo3
346 346 $ hg boo
347 347 bm1 3:b87954705719
348 348 bm4 5:92793bfc8cad
349 349 $ hg --config "extensions.failpullbookmarks=$TESTTMP/failpullbookmarks.py" pull $TESTTMP/repo4
350 350 pulling from $TESTTMP/repo4
351 351 searching for changes
352 352 no changes found
353 353 adding remote bookmark bm3
354 354 abort: forced failure by extension
355 355 [255]
356 356 $ hg boo
357 357 bm1 3:b87954705719
358 358 bm4 5:92793bfc8cad
359 359 $ hg pull $TESTTMP/repo4
360 360 pulling from $TESTTMP/repo4
361 361 searching for changes
362 362 no changes found
363 363 adding remote bookmark bm3
364 364 1 local changesets published
365 365 $ hg boo
366 366 bm1 3:b87954705719
367 367 * bm3 4:62f4ded848e4
368 368 bm4 5:92793bfc8cad
369 369 $ cd ..
370 370
371 371 verify bookmark behavior after unshare
372 372
373 373 $ cd repo3
374 374 $ hg unshare
375 375 $ hg boo
376 376 bm1 3:b87954705719
377 377 * bm3 4:62f4ded848e4
378 378 bm4 5:92793bfc8cad
379 379 $ hg boo -d bm4
380 380 $ hg boo bm5
381 381 $ hg boo
382 382 bm1 3:b87954705719
383 383 bm3 4:62f4ded848e4
384 384 * bm5 4:62f4ded848e4
385 385 $ cd ../repo1
386 386 $ hg boo
387 387 * bm1 3:b87954705719
388 388 bm3 4:62f4ded848e4
389 389 bm4 5:92793bfc8cad
390 390 $ cd ..
391 391
392 392 test shared clones using relative paths work
393 393
394 394 $ mkdir thisdir
395 395 $ hg init thisdir/orig
396 396 $ hg share -U thisdir/orig thisdir/abs
397 397 $ hg share -U --relative thisdir/abs thisdir/rel
398 398 $ cat thisdir/rel/.hg/sharedpath
399 399 ../../orig/.hg (no-eol)
400 400 $ grep shared thisdir/*/.hg/requires
401 401 thisdir/abs/.hg/requires:shared
402 402 thisdir/rel/.hg/requires:shared
403 403 thisdir/rel/.hg/requires:relshared
404 404
405 405 test that relative shared paths aren't relative to $PWD
406 406
407 407 $ cd thisdir
408 408 $ hg -R rel root
409 409 $TESTTMP/thisdir/rel
410 410 $ cd ..
411 411
412 412 now test that relative paths really are relative, survive across
413 413 renames and changes of PWD
414 414
415 415 $ hg -R thisdir/abs root
416 416 $TESTTMP/thisdir/abs
417 417 $ hg -R thisdir/rel root
418 418 $TESTTMP/thisdir/rel
419 419 $ mv thisdir thatdir
420 420 $ hg -R thatdir/abs root
421 421 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg!
422 422 [255]
423 423 $ hg -R thatdir/rel root
424 424 $TESTTMP/thatdir/rel
425 425
426 426 test unshare relshared repo
427 427
428 428 $ cd thatdir/rel
429 429 $ hg unshare
430 430 $ test -d .hg/store
431 431 $ test -f .hg/sharedpath
432 432 [1]
433 433 $ grep shared .hg/requires
434 434 [1]
435 435 $ hg unshare
436 436 abort: this is not a shared repo
437 437 [255]
438 438 $ cd ../..
439 439
440 440 $ rm -r thatdir
441 441
442 442 Explicitly kill daemons to let the test exit on Windows
443 443
444 444 $ killdaemons.py
445 445