streamclone: add support for cloning non append-only file...
Boris Feld
r35783:56c30b31 default
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -1465,6 +1465,7 @@ def _pullbundle2(pullop):
         kwargs['cg'] = False
         kwargs['stream'] = True
         pullop.stepsdone.add('changegroup')
+        pullop.stepsdone.add('phases')
 
     else:
         # pulling changegroup
@@ -1472,15 +1473,15 @@ def _pullbundle2(pullop):
 
         kwargs['cg'] = pullop.fetch
 
-    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
-    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
-    if (not legacyphase and hasbinaryphase):
-        kwargs['phases'] = True
-        pullop.stepsdone.add('phases')
+        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
+        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
+        if (not legacyphase and hasbinaryphase):
+            kwargs['phases'] = True
+            pullop.stepsdone.add('phases')
 
-    if 'listkeys' in pullop.remotebundle2caps:
-        if 'phases' not in pullop.stepsdone:
-            kwargs['listkeys'] = ['phases']
+        if 'listkeys' in pullop.remotebundle2caps:
+            if 'phases' not in pullop.stepsdone:
+                kwargs['listkeys'] = ['phases']
 
     bookmarksrequested = False
     legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
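The effect of the two hunks above is easiest to see in isolation: once the streaming branch records 'phases' in stepsdone, neither the binary phase-heads part nor the listkeys fallback is requested for phase data. A minimal, self-contained model of that gating (build_pull_kwargs and its arguments are hypothetical stand-ins for illustration, not Mercurial APIs):

# Hypothetical model of the negotiation in _pullbundle2 above; not hg's API.
def build_pull_kwargs(streaming, remote_caps, legacy_phase=False):
    kwargs = {}
    stepsdone = set()
    if streaming:
        kwargs['stream'] = True
        stepsdone.add('changegroup')
        stepsdone.add('phases')  # phase data now travels with the streamed files
    else:
        stepsdone.add('changegroup')
        kwargs['cg'] = True
        if not legacy_phase and 'heads' in remote_caps.get('phases', ()):
            kwargs['phases'] = True  # request the binary phase-heads part
            stepsdone.add('phases')
        if 'listkeys' in remote_caps and 'phases' not in stepsdone:
            kwargs['listkeys'] = ['phases']  # legacy pushkey fallback

    return kwargs

# A streaming pull no longer asks for phases in any form:
assert build_pull_kwargs(True, {'listkeys': True}) == {'stream': True}
# A changegroup pull from a server without binary phases still falls back:
assert build_pull_kwargs(False, {'listkeys': True})['listkeys'] == ['phases']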
--- a/mercurial/streamclone.py
+++ b/mercurial/streamclone.py
@@ -7,7 +7,10 @@
 
 from __future__ import absolute_import
 
+import contextlib
+import os
 import struct
+import tempfile
 
 from .i18n import _
 from . import (
@@ -428,32 +431,77 @@ class streamcloneapplier(object):
     def apply(self, repo):
         return applybundlev1(repo, self._fh)
 
+# type of file to stream
+_fileappend = 0  # append-only file
+_filefull = 1    # full snapshot file
+
+# This is its own function so extensions can override it.
+def _walkstreamfullstorefiles(repo):
+    """list snapshot files from the store"""
+    fnames = []
+    if not repo.publishing():
+        fnames.append('phaseroots')
+    return fnames
+
+def _filterfull(entry, copy, vfs):
+    """replace a full-snapshot entry's data with a temporary copy of the file"""
+    name, ftype, data = entry
+    if ftype != _filefull:
+        return entry
+    return (name, ftype, copy(vfs.join(name)))
+
+@contextlib.contextmanager
+def maketempcopies():
+    """return a function to temporarily copy files"""
+    files = []
+    try:
+        def copy(src):
+            fd, dst = tempfile.mkstemp()
+            os.close(fd)
+            files.append(dst)
+            util.copyfiles(src, dst, hardlink=True)
+            return dst
+        yield copy
+    finally:
+        for tmp in files:
+            util.tryunlink(tmp)
+
 def _emit(repo, entries, totalfilesize):
     """actually emit the stream bundle"""
+    vfs = repo.svfs
     progress = repo.ui.progress
     progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
-    vfs = repo.svfs
-    try:
-        seen = 0
-        for name, size in entries:
-            yield util.uvarintencode(len(name))
-            fp = vfs(name)
-            try:
-                yield util.uvarintencode(size)
-                yield name
-                if size <= 65536:
-                    chunks = (fp.read(size),)
-                else:
-                    chunks = util.filechunkiter(fp, limit=size)
-                for chunk in chunks:
-                    seen += len(chunk)
-                    progress(_('bundle'), seen, total=totalfilesize,
-                             unit=_('bytes'))
-                    yield chunk
-            finally:
-                fp.close()
-    finally:
-        progress(_('bundle'), None)
+    with maketempcopies() as copy:
+        try:
+            # copying is delayed until we are inside the try block
+            entries = [_filterfull(e, copy, vfs) for e in entries]
+            yield None  # this releases the lock on the repository
+            seen = 0
+
+            for name, ftype, data in entries:
+                yield util.uvarintencode(len(name))
+                if ftype == _fileappend:
+                    fp = vfs(name)
+                    size = data
+                elif ftype == _filefull:
+                    fp = open(data, 'rb')
+                    size = util.fstat(fp).st_size
+                try:
+                    yield util.uvarintencode(size)
+                    yield name
+                    if size <= 65536:
+                        chunks = (fp.read(size),)
+                    else:
+                        chunks = util.filechunkiter(fp, limit=size)
+                    for chunk in chunks:
+                        seen += len(chunk)
+                        progress(_('bundle'), seen, total=totalfilesize,
+                                 unit=_('bytes'))
+                        yield chunk
+                finally:
+                    fp.close()
+        finally:
+            progress(_('bundle'), None)
 
 def generatev2(repo):
     """Emit content for version 2 of a streaming clone.
@@ -475,10 +523,16 @@ def generatev2(repo):
         repo.ui.debug('scanning\n')
         for name, ename, size in _walkstreamfiles(repo):
             if size:
-                entries.append((name, size))
+                entries.append((name, _fileappend, size))
                 totalfilesize += size
+        for name in _walkstreamfullstorefiles(repo):
+            if repo.svfs.exists(name):
+                totalfilesize += repo.svfs.lstat(name).st_size
+                entries.append((name, _filefull, None))
 
         chunks = _emit(repo, entries, totalfilesize)
+        first = next(chunks)
+        assert first is None
 
     return len(entries), totalfilesize, chunks
 
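generatev2 calls next(chunks) once before returning so that _emit runs up to its `yield None`, i.e. until the volatile files have been copied aside; the caller can then drop the repository lock while the bulk of the data is still unsent. A self-contained model of that handshake (emit, lock, and the sample data are hypothetical stand-ins):

import threading

def emit(lock, files):
    """model of _emit: snapshot under the lock, stream after it is released"""
    assert lock.locked(), 'the copies must be taken while locked'
    snapshot = list(files)  # stands in for maketempcopies' hardlink copies
    yield None              # handshake: the lock is no longer needed
    for data in snapshot:
        yield data

lock = threading.Lock()
lock.acquire()
chunks = emit(lock, [b'changelog data', b'phaseroots data'])
assert next(chunks) is None   # mirrors generatev2's `assert first is None`
lock.release()                # safe: volatile files were already copied
assert list(chunks) == [b'changelog data', b'phaseroots data']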
--- a/tests/test-clone-uncompressed.t
+++ b/tests/test-clone-uncompressed.t
@@ -100,9 +100,7 @@ Clone with background file closing enabled
   transferred 96.3 KB in * seconds (* */sec) (glob)
   bundle2-input-part: total payload size 110887
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 2 parts total
+  bundle2-input-bundle: 1 parts total
   checking for updated bookmarks
 #endif
 
@@ -320,13 +318,13 @@ Clone as non publishing
 #if stream-bundle2
   $ hg clone --stream http://localhost:$HGPORT phase-no-publish
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1028 files to transfer, 96.4 KB of data
+  transferred 96.4 KB in * seconds (* */sec) (glob)
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R phase-no-publish phase -r 'all()'
-  0: public
-  1: public
+  0: draft
+  1: draft
 #endif
 
   $ killdaemons.py