@@ -1465,6 +1465,7 @@ def _pullbundle2(pullop):
         kwargs['cg'] = False
         kwargs['stream'] = True
         pullop.stepsdone.add('changegroup')
+        pullop.stepsdone.add('phases')
 
     else:
         # pulling changegroup
@@ -1472,15 +1473,15 @@ def _pullbundle2(pullop):
 
         kwargs['cg'] = pullop.fetch
 
         legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
         hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
         if (not legacyphase and hasbinaryphase):
             kwargs['phases'] = True
             pullop.stepsdone.add('phases')
 
     if 'listkeys' in pullop.remotebundle2caps:
         if 'phases' not in pullop.stepsdone:
             kwargs['listkeys'] = ['phases']
 
     bookmarksrequested = False
     legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
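A note on the stepsdone gating this hunk relies on: because the streaming branch now records the 'phases' step as already handled, the later listkeys block no longer requests phase data over the legacy pushkey protocol. The sketch below is a stand-alone approximation of that control flow, not the real exchange.py objects:

# Stand-alone sketch of the stepsdone gating in _pullbundle2 (names are
# illustrative; the real code works on a pulloperation object).

def build_pull_kwargs(streaming, remote_supports_listkeys):
    stepsdone = set()
    kwargs = {}
    if streaming:
        # the stream now carries phase information itself,
        # so mark the step as done up front
        kwargs['stream'] = True
        stepsdone.add('changegroup')
        stepsdone.add('phases')
    else:
        kwargs['cg'] = True
    # legacy fallback: only request phases via listkeys when nothing
    # else already covered them
    if remote_supports_listkeys and 'phases' not in stepsdone:
        kwargs['listkeys'] = ['phases']
    return kwargs

assert 'listkeys' not in build_pull_kwargs(True, True)
assert build_pull_kwargs(False, True)['listkeys'] == ['phases']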
@@ -7,7 +7,10 @@
 
 from __future__ import absolute_import
 
+import contextlib
+import os
 import struct
+import tempfile
 
 from .i18n import _
 from . import (
@@ -428,32 +431,77 @@ class streamcloneapplier(object):
     def apply(self, repo):
         return applybundlev1(repo, self._fh)
 
+# type of file to stream
+_fileappend = 0 # append only file
+_filefull = 1   # full snapshot file
+
+# This is it's own function so extensions can override it.
+def _walkstreamfullstorefiles(repo):
+    """list snapshot file from the store"""
+    fnames = []
+    if not repo.publishing():
+        fnames.append('phaseroots')
+    return fnames
+
+def _filterfull(entry, copy, vfs):
+    """actually copy the snapshot files"""
+    name, ftype, data = entry
+    if ftype != _filefull:
+        return entry
+    return (name, ftype, copy(vfs.join(name)))
+
+@contextlib.contextmanager
+def maketempcopies():
+    """return a function to temporary copy file"""
+    files = []
+    try:
+        def copy(src):
+            fd, dst = tempfile.mkstemp()
+            os.close(fd)
+            files.append(dst)
+            util.copyfiles(src, dst, hardlink=True)
+            return dst
+        yield copy
+    finally:
+        for tmp in files:
+            util.tryunlink(tmp)
+
 def _emit(repo, entries, totalfilesize):
     """actually emit the stream bundle"""
+    vfs = repo.svfs
     progress = repo.ui.progress
     progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
-    vfs = repo.svfs
-    try:
-        seen = 0
-        for name, size in entries:
-            yield util.uvarintencode(len(name))
-            fp = vfs(name)
-            try:
-                yield util.uvarintencode(size)
-                yield name
-                if size <= 65536:
-                    chunks = (fp.read(size),)
-                else:
-                    chunks = util.filechunkiter(fp, limit=size)
-                for chunk in chunks:
-                    seen += len(chunk)
-                    progress(_('bundle'), seen, total=totalfilesize,
-                             unit=_('bytes'))
-                    yield chunk
-            finally:
-                fp.close()
-    finally:
-        progress(_('bundle'), None)
+    with maketempcopies() as copy:
+        try:
+            # copy is delayed until we are in the try
+            entries = [_filterfull(e, copy, vfs) for e in entries]
+            yield None # this release the lock on the repository
+            seen = 0
+
+            for name, ftype, data in entries:
+                yield util.uvarintencode(len(name))
+                if ftype == _fileappend:
+                    fp = vfs(name)
+                    size = data
+                elif ftype == _filefull:
+                    fp = open(data, 'rb')
+                    size = util.fstat(fp).st_size
+                try:
+                    yield util.uvarintencode(size)
+                    yield name
+                    if size <= 65536:
+                        chunks = (fp.read(size),)
+                    else:
+                        chunks = util.filechunkiter(fp, limit=size)
+                    for chunk in chunks:
+                        seen += len(chunk)
+                        progress(_('bundle'), seen, total=totalfilesize,
+                                 unit=_('bytes'))
+                        yield chunk
+                finally:
+                    fp.close()
+        finally:
+            progress(_('bundle'), None)
 
 def generatev2(repo):
     """Emit content for version 2 of a streaming clone.
@@ -475,10 +523,16 @@ def generatev2(repo):
         repo.ui.debug('scanning\n')
         for name, ename, size in _walkstreamfiles(repo):
             if size:
-                entries.append((name, size))
+                entries.append((name, _fileappend, size))
                 totalfilesize += size
+        for name in _walkstreamfullstorefiles(repo):
+            if repo.svfs.exists(name):
+                totalfilesize += repo.svfs.lstat(name).st_size
+                entries.append((name, _filefull, None))
 
         chunks = _emit(repo, entries, totalfilesize)
+        first = next(chunks)
+        assert first is None
 
     return len(entries), totalfilesize, chunks
 
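The first = next(chunks) / assert first is None handshake added here is a primed-generator pattern: advancing the generator once forces the temporary copies of the mutable files to be made while the lock is still held, and the initial yield None in _emit signals that the lock can be released before the bulk of the data is streamed. A minimal stand-alone sketch of that pattern, with a purely illustrative lock:

import contextlib

def make_stream(prepare, chunks):
    """generator that runs its setup eagerly, then streams lazily"""
    prepare()              # runs while the caller still holds the lock
    yield None             # handshake: setup done, lock may be released
    for chunk in chunks:   # streamed after the lock is gone
        yield chunk

@contextlib.contextmanager
def fakelock():
    print('lock acquired')
    try:
        yield
    finally:
        print('lock released')

def generate():
    with fakelock():
        stream = make_stream(lambda: print('snapshotting mutable files'),
                             (b'chunk-%d' % i for i in range(3)))
        first = next(stream)    # force the snapshot before leaving the lock
        assert first is None
    return stream               # lock already released; data not yet read

for chunk in generate():
    print(chunk)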
@@ -100,9 +100,7 @@ Clone with background file closing enabl
   transferred 96.3 KB in * seconds (* */sec) (glob)
   bundle2-input-part: total payload size 110887
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 2 parts total
+  bundle2-input-bundle: 1 parts total
   checking for updated bookmarks
 #endif
 
@@ -320,13 +318,13 @@ Clone as non publishing
 #if stream-bundle2
   $ hg clone --stream http://localhost:$HGPORT phase-no-publish
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1028 files to transfer, 96.4 KB of data
+  transferred 96.4 KB in * seconds (* */sec) (glob)
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R phase-no-publish phase -r 'all()'
-  0: public
-  1: public
+  0: draft
+  1: draft
 #endif
 
   $ killdaemons.py