@@ -313,16 +313,15 @@ def generatebundlev1(repo, compression='
         # This is where we'll add compression in the future.
         assert compression == 'UN'
 
-        seen = 0
-        repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))
+        progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
+                                        unit=_('bytes'))
+        progress.update(0)
 
         for chunk in it:
-            seen += len(chunk)
-            repo.ui.progress(_('bundle'), seen, total=bytecount,
-                             unit=_('bytes'))
+            progress.increment(step=len(chunk))
             yield chunk
 
-        repo.ui.progress(_('bundle'), None)
+        progress.update(None)
 
     return requirements, gen()
 
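The hunk above replaces the hand-maintained `seen` counter and the repeated `repo.ui.progress()` calls in `generatebundlev1()` with a single progress object whose `increment()` does the bookkeeping. Below is a minimal sketch of that producer-side pattern; `FakeProgress` and `emitchunks` are hypothetical stand-ins that only mirror the `update()`/`increment()` calls visible in this diff (the real object comes from `repo.ui.makeprogress()`):

# Sketch of the producer-side pattern from generatebundlev1() above.
# FakeProgress is a hypothetical stand-in, not Mercurial's progress class.
class FakeProgress(object):
    def __init__(self, topic, total=None, unit=''):
        self.topic, self.total, self.unit, self.pos = topic, total, unit, 0

    def update(self, pos):
        self.pos = pos
        if pos is None:
            print('%s: done' % self.topic)       # stand-in for clearing the bar
        else:
            print('%s: %d/%d %s' % (self.topic, pos, self.total, self.unit))

    def increment(self, step=1):
        self.update(self.pos + step)

def emitchunks(chunks, bytecount, makeprogress=FakeProgress):
    """Yield chunks while reporting progress, as the rewritten gen() does."""
    progress = makeprogress('bundle', total=bytecount, unit='bytes')
    progress.update(0)                       # draw the bar before the first chunk
    for chunk in chunks:
        progress.increment(step=len(chunk))  # no manual 'seen' counter needed
        yield chunk
    progress.update(None)                    # signal completion / clear the bar

data = [b'a' * 10, b'b' * 30, b'c' * 60]
assert b''.join(emitchunks(data, 100)) == b''.join(data)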
@@ -338,8 +337,9 @@ def consumev1(repo, fp, filecount, bytec
     with repo.lock():
         repo.ui.status(_('%d files to transfer, %s of data\n') %
                        (filecount, util.bytecount(bytecount)))
-        handled_bytes = 0
-        repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
+        progress = repo.ui.makeprogress(_('clone'), total=bytecount,
+                                        unit=_('bytes'))
+        progress.update(0)
         start = util.timer()
 
         # TODO: get rid of (potential) inconsistency
@@ -374,9 +374,7 @@ def consumev1(repo, fp, filecount, bytec
                     path = store.decodedir(name)
                     with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                         for chunk in util.filechunkiter(fp, limit=size):
-                            handled_bytes += len(chunk)
-                            repo.ui.progress(_('clone'), handled_bytes,
-                                             total=bytecount, unit=_('bytes'))
+                            progress.increment(step=len(chunk))
                             ofp.write(chunk)
 
         # force @filecache properties to be reloaded from
@@ -386,7 +384,7 @@ def consumev1(repo, fp, filecount, bytec
         elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
-        repo.ui.progress(_('clone'), None)
+        progress.update(None)
         repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                        (util.bytecount(bytecount), elapsed,
                         util.bytecount(bytecount / elapsed)))
@@ -494,8 +492,9 @@ def _makemap(repo):
 def _emit2(repo, entries, totalfilesize):
     """actually emit the stream bundle"""
     vfsmap = _makemap(repo)
-    progress = repo.ui.progress
-    progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
+    progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
+                                    unit=_('bytes'))
+    progress.update(0)
     with maketempcopies() as copy:
         try:
             # copy is delayed until we are in the try
@@ -522,13 +521,12 @@ def _emit2(repo, entries, totalfilesize)
                     chunks = util.filechunkiter(fp, limit=size)
                     for chunk in chunks:
                         seen += len(chunk)
-                        progress(_('bundle'), seen, total=totalfilesize,
-                                 unit=_('bytes'))
+                        progress.update(seen)
                         yield chunk
                 finally:
                     fp.close()
         finally:
-            progress(_('bundle'), None)
+            progress.update(None)
 
 def generatev2(repo):
     """Emit content for version 2 of a streaming clone.
@@ -589,10 +587,9 @@ def consumev2(repo, fp, filecount, files
                        (filecount, util.bytecount(filesize)))
 
         start = util.timer()
-        handledbytes = 0
-        progress = repo.ui.progress
-
-        progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))
+        progress = repo.ui.makeprogress(_('clone'), total=filesize,
+                                        unit=_('bytes'))
+        progress.update(0)
 
         vfsmap = _makemap(repo)
 
@@ -614,9 +611,7 @@ def consumev2(repo, fp, filecount, files
 
                     with vfs(name, 'w') as ofp:
                         for chunk in util.filechunkiter(fp, limit=datalen):
-                            handledbytes += len(chunk)
-                            progress(_('clone'), handledbytes, total=filesize,
-                                     unit=_('bytes'))
+                            progress.increment(step=len(chunk))
                             ofp.write(chunk)
 
         # force @filecache properties to be reloaded from
@@ -626,10 +621,10 @@ def consumev2(repo, fp, filecount, files
         elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
-        progress(_('clone'), None)
+        progress.update(None)
         repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
-                       (util.bytecount(handledbytes), elapsed,
-                        util.bytecount(handledbytes / elapsed)))
+                       (util.bytecount(progress.pos), elapsed,
+                        util.bytecount(progress.pos / elapsed)))
 
 def applybundlev2(repo, fp, filecount, filesize, requirements):
     missingreqs = [r for r in requirements if r not in repo.supported]
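Taken together, the hunks show two ways of driving the helper: `_emit2()` keeps its own `seen` counter and reports absolute positions via `progress.update(seen)`, while `consumev1()`/`consumev2()` let the helper count via `progress.increment(step=...)`; `consumev2()` then reads `progress.pos` for its transfer summary instead of a separate `handledbytes` variable. A small sketch of that equivalence, again using a hypothetical stand-in that exposes only the `update`, `increment`, and `pos` names used above:

# Contrast of the two update styles in this patch; MiniProgress is a
# hypothetical stand-in, not Mercurial's real progress class.
class MiniProgress(object):
    def __init__(self, total):
        self.total, self.pos = total, 0

    def update(self, pos):            # absolute position, as in _emit2()
        self.pos = pos

    def increment(self, step=1):      # relative advance, as in consumev1()/v2()
        self.update(self.pos + step)

chunks = [b'x' * 512, b'y' * 1024]

# _emit2() style: the caller tracks 'seen' and passes absolute positions.
p1, seen = MiniProgress(1536), 0
for chunk in chunks:
    seen += len(chunk)
    p1.update(seen)

# consumev2() style: the helper does the byte bookkeeping itself.
p2 = MiniProgress(1536)
for chunk in chunks:
    p2.increment(step=len(chunk))

# Either way the final position is available for the summary line, which is
# why consumev2() can report util.bytecount(progress.pos) afterwards.
assert p1.pos == p2.pos == 1536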