##// END OF EJS Templates
streamclone: use progress helper...
Martin von Zweigbergk -
r38368:e59eaf51 default
parent child Browse files
Show More
@@ -1,649 +1,644 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import os
12 12 import struct
13 13 import warnings
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 branchmap,
18 18 cacheutil,
19 19 error,
20 20 phases,
21 21 pycompat,
22 22 store,
23 23 util,
24 24 )
25 25
def canperformstreamclone(pullop, bundle2=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bundle2`` will cause the function to consider stream clone through
    bundle2 and only through bundle2.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if 'v2' in pullop.remotebundle2caps.get('stream', []):
            bundle2supported = True
        # else
        #    Server doesn't support bundle2 stream clone or doesn't support
        #    the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bundle2supported and not bundle2:
        return False, None
    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    elif bundle2 and not bundle2supported:
        return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        streamrequested = remote.capable('stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
    requirements = set()
    if remote.capable('stream'):
        requirements.add('revlogv1')
    else:
        streamreqs = remote.capable('streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but server has them '
                'disabled\n'))
            return False, None

        streamreqs = set(streamreqs.split(','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but client is missing '
                'requirements: %s\n') % ', '.join(sorted(missingreqs)))
            pullop.repo.ui.warn(
                _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                  'for more information)\n'))
            return False, None
        requirements = streamreqs

    return True, requirements
107 107
def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable('branchmap'):
        with remote.commandexecutor() as e:
            rbranchmap = e.callcommand('branchmap', {}).result()

    repo.ui.status(_('streaming all changes\n'))

    with remote.commandexecutor() as e:
        fp = e.callcommand('stream_out', {}).result()

    # TODO strictly speaking, this code should all be inside the context
    # manager because the context manager is supposed to ensure all wire state
    # is flushed when exiting. But the legacy peers don't do this, so it
    # doesn't matter.

    # First line of the response is a status code.
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)
    if resp == 1:
        raise error.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_('the server sent an unknown error code'))

    # Second line is "<filecount> <bytecount>" describing the payload.
    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
            repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if rbranchmap:
            branchmap.replacecache(repo, rbranchmap)

        repo.invalidate()
176 176
def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    ui = repo.ui
    # Explicitly disabled via configuration.
    if not ui.configbool('server', 'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret
    # changesets, so when any are present an explicit opt-in is required.
    if phases.hassecret(repo):
        return ui.configbool('server', 'uncompressedallowsecret')

    return True
189 189
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
    # Enumerate store files for streaming.
    # NOTE(review): callers unpack the results as (name, ename, size)
    # triples — confirm against store.walk().
    return repo.store.walk()
193 193
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            # Zero-length files are skipped entirely.
            if size:
                entries.append((name, size))
                total_bytes += size

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        # Lazily stream each entry: a "name\0size" header line followed by
        # the raw file content.
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, 'rb', auditpath=False) as fp:
                # Small files are emitted in one read; larger ones chunked.
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()
245 245
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with 2 header lines. The
    first line indicates overall success. The 2nd contains the file count and
    byte size of payload.

    The success line contains "0" for success, "1" for stream generation not
    allowed, and "2" for error locking the repository (possibly indicating
    a permissions error for the server process).
    """
    # Server-side policy check first: refuse without touching the store.
    if not allowservergeneration(repo):
        yield '1\n'
        return

    try:
        filecount, bytecount, it = generatev1(repo)
    except error.LockError:
        # Could not acquire the repo lock for a consistent snapshot.
        yield '2\n'
        return

    # Success marker, then the payload size header, then the raw stream.
    yield '0\n'
    yield '%d %d\n' % (filecount, bytecount)
    for chunk in it:
        yield chunk
272 272
def generatebundlev1(repo, compression='UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes is a 16-bit big endian unsigned short declaring the length
    of the requirements string, including a trailing \0. The following N bytes
    are the requirements string, which is ASCII containing a comma-delimited
    list of repo requirements that are needed to support the data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != 'UN':
        raise ValueError('we do not support the compression argument yet')

    # Only format-related requirements are relevant to the receiver.
    requirements = repo.requirements & repo.supportedformats
    requires = ','.join(sorted(requirements))

    def gen():
        yield 'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(_('writing %d bytes for %d files\n') %
                       (bytecount, filecount))

        yield struct.pack('>QQ', filecount, bytecount)
        yield struct.pack('>H', len(requires) + 1)
        yield requires + '\0'

        # This is where we'll add compression in the future.
        assert compression == 'UN'

        progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
                                        unit=_('bytes'))
        progress.update(0)

        for chunk in it:
            progress.increment(step=len(chunk))
            yield chunk

        # Signal completion to the progress helper.
        progress.update(None)

    return requirements, gen()
328 327
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        progress = repo.ui.makeprogress(_('clone'), total=bytecount,
                                        unit=_('bytes'))
        progress.update(0)
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction('clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        # Guard against a zero/negative elapsed time on coarse clocks.
        if elapsed <= 0:
            elapsed = 0.001
        progress.update(None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))
393 391
def readbundle1header(fp):
    """Parse the header of a version 1 stream clone bundle.

    ``fp`` must be positioned just past the 4-byte magic, at the 2-byte
    compression identifier. Returns (filecount, bytecount, requirements).
    """
    compression = fp.read(2)
    if compression != 'UN':
        raise error.Abort(_('only uncompressed stream clone bundles are '
                            'supported; got %s') % compression)

    # Two big-endian 64-bit unsigned integers: file count, total byte count.
    filecount, bytecount = struct.unpack('>QQ', fp.read(16))

    # A 16-bit length prefix followed by the NUL-terminated, comma-delimited
    # requirements string.
    requireslen = struct.unpack('>H', fp.read(2))[0]
    requires = fp.read(requireslen)
    if not requires.endswith('\0'):
        raise error.Abort(_('malformed stream clone bundle: '
                            'requirements not properly encoded'))

    requirements = set(requires.rstrip('\0').split(','))
    return filecount, bytecount, requirements
411 409
def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    # Stream clones replace store content wholesale; only empty repos are
    # eligible.
    if len(repo):
        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                            'repo'))

    filecount, bytecount, requirements = readbundle1header(fp)

    # Bail out before consuming anything if the client cannot honor the
    # bundle's requirements.
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev1(repo, fp, filecount, bytecount)
430 428
class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
    readers to perform bundle type-specific functionality.
    """

    def __init__(self, fh):
        # File handle positioned at the start of the bundle payload.
        self._fh = fh

    def apply(self, repo):
        """Consume the wrapped file handle into ``repo``."""
        return applybundlev1(repo, self._fh)
442 440
# type of file to stream
_fileappend = 0 # append only file (streamed in place)
_filefull = 1 # full snapshot file (copied aside before streaming)

# Source of the file
_srcstore = 's' # store (svfs)
_srccache = 'c' # cache (cache)
450 448
451 449 # This is it's own function so extensions can override it.
452 450 def _walkstreamfullstorefiles(repo):
453 451 """list snapshot file from the store"""
454 452 fnames = []
455 453 if not repo.publishing():
456 454 fnames.append('phaseroots')
457 455 return fnames
458 456
def _filterfull(entry, copy, vfsmap):
    """actually copy the snapshot files"""
    src, name, ftype, data = entry
    if ftype == _filefull:
        # Replace the data slot with the path of a temporary copy so the
        # stream reads a stable snapshot of the file.
        return (src, name, ftype, copy(vfsmap[src].join(name)))
    # Append-only entries pass through untouched.
    return entry
465 463
@contextlib.contextmanager
def maketempcopies():
    """return a function to temporary copy file"""
    # Paths of every temporary copy made via the yielded ``copy`` callable.
    files = []
    try:
        def copy(src):
            # Reserve a temp path, then hardlink (falling back to copy) the
            # source file over it.
            fd, dst = pycompat.mkstemp()
            os.close(fd)
            files.append(dst)
            util.copyfiles(src, dst, hardlink=True)
            return dst
        yield copy
    finally:
        # Best-effort removal of all temporary copies on context exit.
        for tmp in files:
            util.tryunlink(tmp)
481 479
def _makemap(repo):
    """make a (src -> vfs) map for the repo"""
    vfsmap = {
        _srcstore: repo.svfs,
        _srccache: repo.cachevfs,
    }
    # we keep repo.vfs out of the map on purpose: there are too many dangers
    # there (eg: .hg/hgrc)
    assert repo.vfs not in vfsmap.values()

    return vfsmap
493 491
def _emit2(repo, entries, totalfilesize):
    """actually emit the stream bundle"""
    vfsmap = _makemap(repo)
    progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
                                    unit=_('bytes'))
    progress.update(0)
    with maketempcopies() as copy:
        try:
            # copy is delayed until we are in the try
            entries = [_filterfull(e, copy, vfsmap) for e in entries]
            yield None # this release the lock on the repository
            seen = 0

            for src, name, ftype, data in entries:
                vfs = vfsmap[src]
                # Per-entry framing: source byte, varint name length, varint
                # data length, name, then the raw data.
                yield src
                yield util.uvarintencode(len(name))
                if ftype == _fileappend:
                    # Append-only file: read directly from the live store.
                    fp = vfs(name)
                    size = data
                elif ftype == _filefull:
                    # Snapshot file: read from the temporary copy made above.
                    fp = open(data, 'rb')
                    size = util.fstat(fp).st_size
                try:
                    yield util.uvarintencode(size)
                    yield name
                    if size <= 65536:
                        chunks = (fp.read(size),)
                    else:
                        chunks = util.filechunkiter(fp, limit=size)
                    for chunk in chunks:
                        seen += len(chunk)
                        progress.update(seen)
                        yield chunk
                finally:
                    fp.close()
        finally:
            # Signal completion to the progress helper.
            progress.update(None)
532 530
def generatev2(repo):
    """Emit content for version 2 of a streaming clone.

    the data stream consists the following entries:
    1) A char representing the file destination (eg: store or cache)
    2) A varint containing the length of the filename
    3) A varint containing the length of file data
    4) N bytes containing the filename (the internal, store-agnostic form)
    5) N bytes containing the file data

    Returns a 3-tuple of (file count, file size, data iterator).
    """

    with repo.lock():

        entries = []
        totalfilesize = 0

        repo.ui.debug('scanning\n')
        # Append-only store files can be streamed in place; snapshot files
        # (phaseroots, caches) are marked _filefull so _emit2 copies them
        # aside first.
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((_srcstore, name, _fileappend, size))
                totalfilesize += size
        for name in _walkstreamfullstorefiles(repo):
            if repo.svfs.exists(name):
                totalfilesize += repo.svfs.lstat(name).st_size
                entries.append((_srcstore, name, _filefull, None))
        for name in cacheutil.cachetocopy(repo):
            if repo.cachevfs.exists(name):
                totalfilesize += repo.cachevfs.lstat(name).st_size
                entries.append((_srccache, name, _filefull, None))

        chunks = _emit2(repo, entries, totalfilesize)
        first = next(chunks)
        # _emit2 yields None once its temporary copies exist; the repo lock
        # can then be released while the data is streamed out.
        assert first is None

    return len(entries), totalfilesize, chunks
570 568
@contextlib.contextmanager
def nested(*ctxs):
    # NOTE(review): contextlib.nested exists only on Python 2; this helper
    # will need a replacement (e.g. contextlib.ExitStack) for Python 3.
    with warnings.catch_warnings():
        # For some reason, Python decided 'nested' was deprecated without
        # replacement. They officially advertised for filtering the deprecation
        # warning for people who actually need the feature.
        warnings.filterwarnings("ignore",category=DeprecationWarning)
        with contextlib.nested(*ctxs):
            yield
580 578
def consumev2(repo, fp, filecount, filesize):
    """Apply the contents from a version 2 streaming clone.

    Data is read from an object that only needs to provide a ``read(size)``
    method.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(filesize)))

        start = util.timer()
        progress = repo.ui.makeprogress(_('clone'), total=filesize,
                                        unit=_('bytes'))
        progress.update(0)

        vfsmap = _makemap(repo)

        with repo.transaction('clone'):
            # Keep every destination vfs in background-closing mode for the
            # duration of the apply.
            ctxs = (vfs.backgroundclosing(repo.ui)
                    for vfs in vfsmap.values())
            with nested(*ctxs):
                for i in range(filecount):
                    # Per-entry framing: source byte, varint name length,
                    # varint data length, name, then the raw data.
                    src = util.readexactly(fp, 1)
                    vfs = vfsmap[src]
                    namelen = util.uvarintdecodestream(fp)
                    datalen = util.uvarintdecodestream(fp)

                    name = util.readexactly(fp, namelen)

                    if repo.ui.debugflag:
                        repo.ui.debug('adding [%s] %s (%s)\n' %
                                      (src, name, util.bytecount(datalen)))

                    with vfs(name, 'w') as ofp:
                        for chunk in util.filechunkiter(fp, limit=datalen):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        # Guard against a zero/negative elapsed time on coarse clocks.
        if elapsed <= 0:
            elapsed = 0.001
        progress.update(None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(progress.pos), elapsed,
                        util.bytecount(progress.pos / elapsed)))
633 628
def applybundlev2(repo, fp, filecount, filesize, requirements):
    """Apply a version 2 stream clone bundle to ``repo``.

    Verifies requirements, consumes the stream, then records the new
    format requirements in the repository.
    """
    # Refuse up front if any advertised requirement is unsupported locally.
    missingreqs = [r for r in requirements if r not in repo.supported]
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev2(repo, fp, filecount, filesize)

    # new requirements = old non-format requirements +
    #                    new format-related remote requirements
    #                    (i.e. requirements from the streamed-in repository)
    repo.requirements = set(requirements) | (
        repo.requirements - repo.supportedformats)
    repo._applyopenerreqs()
    repo._writerequirements()
General Comments 0
You need to be logged in to leave comments. Login now