##// END OF EJS Templates
streamclone: pass narrowing related info in _walkstreamfiles()...
Pulkit Goyal -
r40375:f0e8f277 default
parent child Browse files
Show More
@@ -1,655 +1,660 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import os
12 12 import struct
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 branchmap,
17 17 cacheutil,
18 18 error,
19 narrowspec,
19 20 phases,
20 21 pycompat,
21 22 repository,
22 23 store,
23 24 util,
24 25 )
25 26
26 27 def canperformstreamclone(pullop, bundle2=False):
27 28 """Whether it is possible to perform a streaming clone as part of pull.
28 29
29 30 ``bundle2`` will cause the function to consider stream clone through
30 31 bundle2 and only through bundle2.
31 32
32 33 Returns a tuple of (supported, requirements). ``supported`` is True if
33 34 streaming clone is supported and False otherwise. ``requirements`` is
34 35 a set of repo requirements from the remote, or ``None`` if stream clone
35 36 isn't supported.
36 37 """
37 38 repo = pullop.repo
38 39 remote = pullop.remote
39 40
40 41 bundle2supported = False
41 42 if pullop.canusebundle2:
42 43 if 'v2' in pullop.remotebundle2caps.get('stream', []):
43 44 bundle2supported = True
44 45 # else
45 46 # Server doesn't support bundle2 stream clone or doesn't support
46 47 # the versions we support. Fall back and possibly allow legacy.
47 48
48 49 # Ensures legacy code path uses available bundle2.
49 50 if bundle2supported and not bundle2:
50 51 return False, None
51 52 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
52 53 elif bundle2 and not bundle2supported:
53 54 return False, None
54 55
55 56 # Streaming clone only works on empty repositories.
56 57 if len(repo):
57 58 return False, None
58 59
59 60 # Streaming clone only works if all data is being requested.
60 61 if pullop.heads:
61 62 return False, None
62 63
63 64 streamrequested = pullop.streamclonerequested
64 65
65 66 # If we don't have a preference, let the server decide for us. This
66 67 # likely only comes into play in LANs.
67 68 if streamrequested is None:
68 69 # The server can advertise whether to prefer streaming clone.
69 70 streamrequested = remote.capable('stream-preferred')
70 71
71 72 if not streamrequested:
72 73 return False, None
73 74
74 75 # In order for stream clone to work, the client has to support all the
75 76 # requirements advertised by the server.
76 77 #
77 78 # The server advertises its requirements via the "stream" and "streamreqs"
78 79 # capability. "stream" (a value-less capability) is advertised if and only
79 80 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
80 81 # is advertised and contains a comma-delimited list of requirements.
81 82 requirements = set()
82 83 if remote.capable('stream'):
83 84 requirements.add('revlogv1')
84 85 else:
85 86 streamreqs = remote.capable('streamreqs')
86 87 # This is weird and shouldn't happen with modern servers.
87 88 if not streamreqs:
88 89 pullop.repo.ui.warn(_(
89 90 'warning: stream clone requested but server has them '
90 91 'disabled\n'))
91 92 return False, None
92 93
93 94 streamreqs = set(streamreqs.split(','))
94 95 # Server requires something we don't support. Bail.
95 96 missingreqs = streamreqs - repo.supportedformats
96 97 if missingreqs:
97 98 pullop.repo.ui.warn(_(
98 99 'warning: stream clone requested but client is missing '
99 100 'requirements: %s\n') % ', '.join(sorted(missingreqs)))
100 101 pullop.repo.ui.warn(
101 102 _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
102 103 'for more information)\n'))
103 104 return False, None
104 105 requirements = streamreqs
105 106
106 107 return True, requirements
107 108
108 109 def maybeperformlegacystreamclone(pullop):
109 110 """Possibly perform a legacy stream clone operation.
110 111
111 112 Legacy stream clones are performed as part of pull but before all other
112 113 operations.
113 114
114 115 A legacy stream clone will not be performed if a bundle2 stream clone is
115 116 supported.
116 117 """
117 118 from . import localrepo
118 119
119 120 supported, requirements = canperformstreamclone(pullop)
120 121
121 122 if not supported:
122 123 return
123 124
124 125 repo = pullop.repo
125 126 remote = pullop.remote
126 127
127 128 # Save remote branchmap. We will use it later to speed up branchcache
128 129 # creation.
129 130 rbranchmap = None
130 131 if remote.capable('branchmap'):
131 132 with remote.commandexecutor() as e:
132 133 rbranchmap = e.callcommand('branchmap', {}).result()
133 134
134 135 repo.ui.status(_('streaming all changes\n'))
135 136
136 137 with remote.commandexecutor() as e:
137 138 fp = e.callcommand('stream_out', {}).result()
138 139
139 140 # TODO strictly speaking, this code should all be inside the context
140 141 # manager because the context manager is supposed to ensure all wire state
141 142 # is flushed when exiting. But the legacy peers don't do this, so it
142 143 # doesn't matter.
143 144 l = fp.readline()
144 145 try:
145 146 resp = int(l)
146 147 except ValueError:
147 148 raise error.ResponseError(
148 149 _('unexpected response from remote server:'), l)
149 150 if resp == 1:
150 151 raise error.Abort(_('operation forbidden by server'))
151 152 elif resp == 2:
152 153 raise error.Abort(_('locking the remote repository failed'))
153 154 elif resp != 0:
154 155 raise error.Abort(_('the server sent an unknown error code'))
155 156
156 157 l = fp.readline()
157 158 try:
158 159 filecount, bytecount = map(int, l.split(' ', 1))
159 160 except (ValueError, TypeError):
160 161 raise error.ResponseError(
161 162 _('unexpected response from remote server:'), l)
162 163
163 164 with repo.lock():
164 165 consumev1(repo, fp, filecount, bytecount)
165 166
166 167 # new requirements = old non-format requirements +
167 168 # new format-related remote requirements
168 169 # requirements from the streamed-in repository
169 170 repo.requirements = requirements | (
170 171 repo.requirements - repo.supportedformats)
171 172 repo.svfs.options = localrepo.resolvestorevfsoptions(
172 173 repo.ui, repo.requirements, repo.features)
173 174 repo._writerequirements()
174 175
175 176 if rbranchmap:
176 177 branchmap.replacecache(repo, rbranchmap)
177 178
178 179 repo.invalidate()
179 180
180 181 def allowservergeneration(repo):
181 182 """Whether streaming clones are allowed from the server."""
182 183 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
183 184 return False
184 185
185 186 if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
186 187 return False
187 188
188 189 # The way stream clone works makes it impossible to hide secret changesets.
189 190 # So don't allow this by default.
190 191 secret = phases.hassecret(repo)
191 192 if secret:
192 193 return repo.ui.configbool('server', 'uncompressedallowsecret')
193 194
194 195 return True
195 196
196 197 # This is its own function so extensions can override it.
197 def _walkstreamfiles(repo):
198 return repo.store.walk()
198 def _walkstreamfiles(repo, matcher=None):
199 return repo.store.walk(matcher)
199 200
200 201 def generatev1(repo):
201 202 """Emit content for version 1 of a streaming clone.
202 203
203 204 This returns a 3-tuple of (file count, byte size, data iterator).
204 205
205 206 The data iterator consists of N entries for each file being transferred.
206 207 Each file entry starts as a line with the file name and integer size
207 208 delimited by a null byte.
208 209
209 210 The raw file data follows. Following the raw file data is the next file
210 211 entry, or EOF.
211 212
212 213 When used on the wire protocol, an additional line indicating protocol
213 214 success will be prepended to the stream. This function is not responsible
214 215 for adding it.
215 216
216 217 This function will obtain a repository lock to ensure a consistent view of
217 218 the store is captured. It therefore may raise LockError.
218 219 """
219 220 entries = []
220 221 total_bytes = 0
221 222 # Get consistent snapshot of repo, lock during scan.
222 223 with repo.lock():
223 224 repo.ui.debug('scanning\n')
224 225 for name, ename, size in _walkstreamfiles(repo):
225 226 if size:
226 227 entries.append((name, size))
227 228 total_bytes += size
228 229
229 230 repo.ui.debug('%d files, %d bytes to transfer\n' %
230 231 (len(entries), total_bytes))
231 232
232 233 svfs = repo.svfs
233 234 debugflag = repo.ui.debugflag
234 235
235 236 def emitrevlogdata():
236 237 for name, size in entries:
237 238 if debugflag:
238 239 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
239 240 # partially encode name over the wire for backwards compat
240 241 yield '%s\0%d\n' % (store.encodedir(name), size)
241 242 # auditing at this stage is both pointless (paths are already
242 243 # trusted by the local repo) and expensive
243 244 with svfs(name, 'rb', auditpath=False) as fp:
244 245 if size <= 65536:
245 246 yield fp.read(size)
246 247 else:
247 248 for chunk in util.filechunkiter(fp, limit=size):
248 249 yield chunk
249 250
250 251 return len(entries), total_bytes, emitrevlogdata()
251 252
252 253 def generatev1wireproto(repo):
253 254 """Emit content for version 1 of streaming clone suitable for the wire.
254 255
255 256 This is the data output from ``generatev1()`` with 2 header lines. The
256 257 first line indicates overall success. The 2nd contains the file count and
257 258 byte size of payload.
258 259
259 260 The success line contains "0" for success, "1" for stream generation not
260 261 allowed, and "2" for error locking the repository (possibly indicating
261 262 a permissions error for the server process).
262 263 """
263 264 if not allowservergeneration(repo):
264 265 yield '1\n'
265 266 return
266 267
267 268 try:
268 269 filecount, bytecount, it = generatev1(repo)
269 270 except error.LockError:
270 271 yield '2\n'
271 272 return
272 273
273 274 # Indicates successful response.
274 275 yield '0\n'
275 276 yield '%d %d\n' % (filecount, bytecount)
276 277 for chunk in it:
277 278 yield chunk
278 279
279 280 def generatebundlev1(repo, compression='UN'):
280 281 """Emit content for version 1 of a stream clone bundle.
281 282
282 283 The first 4 bytes of the output ("HGS1") denote this as stream clone
283 284 bundle version 1.
284 285
285 286 The next 2 bytes indicate the compression type. Only "UN" is currently
286 287 supported.
287 288
288 289 The next 16 bytes are two 64-bit big endian unsigned integers indicating
289 290 file count and byte count, respectively.
290 291
291 292 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
292 293 of the requirements string, including a trailing \0. The following N bytes
293 294 are the requirements string, which is ASCII containing a comma-delimited
294 295 list of repo requirements that are needed to support the data.
295 296
296 297 The remaining content is the output of ``generatev1()`` (which may be
297 298 compressed in the future).
298 299
299 300 Returns a tuple of (requirements, data generator).
300 301 """
301 302 if compression != 'UN':
302 303 raise ValueError('we do not support the compression argument yet')
303 304
304 305 requirements = repo.requirements & repo.supportedformats
305 306 requires = ','.join(sorted(requirements))
306 307
307 308 def gen():
308 309 yield 'HGS1'
309 310 yield compression
310 311
311 312 filecount, bytecount, it = generatev1(repo)
312 313 repo.ui.status(_('writing %d bytes for %d files\n') %
313 314 (bytecount, filecount))
314 315
315 316 yield struct.pack('>QQ', filecount, bytecount)
316 317 yield struct.pack('>H', len(requires) + 1)
317 318 yield requires + '\0'
318 319
319 320 # This is where we'll add compression in the future.
320 321 assert compression == 'UN'
321 322
322 323 progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
323 324 unit=_('bytes'))
324 325 progress.update(0)
325 326
326 327 for chunk in it:
327 328 progress.increment(step=len(chunk))
328 329 yield chunk
329 330
330 331 progress.complete()
331 332
332 333 return requirements, gen()
333 334
334 335 def consumev1(repo, fp, filecount, bytecount):
335 336 """Apply the contents from version 1 of a streaming clone file handle.
336 337
337 338 This takes the output from "stream_out" and applies it to the specified
338 339 repository.
339 340
340 341 Like "stream_out," the status line added by the wire protocol is not
341 342 handled by this function.
342 343 """
343 344 with repo.lock():
344 345 repo.ui.status(_('%d files to transfer, %s of data\n') %
345 346 (filecount, util.bytecount(bytecount)))
346 347 progress = repo.ui.makeprogress(_('clone'), total=bytecount,
347 348 unit=_('bytes'))
348 349 progress.update(0)
349 350 start = util.timer()
350 351
351 352 # TODO: get rid of (potential) inconsistency
352 353 #
353 354 # If transaction is started and any @filecache property is
354 355 # changed at this point, it causes inconsistency between
355 356 # in-memory cached property and streamclone-ed file on the
356 357 # disk. Nested transaction prevents transaction scope "clone"
357 358 # below from writing in-memory changes out at the end of it,
358 359 # even though in-memory changes are discarded at the end of it
359 360 # regardless of transaction nesting.
360 361 #
361 362 # But transaction nesting can't be simply prohibited, because
362 363 # nesting occurs also in ordinary case (e.g. enabling
363 364 # clonebundles).
364 365
365 366 with repo.transaction('clone'):
366 367 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
367 368 for i in pycompat.xrange(filecount):
368 369 # XXX doesn't support '\n' or '\r' in filenames
369 370 l = fp.readline()
370 371 try:
371 372 name, size = l.split('\0', 1)
372 373 size = int(size)
373 374 except (ValueError, TypeError):
374 375 raise error.ResponseError(
375 376 _('unexpected response from remote server:'), l)
376 377 if repo.ui.debugflag:
377 378 repo.ui.debug('adding %s (%s)\n' %
378 379 (name, util.bytecount(size)))
379 380 # for backwards compat, name was partially encoded
380 381 path = store.decodedir(name)
381 382 with repo.svfs(path, 'w', backgroundclose=True) as ofp:
382 383 for chunk in util.filechunkiter(fp, limit=size):
383 384 progress.increment(step=len(chunk))
384 385 ofp.write(chunk)
385 386
386 387 # force @filecache properties to be reloaded from
387 388 # streamclone-ed file at next access
388 389 repo.invalidate(clearfilecache=True)
389 390
390 391 elapsed = util.timer() - start
391 392 if elapsed <= 0:
392 393 elapsed = 0.001
393 394 progress.complete()
394 395 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
395 396 (util.bytecount(bytecount), elapsed,
396 397 util.bytecount(bytecount / elapsed)))
397 398
398 399 def readbundle1header(fp):
399 400 compression = fp.read(2)
400 401 if compression != 'UN':
401 402 raise error.Abort(_('only uncompressed stream clone bundles are '
402 403 'supported; got %s') % compression)
403 404
404 405 filecount, bytecount = struct.unpack('>QQ', fp.read(16))
405 406 requireslen = struct.unpack('>H', fp.read(2))[0]
406 407 requires = fp.read(requireslen)
407 408
408 409 if not requires.endswith('\0'):
409 410 raise error.Abort(_('malformed stream clone bundle: '
410 411 'requirements not properly encoded'))
411 412
412 413 requirements = set(requires.rstrip('\0').split(','))
413 414
414 415 return filecount, bytecount, requirements
415 416
416 417 def applybundlev1(repo, fp):
417 418 """Apply the content from a stream clone bundle version 1.
418 419
419 420 We assume the 4 byte header has been read and validated and the file handle
420 421 is at the 2 byte compression identifier.
421 422 """
422 423 if len(repo):
423 424 raise error.Abort(_('cannot apply stream clone bundle on non-empty '
424 425 'repo'))
425 426
426 427 filecount, bytecount, requirements = readbundle1header(fp)
427 428 missingreqs = requirements - repo.supportedformats
428 429 if missingreqs:
429 430 raise error.Abort(_('unable to apply stream clone: '
430 431 'unsupported format: %s') %
431 432 ', '.join(sorted(missingreqs)))
432 433
433 434 consumev1(repo, fp, filecount, bytecount)
434 435
435 436 class streamcloneapplier(object):
436 437 """Class to manage applying streaming clone bundles.
437 438
438 439 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
439 440 readers to perform bundle type-specific functionality.
440 441 """
441 442 def __init__(self, fh):
442 443 self._fh = fh
443 444
444 445 def apply(self, repo):
445 446 return applybundlev1(repo, self._fh)
446 447
447 448 # type of file to stream
448 449 _fileappend = 0 # append only file
449 450 _filefull = 1 # full snapshot file
450 451
451 452 # Source of the file
452 453 _srcstore = 's' # store (svfs)
453 454 _srccache = 'c' # cache (cache)
454 455
455 456 # This is its own function so extensions can override it.
456 457 def _walkstreamfullstorefiles(repo):
457 458 """list snapshot files from the store"""
458 459 fnames = []
459 460 if not repo.publishing():
460 461 fnames.append('phaseroots')
461 462 return fnames
462 463
463 464 def _filterfull(entry, copy, vfsmap):
464 465 """actually copy the snapshot files"""
465 466 src, name, ftype, data = entry
466 467 if ftype != _filefull:
467 468 return entry
468 469 return (src, name, ftype, copy(vfsmap[src].join(name)))
469 470
470 471 @contextlib.contextmanager
471 472 def maketempcopies():
472 473 """return a function to temporary copy file"""
473 474 files = []
474 475 try:
475 476 def copy(src):
476 477 fd, dst = pycompat.mkstemp()
477 478 os.close(fd)
478 479 files.append(dst)
479 480 util.copyfiles(src, dst, hardlink=True)
480 481 return dst
481 482 yield copy
482 483 finally:
483 484 for tmp in files:
484 485 util.tryunlink(tmp)
485 486
486 487 def _makemap(repo):
487 488 """make a (src -> vfs) map for the repo"""
488 489 vfsmap = {
489 490 _srcstore: repo.svfs,
490 491 _srccache: repo.cachevfs,
491 492 }
492 493 # we keep repo.vfs out of the map on purpose, there are too many dangers there
493 494 # (eg: .hg/hgrc)
494 495 assert repo.vfs not in vfsmap.values()
495 496
496 497 return vfsmap
497 498
498 499 def _emit2(repo, entries, totalfilesize):
499 500 """actually emit the stream bundle"""
500 501 vfsmap = _makemap(repo)
501 502 progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
502 503 unit=_('bytes'))
503 504 progress.update(0)
504 505 with maketempcopies() as copy, progress:
505 506 # copy is delayed until we are in the try
506 507 entries = [_filterfull(e, copy, vfsmap) for e in entries]
507 508 yield None # this release the lock on the repository
508 509 seen = 0
509 510
510 511 for src, name, ftype, data in entries:
511 512 vfs = vfsmap[src]
512 513 yield src
513 514 yield util.uvarintencode(len(name))
514 515 if ftype == _fileappend:
515 516 fp = vfs(name)
516 517 size = data
517 518 elif ftype == _filefull:
518 519 fp = open(data, 'rb')
519 520 size = util.fstat(fp).st_size
520 521 try:
521 522 yield util.uvarintencode(size)
522 523 yield name
523 524 if size <= 65536:
524 525 chunks = (fp.read(size),)
525 526 else:
526 527 chunks = util.filechunkiter(fp, limit=size)
527 528 for chunk in chunks:
528 529 seen += len(chunk)
529 530 progress.update(seen)
530 531 yield chunk
531 532 finally:
532 533 fp.close()
533 534
534 535 def generatev2(repo, includes, excludes):
535 536 """Emit content for version 2 of a streaming clone.
536 537
537 538 the data stream consists of the following entries:
538 539 1) A char representing the file destination (eg: store or cache)
539 540 2) A varint containing the length of the filename
540 541 3) A varint containing the length of file data
541 542 4) N bytes containing the filename (the internal, store-agnostic form)
542 543 5) N bytes containing the file data
543 544
544 545 Returns a 3-tuple of (file count, file size, data iterator).
545 546 """
546 547
547 548 # temporarily raise error until we add storage level logic
548 549 if includes or excludes:
549 550 raise error.Abort(_("server does not support narrow stream clones"))
550 551
551 552 with repo.lock():
552 553
553 554 entries = []
554 555 totalfilesize = 0
555 556
557 matcher = None
558 if includes or excludes:
559 matcher = narrowspec.match(repo.root, includes, excludes)
560
556 561 repo.ui.debug('scanning\n')
557 for name, ename, size in _walkstreamfiles(repo):
562 for name, ename, size in _walkstreamfiles(repo, matcher):
558 563 if size:
559 564 entries.append((_srcstore, name, _fileappend, size))
560 565 totalfilesize += size
561 566 for name in _walkstreamfullstorefiles(repo):
562 567 if repo.svfs.exists(name):
563 568 totalfilesize += repo.svfs.lstat(name).st_size
564 569 entries.append((_srcstore, name, _filefull, None))
565 570 for name in cacheutil.cachetocopy(repo):
566 571 if repo.cachevfs.exists(name):
567 572 totalfilesize += repo.cachevfs.lstat(name).st_size
568 573 entries.append((_srccache, name, _filefull, None))
569 574
570 575 chunks = _emit2(repo, entries, totalfilesize)
571 576 first = next(chunks)
572 577 assert first is None
573 578
574 579 return len(entries), totalfilesize, chunks
575 580
576 581 @contextlib.contextmanager
577 582 def nested(*ctxs):
578 583 this = ctxs[0]
579 584 rest = ctxs[1:]
580 585 with this:
581 586 if rest:
582 587 with nested(*rest):
583 588 yield
584 589 else:
585 590 yield
586 591
587 592 def consumev2(repo, fp, filecount, filesize):
588 593 """Apply the contents from a version 2 streaming clone.
589 594
590 595 Data is read from an object that only needs to provide a ``read(size)``
591 596 method.
592 597 """
593 598 with repo.lock():
594 599 repo.ui.status(_('%d files to transfer, %s of data\n') %
595 600 (filecount, util.bytecount(filesize)))
596 601
597 602 start = util.timer()
598 603 progress = repo.ui.makeprogress(_('clone'), total=filesize,
599 604 unit=_('bytes'))
600 605 progress.update(0)
601 606
602 607 vfsmap = _makemap(repo)
603 608
604 609 with repo.transaction('clone'):
605 610 ctxs = (vfs.backgroundclosing(repo.ui)
606 611 for vfs in vfsmap.values())
607 612 with nested(*ctxs):
608 613 for i in range(filecount):
609 614 src = util.readexactly(fp, 1)
610 615 vfs = vfsmap[src]
611 616 namelen = util.uvarintdecodestream(fp)
612 617 datalen = util.uvarintdecodestream(fp)
613 618
614 619 name = util.readexactly(fp, namelen)
615 620
616 621 if repo.ui.debugflag:
617 622 repo.ui.debug('adding [%s] %s (%s)\n' %
618 623 (src, name, util.bytecount(datalen)))
619 624
620 625 with vfs(name, 'w') as ofp:
621 626 for chunk in util.filechunkiter(fp, limit=datalen):
622 627 progress.increment(step=len(chunk))
623 628 ofp.write(chunk)
624 629
625 630 # force @filecache properties to be reloaded from
626 631 # streamclone-ed file at next access
627 632 repo.invalidate(clearfilecache=True)
628 633
629 634 elapsed = util.timer() - start
630 635 if elapsed <= 0:
631 636 elapsed = 0.001
632 637 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
633 638 (util.bytecount(progress.pos), elapsed,
634 639 util.bytecount(progress.pos / elapsed)))
635 640 progress.complete()
636 641
637 642 def applybundlev2(repo, fp, filecount, filesize, requirements):
638 643 from . import localrepo
639 644
640 645 missingreqs = [r for r in requirements if r not in repo.supported]
641 646 if missingreqs:
642 647 raise error.Abort(_('unable to apply stream clone: '
643 648 'unsupported format: %s') %
644 649 ', '.join(sorted(missingreqs)))
645 650
646 651 consumev2(repo, fp, filecount, filesize)
647 652
648 653 # new requirements = old non-format requirements +
649 654 # new format-related remote requirements
650 655 # requirements from the streamed-in repository
651 656 repo.requirements = set(requirements) | (
652 657 repo.requirements - repo.supportedformats)
653 658 repo.svfs.options = localrepo.resolvestorevfsoptions(
654 659 repo.ui, repo.requirements, repo.features)
655 660 repo._writerequirements()
General Comments 0
You need to be logged in to leave comments. Login now