streamclone: reimplement nested context manager...
Augie Fackler
r39793:97f2992c default
@@ -1,647 +1,647 @@
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import os
12 12 import struct
13 import warnings
14 13
15 14 from .i18n import _
16 15 from . import (
17 16 branchmap,
18 17 cacheutil,
19 18 error,
20 19 phases,
21 20 pycompat,
22 21 store,
23 22 util,
24 23 )
25 24
26 25 def canperformstreamclone(pullop, bundle2=False):
27 26 """Whether it is possible to perform a streaming clone as part of pull.
28 27
29 28 ``bundle2`` will cause the function to consider stream clone through
30 29 bundle2 and only through bundle2.
31 30
32 31 Returns a tuple of (supported, requirements). ``supported`` is True if
33 32 streaming clone is supported and False otherwise. ``requirements`` is
34 33 a set of repo requirements from the remote, or ``None`` if stream clone
35 34 isn't supported.
36 35 """
37 36 repo = pullop.repo
38 37 remote = pullop.remote
39 38
40 39 bundle2supported = False
41 40 if pullop.canusebundle2:
42 41 if 'v2' in pullop.remotebundle2caps.get('stream', []):
43 42 bundle2supported = True
44 43 # else
45 44 # Server doesn't support bundle2 stream clone or doesn't support
46 45 # the versions we support. Fall back and possibly allow legacy.
47 46
48 47 # Ensures legacy code path uses available bundle2.
49 48 if bundle2supported and not bundle2:
50 49 return False, None
51 50 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
52 51 elif bundle2 and not bundle2supported:
53 52 return False, None
54 53
55 54 # Streaming clone only works on empty repositories.
56 55 if len(repo):
57 56 return False, None
58 57
59 58 # Streaming clone only works if all data is being requested.
60 59 if pullop.heads:
61 60 return False, None
62 61
63 62 streamrequested = pullop.streamclonerequested
64 63
65 64 # If we don't have a preference, let the server decide for us. This
66 65 # likely only comes into play in LANs.
67 66 if streamrequested is None:
68 67 # The server can advertise whether to prefer streaming clone.
69 68 streamrequested = remote.capable('stream-preferred')
70 69
71 70 if not streamrequested:
72 71 return False, None
73 72
74 73 # In order for stream clone to work, the client has to support all the
75 74 # requirements advertised by the server.
76 75 #
77 76 # The server advertises its requirements via the "stream" and "streamreqs"
78 77 # capability. "stream" (a value-less capability) is advertised if and only
79 78 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
80 79 # is advertised and contains a comma-delimited list of requirements.
81 80 requirements = set()
82 81 if remote.capable('stream'):
83 82 requirements.add('revlogv1')
84 83 else:
85 84 streamreqs = remote.capable('streamreqs')
86 85 # This is weird and shouldn't happen with modern servers.
87 86 if not streamreqs:
88 87 pullop.repo.ui.warn(_(
89 88 'warning: stream clone requested but server has them '
90 89 'disabled\n'))
91 90 return False, None
92 91
93 92 streamreqs = set(streamreqs.split(','))
94 93 # Server requires something we don't support. Bail.
95 94 missingreqs = streamreqs - repo.supportedformats
96 95 if missingreqs:
97 96 pullop.repo.ui.warn(_(
98 97 'warning: stream clone requested but client is missing '
99 98 'requirements: %s\n') % ', '.join(sorted(missingreqs)))
100 99 pullop.repo.ui.warn(
101 100 _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
102 101 'for more information)\n'))
103 102 return False, None
104 103 requirements = streamreqs
105 104
106 105 return True, requirements
107 106
108 107 def maybeperformlegacystreamclone(pullop):
109 108 """Possibly perform a legacy stream clone operation.
110 109
111 110 Legacy stream clones are performed as part of pull but before all other
112 111 operations.
113 112
114 113 A legacy stream clone will not be performed if a bundle2 stream clone is
115 114 supported.
116 115 """
117 116 from . import localrepo
118 117
119 118 supported, requirements = canperformstreamclone(pullop)
120 119
121 120 if not supported:
122 121 return
123 122
124 123 repo = pullop.repo
125 124 remote = pullop.remote
126 125
127 126 # Save remote branchmap. We will use it later to speed up branchcache
128 127 # creation.
129 128 rbranchmap = None
130 129 if remote.capable('branchmap'):
131 130 with remote.commandexecutor() as e:
132 131 rbranchmap = e.callcommand('branchmap', {}).result()
133 132
134 133 repo.ui.status(_('streaming all changes\n'))
135 134
136 135 with remote.commandexecutor() as e:
137 136 fp = e.callcommand('stream_out', {}).result()
138 137
139 138 # TODO strictly speaking, this code should all be inside the context
140 139 # manager because the context manager is supposed to ensure all wire state
141 140 # is flushed when exiting. But the legacy peers don't do this, so it
142 141 # doesn't matter.
143 142 l = fp.readline()
144 143 try:
145 144 resp = int(l)
146 145 except ValueError:
147 146 raise error.ResponseError(
148 147 _('unexpected response from remote server:'), l)
149 148 if resp == 1:
150 149 raise error.Abort(_('operation forbidden by server'))
151 150 elif resp == 2:
152 151 raise error.Abort(_('locking the remote repository failed'))
153 152 elif resp != 0:
154 153 raise error.Abort(_('the server sent an unknown error code'))
155 154
156 155 l = fp.readline()
157 156 try:
158 157 filecount, bytecount = map(int, l.split(' ', 1))
159 158 except (ValueError, TypeError):
160 159 raise error.ResponseError(
161 160 _('unexpected response from remote server:'), l)
162 161
163 162 with repo.lock():
164 163 consumev1(repo, fp, filecount, bytecount)
165 164
166 165 # new requirements = old non-format requirements +
167 166 # new format-related remote requirements
168 167 # requirements from the streamed-in repository
169 168 repo.requirements = requirements | (
170 169 repo.requirements - repo.supportedformats)
171 170 repo.svfs.options = localrepo.resolvestorevfsoptions(
172 171 repo.ui, repo.requirements)
173 172 repo._writerequirements()
174 173
175 174 if rbranchmap:
176 175 branchmap.replacecache(repo, rbranchmap)
177 176
178 177 repo.invalidate()
179 178
180 179 def allowservergeneration(repo):
181 180 """Whether streaming clones are allowed from the server."""
182 181 if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
183 182 return False
184 183
185 184 # The way stream clone works makes it impossible to hide secret changesets.
186 185 # So don't allow this by default.
187 186 secret = phases.hassecret(repo)
188 187 if secret:
189 188 return repo.ui.configbool('server', 'uncompressedallowsecret')
190 189
191 190 return True
192 191
193 192 # This is its own function so extensions can override it.
194 193 def _walkstreamfiles(repo):
195 194 return repo.store.walk()
196 195
197 196 def generatev1(repo):
198 197 """Emit content for version 1 of a streaming clone.
199 198
200 199 This returns a 3-tuple of (file count, byte size, data iterator).
201 200
202 201 The data iterator consists of N entries for each file being transferred.
203 202 Each file entry starts as a line with the file name and integer size
204 203 delimited by a null byte.
205 204
206 205 The raw file data follows. Following the raw file data is the next file
207 206 entry, or EOF.
208 207
209 208 When used on the wire protocol, an additional line indicating protocol
210 209 success will be prepended to the stream. This function is not responsible
211 210 for adding it.
212 211
213 212 This function will obtain a repository lock to ensure a consistent view of
214 213 the store is captured. It therefore may raise LockError.
215 214 """
216 215 entries = []
217 216 total_bytes = 0
218 217 # Get consistent snapshot of repo, lock during scan.
219 218 with repo.lock():
220 219 repo.ui.debug('scanning\n')
221 220 for name, ename, size in _walkstreamfiles(repo):
222 221 if size:
223 222 entries.append((name, size))
224 223 total_bytes += size
225 224
226 225 repo.ui.debug('%d files, %d bytes to transfer\n' %
227 226 (len(entries), total_bytes))
228 227
229 228 svfs = repo.svfs
230 229 debugflag = repo.ui.debugflag
231 230
232 231 def emitrevlogdata():
233 232 for name, size in entries:
234 233 if debugflag:
235 234 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
236 235 # partially encode name over the wire for backwards compat
237 236 yield '%s\0%d\n' % (store.encodedir(name), size)
238 237 # auditing at this stage is both pointless (paths are already
239 238 # trusted by the local repo) and expensive
240 239 with svfs(name, 'rb', auditpath=False) as fp:
241 240 if size <= 65536:
242 241 yield fp.read(size)
243 242 else:
244 243 for chunk in util.filechunkiter(fp, limit=size):
245 244 yield chunk
246 245
247 246 return len(entries), total_bytes, emitrevlogdata()
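# Illustrative only: a minimal consumer of the v1 entry format described in
# the generatev1() docstring (hypothetical helper; the real consumer is
# consumev1() below). Each entry is a "<name>\0<size>\n" header line
# followed by exactly <size> bytes of raw file data; names arrive
# store-encoded, and real consumers run store.decodedir() on them.
def _iterv1entries(fp):
    while True:
        l = fp.readline()
        if not l:
            break  # EOF ends the stream of file entries
        name, size = l.split('\0', 1)
        yield name, fp.read(int(size))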
248 247
249 248 def generatev1wireproto(repo):
250 249 """Emit content for version 1 of streaming clone suitable for the wire.
251 250
252 251 This is the data output from ``generatev1()`` with 2 header lines. The
253 252 first line indicates overall success. The 2nd contains the file count and
254 253 byte size of payload.
255 254
256 255 The success line contains "0" for success, "1" for stream generation not
257 256 allowed, and "2" for error locking the repository (possibly indicating
258 257 a permissions error for the server process).
259 258 """
260 259 if not allowservergeneration(repo):
261 260 yield '1\n'
262 261 return
263 262
264 263 try:
265 264 filecount, bytecount, it = generatev1(repo)
266 265 except error.LockError:
267 266 yield '2\n'
268 267 return
269 268
270 269 # Indicates successful response.
271 270 yield '0\n'
272 271 yield '%d %d\n' % (filecount, bytecount)
273 272 for chunk in it:
274 273 yield chunk
275 274
276 275 def generatebundlev1(repo, compression='UN'):
277 276 """Emit content for version 1 of a stream clone bundle.
278 277
279 278 The first 4 bytes of the output ("HGS1") denote this as stream clone
280 279 bundle version 1.
281 280
282 281 The next 2 bytes indicate the compression type. Only "UN" is currently
283 282 supported.
284 283
285 284 The next 16 bytes are two 64-bit big endian unsigned integers indicating
286 285 file count and byte count, respectively.
287 286
288 287 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
289 288 of the requirements string, including a trailing \0. The following N bytes
290 289 are the requirements string, which is ASCII containing a comma-delimited
291 290 list of repo requirements that are needed to support the data.
292 291
293 292 The remaining content is the output of ``generatev1()`` (which may be
294 293 compressed in the future).
295 294
296 295 Returns a tuple of (requirements, data generator).
297 296 """
298 297 if compression != 'UN':
299 298 raise ValueError('we do not support the compression argument yet')
300 299
301 300 requirements = repo.requirements & repo.supportedformats
302 301 requires = ','.join(sorted(requirements))
303 302
304 303 def gen():
305 304 yield 'HGS1'
306 305 yield compression
307 306
308 307 filecount, bytecount, it = generatev1(repo)
309 308 repo.ui.status(_('writing %d bytes for %d files\n') %
310 309 (bytecount, filecount))
311 310
312 311 yield struct.pack('>QQ', filecount, bytecount)
313 312 yield struct.pack('>H', len(requires) + 1)
314 313 yield requires + '\0'
315 314
316 315 # This is where we'll add compression in the future.
317 316 assert compression == 'UN'
318 317
319 318 progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
320 319 unit=_('bytes'))
321 320 progress.update(0)
322 321
323 322 for chunk in it:
324 323 progress.increment(step=len(chunk))
325 324 yield chunk
326 325
327 326 progress.complete()
328 327
329 328 return requirements, gen()
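# For illustration, the header emitted by gen() above can be assembled with
# plain struct calls (hypothetical helper mirroring the documented layout;
# readbundle1header() below is its inverse, once the 4 byte magic has been
# consumed):
def _packbundlev1header(filecount, bytecount, requires):
    # 4 byte magic, 2 byte compression id, two big-endian u64 counts,
    # then a u16 length-prefixed requirements string with a trailing NUL.
    return (b'HGS1' + b'UN'
            + struct.pack('>QQ', filecount, bytecount)
            + struct.pack('>H', len(requires) + 1)
            + requires + b'\0')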
330 329
331 330 def consumev1(repo, fp, filecount, bytecount):
332 331 """Apply the contents from version 1 of a streaming clone file handle.
333 332
334 333 This takes the output from "stream_out" and applies it to the specified
335 334 repository.
336 335
337 336 Like "stream_out," the status line added by the wire protocol is not
338 337 handled by this function.
339 338 """
340 339 with repo.lock():
341 340 repo.ui.status(_('%d files to transfer, %s of data\n') %
342 341 (filecount, util.bytecount(bytecount)))
343 342 progress = repo.ui.makeprogress(_('clone'), total=bytecount,
344 343 unit=_('bytes'))
345 344 progress.update(0)
346 345 start = util.timer()
347 346
348 347 # TODO: get rid of (potential) inconsistency
349 348 #
350 349 # If transaction is started and any @filecache property is
351 350 # changed at this point, it causes inconsistency between
352 351 # in-memory cached property and streamclone-ed file on the
353 352 # disk. Nested transaction prevents transaction scope "clone"
354 353 # below from writing in-memory changes out at the end of it,
355 354 # even though in-memory changes are discarded at the end of it
356 355 # regardless of transaction nesting.
357 356 #
358 357 # But transaction nesting can't be simply prohibited, because
359 358 # nesting occurs also in ordinary case (e.g. enabling
360 359 # clonebundles).
361 360
362 361 with repo.transaction('clone'):
363 362 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
364 363 for i in pycompat.xrange(filecount):
365 364 # XXX doesn't support '\n' or '\r' in filenames
366 365 l = fp.readline()
367 366 try:
368 367 name, size = l.split('\0', 1)
369 368 size = int(size)
370 369 except (ValueError, TypeError):
371 370 raise error.ResponseError(
372 371 _('unexpected response from remote server:'), l)
373 372 if repo.ui.debugflag:
374 373 repo.ui.debug('adding %s (%s)\n' %
375 374 (name, util.bytecount(size)))
376 375 # for backwards compat, name was partially encoded
377 376 path = store.decodedir(name)
378 377 with repo.svfs(path, 'w', backgroundclose=True) as ofp:
379 378 for chunk in util.filechunkiter(fp, limit=size):
380 379 progress.increment(step=len(chunk))
381 380 ofp.write(chunk)
382 381
383 382 # force @filecache properties to be reloaded from
384 383 # streamclone-ed file at next access
385 384 repo.invalidate(clearfilecache=True)
386 385
387 386 elapsed = util.timer() - start
388 387 if elapsed <= 0:
389 388 elapsed = 0.001
390 389 progress.complete()
391 390 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
392 391 (util.bytecount(bytecount), elapsed,
393 392 util.bytecount(bytecount / elapsed)))
394 393
395 394 def readbundle1header(fp):
396 395 compression = fp.read(2)
397 396 if compression != 'UN':
398 397 raise error.Abort(_('only uncompressed stream clone bundles are '
399 398 'supported; got %s') % compression)
400 399
401 400 filecount, bytecount = struct.unpack('>QQ', fp.read(16))
402 401 requireslen = struct.unpack('>H', fp.read(2))[0]
403 402 requires = fp.read(requireslen)
404 403
405 404 if not requires.endswith('\0'):
406 405 raise error.Abort(_('malformed stream clone bundle: '
407 406 'requirements not properly encoded'))
408 407
409 408 requirements = set(requires.rstrip('\0').split(','))
410 409
411 410 return filecount, bytecount, requirements
412 411
413 412 def applybundlev1(repo, fp):
414 413 """Apply the content from a stream clone bundle version 1.
415 414
416 415 We assume the 4 byte header has been read and validated and the file handle
417 416 is at the 2 byte compression identifier.
418 417 """
419 418 if len(repo):
420 419 raise error.Abort(_('cannot apply stream clone bundle on non-empty '
421 420 'repo'))
422 421
423 422 filecount, bytecount, requirements = readbundle1header(fp)
424 423 missingreqs = requirements - repo.supportedformats
425 424 if missingreqs:
426 425 raise error.Abort(_('unable to apply stream clone: '
427 426 'unsupported format: %s') %
428 427 ', '.join(sorted(missingreqs)))
429 428
430 429 consumev1(repo, fp, filecount, bytecount)
431 430
432 431 class streamcloneapplier(object):
433 432 """Class to manage applying streaming clone bundles.
434 433
435 434 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
436 435 readers to perform bundle type-specific functionality.
437 436 """
438 437 def __init__(self, fh):
439 438 self._fh = fh
440 439
441 440 def apply(self, repo):
442 441 return applybundlev1(repo, self._fh)
443 442
444 443 # type of file to stream
445 444 _fileappend = 0 # append only file
446 445 _filefull = 1 # full snapshot file
447 446
448 447 # Source of the file
449 448 _srcstore = 's' # store (svfs)
450 449 _srccache = 'c' # cache (cache)
451 450
452 451 # This is its own function so extensions can override it.
453 452 def _walkstreamfullstorefiles(repo):
454 453 """list snapshot file from the store"""
455 454 fnames = []
456 455 if not repo.publishing():
457 456 fnames.append('phaseroots')
458 457 return fnames
459 458
460 459 def _filterfull(entry, copy, vfsmap):
461 460 """actually copy the snapshot files"""
462 461 src, name, ftype, data = entry
463 462 if ftype != _filefull:
464 463 return entry
465 464 return (src, name, ftype, copy(vfsmap[src].join(name)))
466 465
467 466 @contextlib.contextmanager
468 467 def maketempcopies():
469 468 """return a function to temporary copy file"""
470 469 files = []
471 470 try:
472 471 def copy(src):
473 472 fd, dst = pycompat.mkstemp()
474 473 os.close(fd)
475 474 files.append(dst)
476 475 util.copyfiles(src, dst, hardlink=True)
477 476 return dst
478 477 yield copy
479 478 finally:
480 479 for tmp in files:
481 480 util.tryunlink(tmp)
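# Typical usage (see _emit2() below): the yielded ``copy`` callable
# hardlinks a file to a temporary path so its content survives past the
# repo lock, and every temporary copy is unlinked when the context exits:
#
#     with maketempcopies() as copy:
#         tmppath = copy(vfs.join(name))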
482 481
483 482 def _makemap(repo):
484 483 """make a (src -> vfs) map for the repo"""
485 484 vfsmap = {
486 485 _srcstore: repo.svfs,
487 486 _srccache: repo.cachevfs,
488 487 }
489 488 # we keep repo.vfs out of the map on purpose; there are too many dangers there
490 489 # (eg: .hg/hgrc)
491 490 assert repo.vfs not in vfsmap.values()
492 491
493 492 return vfsmap
494 493
495 494 def _emit2(repo, entries, totalfilesize):
496 495 """actually emit the stream bundle"""
497 496 vfsmap = _makemap(repo)
498 497 progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
499 498 unit=_('bytes'))
500 499 progress.update(0)
501 500 with maketempcopies() as copy, progress:
502 501 # copy is delayed until we are in the try
503 502 entries = [_filterfull(e, copy, vfsmap) for e in entries]
504 503 yield None # this releases the lock on the repository
505 504 seen = 0
506 505
507 506 for src, name, ftype, data in entries:
508 507 vfs = vfsmap[src]
509 508 yield src
510 509 yield util.uvarintencode(len(name))
511 510 if ftype == _fileappend:
512 511 fp = vfs(name)
513 512 size = data
514 513 elif ftype == _filefull:
515 514 fp = open(data, 'rb')
516 515 size = util.fstat(fp).st_size
517 516 try:
518 517 yield util.uvarintencode(size)
519 518 yield name
520 519 if size <= 65536:
521 520 chunks = (fp.read(size),)
522 521 else:
523 522 chunks = util.filechunkiter(fp, limit=size)
524 523 for chunk in chunks:
525 524 seen += len(chunk)
526 525 progress.update(seen)
527 526 yield chunk
528 527 finally:
529 528 fp.close()
530 529
531 530 def generatev2(repo):
532 531 """Emit content for version 2 of a streaming clone.
533 532
534 533 the data stream consists of the following entries:
535 534 1) A char representing the file destination (eg: store or cache)
536 535 2) A varint containing the length of the filename
537 536 3) A varint containing the length of file data
538 537 4) N bytes containing the filename (the internal, store-agnostic form)
539 538 5) N bytes containing the file data
540 539
541 540 Returns a 3-tuple of (file count, file size, data iterator).
542 541 """
543 542
544 543 with repo.lock():
545 544
546 545 entries = []
547 546 totalfilesize = 0
548 547
549 548 repo.ui.debug('scanning\n')
550 549 for name, ename, size in _walkstreamfiles(repo):
551 550 if size:
552 551 entries.append((_srcstore, name, _fileappend, size))
553 552 totalfilesize += size
554 553 for name in _walkstreamfullstorefiles(repo):
555 554 if repo.svfs.exists(name):
556 555 totalfilesize += repo.svfs.lstat(name).st_size
557 556 entries.append((_srcstore, name, _filefull, None))
558 557 for name in cacheutil.cachetocopy(repo):
559 558 if repo.cachevfs.exists(name):
560 559 totalfilesize += repo.cachevfs.lstat(name).st_size
561 560 entries.append((_srccache, name, _filefull, None))
562 561
563 562 chunks = _emit2(repo, entries, totalfilesize)
564 563 first = next(chunks)
565 564 assert first is None
566 565
567 566 return len(entries), totalfilesize, chunks
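# The varints in the v2 stream use the common 7-bits-per-byte encoding with
# the high bit as a continuation flag (the format util.uvarintencode is
# expected to emit). A minimal standalone decoder, for illustration only
# (hypothetical helper; consumev2() below uses util.uvarintdecodestream):
def _readuvarint(fp):
    result = 0
    shift = 0
    while True:
        byte = ord(fp.read(1))
        result |= (byte & 0x7f) << shift
        if not byte & 0x80:
            return result
        shift += 7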
568 567
569 568 @contextlib.contextmanager
570 569 def nested(*ctxs):
571 with warnings.catch_warnings():
572 # For some reason, Python decided 'nested' was deprecated without
573 # replacement. They officially advertised for filtering the deprecation
574 # warning for people who actually need the feature.
575 warnings.filterwarnings("ignore",category=DeprecationWarning)
576 with contextlib.nested(*ctxs):
570 this = ctxs[0]
571 rest = ctxs[1:]
572 with this:
573 if rest:
574 with nested(*rest):
575 yield
576 else:
577 577 yield
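# The reimplemented nested() keeps the old contextlib.nested() semantics:
# later context managers are entered inside earlier ones and unwound in
# reverse order on exit. Illustrative usage, matching consumev2() below:
#
#     ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
#     with nested(*ctxs):
#         ...  # all background closers active here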
578 578
579 579 def consumev2(repo, fp, filecount, filesize):
580 580 """Apply the contents from a version 2 streaming clone.
581 581
582 582 Data is read from an object that only needs to provide a ``read(size)``
583 583 method.
584 584 """
585 585 with repo.lock():
586 586 repo.ui.status(_('%d files to transfer, %s of data\n') %
587 587 (filecount, util.bytecount(filesize)))
588 588
589 589 start = util.timer()
590 590 progress = repo.ui.makeprogress(_('clone'), total=filesize,
591 591 unit=_('bytes'))
592 592 progress.update(0)
593 593
594 594 vfsmap = _makemap(repo)
595 595
596 596 with repo.transaction('clone'):
597 597 ctxs = (vfs.backgroundclosing(repo.ui)
598 598 for vfs in vfsmap.values())
599 599 with nested(*ctxs):
600 600 for i in range(filecount):
601 601 src = util.readexactly(fp, 1)
602 602 vfs = vfsmap[src]
603 603 namelen = util.uvarintdecodestream(fp)
604 604 datalen = util.uvarintdecodestream(fp)
605 605
606 606 name = util.readexactly(fp, namelen)
607 607
608 608 if repo.ui.debugflag:
609 609 repo.ui.debug('adding [%s] %s (%s)\n' %
610 610 (src, name, util.bytecount(datalen)))
611 611
612 612 with vfs(name, 'w') as ofp:
613 613 for chunk in util.filechunkiter(fp, limit=datalen):
614 614 progress.increment(step=len(chunk))
615 615 ofp.write(chunk)
616 616
617 617 # force @filecache properties to be reloaded from
618 618 # streamclone-ed file at next access
619 619 repo.invalidate(clearfilecache=True)
620 620
621 621 elapsed = util.timer() - start
622 622 if elapsed <= 0:
623 623 elapsed = 0.001
624 624 repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
625 625 (util.bytecount(progress.pos), elapsed,
626 626 util.bytecount(progress.pos / elapsed)))
627 627 progress.complete()
628 628
629 629 def applybundlev2(repo, fp, filecount, filesize, requirements):
630 630 from . import localrepo
631 631
632 632 missingreqs = [r for r in requirements if r not in repo.supported]
633 633 if missingreqs:
634 634 raise error.Abort(_('unable to apply stream clone: '
635 635 'unsupported format: %s') %
636 636 ', '.join(sorted(missingreqs)))
637 637
638 638 consumev2(repo, fp, filecount, filesize)
639 639
640 640 # new requirements = old non-format requirements +
641 641 # new format-related remote requirements
642 642 # requirements from the streamed-in repository
643 643 repo.requirements = set(requirements) | (
644 644 repo.requirements - repo.supportedformats)
645 645 repo.svfs.options = localrepo.resolvestorevfsoptions(
646 646 repo.ui, repo.requirements)
647 647 repo._writerequirements()