typing: make `bundlerepository` subclass `localrepository` while type checking...
Matt Harbison
r53148:9d4ad05b default
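The change guards an alternate base class behind `typing.TYPE_CHECKING`, so static type checkers analyse `bundlerepository` as a `localrepository` subclass while the class still derives from `object` at runtime (the concrete base is mixed in dynamically by `makebundlerepository()`). A minimal, self-contained sketch of that pattern, using placeholder names rather than Mercurial code:

import typing


class ConcreteRepo:
    """Stands in for the concrete class the checker should assume."""

    def heads(self):
        return []


_baseclass = object  # what the interpreter actually uses as the base

if typing.TYPE_CHECKING:
    # Only type checkers evaluate this branch, so they treat the mixin as a
    # ConcreteRepo subclass; at runtime it still derives from object.
    _baseclass = ConcreteRepo


class ReadOnlyMixin(_baseclass):
    def canpush(self):
        return False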
@@ -1,759 +1,766 @@
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import annotations
15 15
16 16 import contextlib
17 17 import os
18 18 import shutil
19 import typing
19 20
20 21 from .i18n import _
21 22 from .node import (
22 23 hex,
23 24 nullrev,
24 25 )
25 26
26 27 from . import (
27 28 bundle2,
28 29 changegroup,
29 30 changelog,
30 31 cmdutil,
31 32 discovery,
32 33 encoding,
33 34 error,
34 35 exchange,
35 36 filelog,
36 37 localrepo,
37 38 manifest,
38 39 mdiff,
39 40 pathutil,
40 41 phases,
41 42 pycompat,
42 43 revlog,
43 44 revlogutils,
44 45 util,
45 46 vfs as vfsmod,
46 47 )
47 48 from .utils import (
48 49 urlutil,
49 50 )
50 51
51 52 from .revlogutils import (
52 53 constants as revlog_constants,
53 54 )
54 55
55 56
56 57 class bundlerevlog(revlog.revlog):
57 58 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
58 59 # How it works:
59 60 # To retrieve a revision, we need to know the offset of the revision in
60 61 # the bundle (an unbundle object). We store this offset in the index
61 62 # (start). The base of the delta is stored in the base field.
62 63 #
63 64 # To differentiate a rev in the bundle from a rev in the revlog, we
64 65 # check revision against repotiprev.
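# (Illustrative note.)  Concretely: if the underlying revlog already holds
# revisions 0..repotiprev, every changegroup delta appended below gets the
# next revision number; its byte offset inside the unbundle stream is stored
# as the entry's data offset and its delta base is resolved through
# self.rev(deltabase), which lets _chunk() seek into the bundle later
# instead of reading revision data from disk.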
65 66 opener = vfsmod.readonlyvfs(opener)
66 67 revlog.revlog.__init__(self, opener, target=target, radix=radix)
67 68 self.bundle = cgunpacker
68 69 n = len(self)
69 70 self.repotiprev = n - 1
70 71 self.bundlerevs = set() # used by 'bundle()' revset expression
71 72 for deltadata in cgunpacker.deltaiter():
72 73 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
73 74
74 75 size = len(delta)
75 76 start = cgunpacker.tell() - size
76 77
77 78 if self.index.has_node(node):
78 79 # this can happen if two branches make the same change
79 80 self.bundlerevs.add(self.index.rev(node))
80 81 continue
81 82 if cs == node:
82 83 linkrev = nullrev
83 84 else:
84 85 linkrev = linkmapper(cs)
85 86
86 87 for p in (p1, p2):
87 88 if not self.index.has_node(p):
88 89 raise error.LookupError(
89 90 p, self.display_id, _(b"unknown parent")
90 91 )
91 92
92 93 if not self.index.has_node(deltabase):
93 94 raise error.LookupError(
94 95 deltabase, self.display_id, _(b'unknown delta base')
95 96 )
96 97
97 98 baserev = self.rev(deltabase)
98 99 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
99 100 e = revlogutils.entry(
100 101 flags=flags,
101 102 data_offset=start,
102 103 data_compressed_length=size,
103 104 data_delta_base=baserev,
104 105 link_rev=linkrev,
105 106 parent_rev_1=self.rev(p1),
106 107 parent_rev_2=self.rev(p2),
107 108 node_id=node,
108 109 )
109 110 self.index.append(e)
110 111 self.bundlerevs.add(n)
111 112 n += 1
112 113
113 114 @contextlib.contextmanager
114 115 def reading(self):
115 116 if self.repotiprev < 0:
116 117 yield
117 118 else:
118 119 with super().reading() as x:
119 120 yield x
120 121
121 122 def _chunk(self, rev):
122 123 # Warning: in the case of a bundle, the diff is against what we stored as the
123 124 # delta base, not against rev - 1
124 125 # XXX: could use some caching
125 126 if rev <= self.repotiprev:
126 127 return revlog.revlog._chunk(self, rev)
127 128 self.bundle.seek(self.start(rev))
128 129 return self.bundle.read(self.length(rev))
129 130
130 131 def revdiff(self, rev1, rev2):
131 132 """return or calculate a delta between two revisions"""
132 133 if rev1 > self.repotiprev and rev2 > self.repotiprev:
133 134 # hot path for bundle
134 135 revb = self.index[rev2][3]
135 136 if revb == rev1:
136 137 return self._chunk(rev2)
137 138 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
138 139 return revlog.revlog.revdiff(self, rev1, rev2)
139 140
140 141 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
141 142
142 143 def _rawtext(self, node, rev):
143 144 if rev is None:
144 145 rev = self.rev(node)
145 146 validated = False
146 147 rawtext = None
147 148 chain = []
148 149 iterrev = rev
149 150 # reconstruct the revision if it is from a changegroup
150 151 while iterrev > self.repotiprev:
151 152 if (
152 153 self._inner._revisioncache
153 154 and self._inner._revisioncache[1] == iterrev
154 155 ):
155 156 rawtext = self._inner._revisioncache[2]
156 157 break
157 158 chain.append(iterrev)
158 159 iterrev = self.index[iterrev][3]
159 160 if iterrev == nullrev:
160 161 rawtext = b''
161 162 elif rawtext is None:
162 163 r = super(bundlerevlog, self)._rawtext(
163 164 self.node(iterrev),
164 165 iterrev,
165 166 )
166 167 __, rawtext, validated = r
167 168 if chain:
168 169 validated = False
169 170 while chain:
170 171 delta = self._chunk(chain.pop())
171 172 rawtext = mdiff.patches(rawtext, [delta])
172 173 return rev, rawtext, validated
173 174
174 175 def addrevision(self, *args, **kwargs):
175 176 raise NotImplementedError
176 177
177 178 def addgroup(self, *args, **kwargs):
178 179 raise NotImplementedError
179 180
180 181 def strip(self, *args, **kwargs):
181 182 raise NotImplementedError
182 183
183 184 def checksize(self):
184 185 raise NotImplementedError
185 186
186 187
187 188 class bundlechangelog(bundlerevlog, changelog.changelog):
188 189 def __init__(self, opener, cgunpacker):
189 190 changelog.changelog.__init__(self, opener)
190 191 linkmapper = lambda x: x
191 192 bundlerevlog.__init__(
192 193 self,
193 194 opener,
194 195 (revlog_constants.KIND_CHANGELOG, None),
195 196 self.radix,
196 197 cgunpacker,
197 198 linkmapper,
198 199 )
199 200
200 201
201 202 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
202 203 def __init__(
203 204 self,
204 205 nodeconstants,
205 206 opener,
206 207 cgunpacker,
207 208 linkmapper,
208 209 dirlogstarts=None,
209 210 dir=b'',
210 211 ):
211 212 # XXX manifestrevlog is not actually a revlog, so mixing it with
212 213 # bundlerevlog is not a good idea.
213 214 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
214 215 bundlerevlog.__init__(
215 216 self,
216 217 opener,
217 218 (revlog_constants.KIND_MANIFESTLOG, dir),
218 219 self._revlog.radix,
219 220 cgunpacker,
220 221 linkmapper,
221 222 )
222 223 if dirlogstarts is None:
223 224 dirlogstarts = {}
224 225 if self.bundle.version == b"03":
225 226 dirlogstarts = _getfilestarts(self.bundle)
226 227 self._dirlogstarts = dirlogstarts
227 228 self._linkmapper = linkmapper
228 229
229 230 def dirlog(self, d):
230 231 if d in self._dirlogstarts:
231 232 self.bundle.seek(self._dirlogstarts[d])
232 233 return bundlemanifest(
233 234 self.nodeconstants,
234 235 self.opener,
235 236 self.bundle,
236 237 self._linkmapper,
237 238 self._dirlogstarts,
238 239 dir=d,
239 240 )
240 241 return super(bundlemanifest, self).dirlog(d)
241 242
242 243
243 244 class bundlefilelog(filelog.filelog):
244 245 def __init__(self, opener, path, cgunpacker, linkmapper):
245 246 filelog.filelog.__init__(self, opener, path)
246 247 self._revlog = bundlerevlog(
247 248 opener,
248 249 # XXX should use the unencoded path
249 250 target=(revlog_constants.KIND_FILELOG, path),
250 251 radix=self._revlog.radix,
251 252 cgunpacker=cgunpacker,
252 253 linkmapper=linkmapper,
253 254 )
254 255
255 256
256 257 class bundlepeer(localrepo.localpeer):
257 258 def canpush(self):
258 259 return False
259 260
260 261
261 262 class bundlephasecache(phases.phasecache):
262 263 def __init__(self, *args, **kwargs):
263 264 super(bundlephasecache, self).__init__(*args, **kwargs)
264 265 if hasattr(self, 'opener'):
265 266 self.opener = vfsmod.readonlyvfs(self.opener)
266 267
267 268 def write(self):
268 269 raise NotImplementedError
269 270
270 271 def _write(self, fp):
271 272 raise NotImplementedError
272 273
273 274 def _updateroots(self, repo, phase, newroots, tr, invalidate=True):
274 275 self._phaseroots[phase] = newroots
275 276 if invalidate:
276 277 self.invalidate()
277 278 self.dirty = True
278 279
279 280
280 281 def _getfilestarts(cgunpacker):
281 282 filespos = {}
282 283 for chunkdata in iter(cgunpacker.filelogheader, {}):
283 284 fname = chunkdata[b'filename']
284 285 filespos[fname] = cgunpacker.tell()
285 286 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
286 287 pass
287 288 return filespos
288 289
289 290
290 class bundlerepository:
291 _bundle_repo_baseclass = object
292
293 if typing.TYPE_CHECKING:
294 _bundle_repo_baseclass = localrepo.localrepository
295
296
297 class bundlerepository(_bundle_repo_baseclass):
291 298 """A repository instance that is a union of a local repo and a bundle.
292 299
293 300 Instances represent a read-only repository composed of a local repository
294 301 with the contents of a bundle file applied. The repository instance is
295 302 conceptually similar to the state of a repository after an
296 303 ``hg unbundle`` operation. However, the contents of the bundle are never
297 304 applied to the actual base repository.
298 305
299 306 Instances constructed directly are not usable as repository objects.
300 307 Use instance() or makebundlerepository() to create instances.
301 308 """
302 309
303 310 def __init__(self, bundlepath, url, tempparent):
304 311 self._tempparent = tempparent
305 312 self._url = url
306 313
307 314 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
308 315
309 316 # dict with the mapping 'filename' -> position in the changegroup.
310 317 self._cgfilespos = {}
311 318 self._bundlefile = None
312 319 self._cgunpacker = None
313 320 self.tempfile = None
314 321 f = util.posixfile(bundlepath, b"rb")
315 322 bundle = exchange.readbundle(self.ui, f, bundlepath)
316 323
317 324 if isinstance(bundle, bundle2.unbundle20):
318 325 self._bundlefile = bundle
319 326
320 327 cgpart = None
321 328 for part in bundle.iterparts(seekable=True):
322 329 if part.type == b'phase-heads':
323 330 self._handle_bundle2_phase_part(bundle, part)
324 331 elif part.type == b'changegroup':
325 332 if cgpart:
326 333 raise NotImplementedError(
327 334 b"can't process multiple changegroups"
328 335 )
329 336 cgpart = part
330 337 self._handle_bundle2_cg_part(bundle, part)
331 338
332 339 if not cgpart:
333 340 raise error.Abort(_(b"No changegroups found"))
334 341
335 342 # This is required to placate a later consumer, which expects
336 343 # the payload offset to be at the beginning of the changegroup.
337 344 # We need to do this after the iterparts() generator advances
338 345 # because iterparts() will seek to end of payload after the
339 346 # generator returns control to iterparts().
340 347 cgpart.seek(0, os.SEEK_SET)
341 348
342 349 elif isinstance(bundle, changegroup.cg1unpacker):
343 350 self._handle_bundle1(bundle, bundlepath)
344 351 else:
345 352 raise error.Abort(
346 353 _(b'bundle type %r cannot be read') % type(bundle)
347 354 )
348 355
349 356 def _handle_bundle1(self, bundle, bundlepath):
350 357 if bundle.compressed():
351 358 f = self._writetempbundle(bundle.read, b'.hg10un', header=b'HG10UN')
352 359 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
353 360
354 361 self._bundlefile = bundle
355 362 self._cgunpacker = bundle
356 363
357 364 self.firstnewrev = self.changelog.repotiprev + 1
358 365 phases.retractboundary(
359 366 self,
360 367 None,
361 368 phases.draft,
362 369 [ctx.node() for ctx in self[self.firstnewrev :]],
363 370 )
364 371
365 372 def _handle_bundle2_cg_part(self, bundle, part):
366 373 assert part.type == b'changegroup'
367 374 cgstream = part
368 375 targetphase = part.params.get(b'targetphase')
369 376 try:
370 377 targetphase = int(targetphase)
371 378 except TypeError:
372 379 pass
373 380 if targetphase is None:
374 381 targetphase = phases.draft
375 382 if targetphase not in phases.allphases:
376 383 m = _(b'unsupported targetphase: %d')
377 384 m %= targetphase
378 385 raise error.Abort(m)
379 386 version = part.params.get(b'version', b'01')
380 387 legalcgvers = changegroup.supportedincomingversions(self)
381 388 if version not in legalcgvers:
382 389 msg = _(b'Unsupported changegroup version: %s')
383 390 raise error.Abort(msg % version)
384 391 if bundle.compressed():
385 392 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
386 393
387 394 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
388 395
389 396 self.firstnewrev = self.changelog.repotiprev + 1
390 397 phases.retractboundary(
391 398 self,
392 399 None,
393 400 targetphase,
394 401 [ctx.node() for ctx in self[self.firstnewrev :]],
395 402 )
396 403
397 404 def _handle_bundle2_phase_part(self, bundle, part):
398 405 assert part.type == b'phase-heads'
399 406
400 407 unfi = self.unfiltered()
401 408 headsbyphase = phases.binarydecode(part)
402 409 phases.updatephases(unfi, lambda: None, headsbyphase)
403 410
404 411 def _writetempbundle(self, readfn, suffix, header=b''):
405 412 """Write a temporary file to disk"""
406 413 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
407 414 self.tempfile = temp
408 415
409 416 with os.fdopen(fdtemp, 'wb') as fptemp:
410 417 fptemp.write(header)
411 418 while True:
412 419 chunk = readfn(2**18)
413 420 if not chunk:
414 421 break
415 422 fptemp.write(chunk)
416 423
417 424 return self.vfs.open(self.tempfile, mode=b"rb")
418 425
419 426 @localrepo.unfilteredpropertycache
420 427 def _phasecache(self):
421 428 return bundlephasecache(self, self._phasedefaults)
422 429
423 430 @localrepo.unfilteredpropertycache
424 431 def changelog(self):
425 432 # consume the header if it exists
426 433 self._cgunpacker.changelogheader()
427 434 c = bundlechangelog(self.svfs, self._cgunpacker)
428 435 self.manstart = self._cgunpacker.tell()
429 436 return c
430 437
431 438 def _refreshchangelog(self):
432 439 # the changelog for a bundle repo is not a filecache property, so this
433 440 # method is not applicable.
434 441 pass
435 442
436 443 @localrepo.unfilteredpropertycache
437 444 def manifestlog(self):
438 445 self._cgunpacker.seek(self.manstart)
439 446 # consume the header if it exists
440 447 self._cgunpacker.manifestheader()
441 448 linkmapper = self.unfiltered().changelog.rev
442 449 rootstore = bundlemanifest(
443 450 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
444 451 )
445 452 self.filestart = self._cgunpacker.tell()
446 453
447 454 return manifest.manifestlog(
448 455 self.svfs, self, rootstore, self.narrowmatch()
449 456 )
450 457
451 458 def _consumemanifest(self):
452 459 """Consumes the manifest portion of the bundle, setting filestart so the
453 460 file portion can be read."""
454 461 self._cgunpacker.seek(self.manstart)
455 462 self._cgunpacker.manifestheader()
456 463 for delta in self._cgunpacker.deltaiter():
457 464 pass
458 465 self.filestart = self._cgunpacker.tell()
459 466
460 467 @localrepo.unfilteredpropertycache
461 468 def manstart(self):
462 469 self.changelog
463 470 return self.manstart
464 471
465 472 @localrepo.unfilteredpropertycache
466 473 def filestart(self):
467 474 self.manifestlog
468 475
469 476 # If filestart was not set by self.manifestlog, that means the
470 477 # manifestlog implementation did not consume the manifests from the
471 478 # changegroup (ex: it might be consuming trees from a separate bundle2
472 479 # part instead). So we need to manually consume it.
473 480 if 'filestart' not in self.__dict__:
474 481 self._consumemanifest()
475 482
476 483 return self.filestart
477 484
478 485 def url(self):
479 486 return self._url
480 487
481 488 def file(self, f):
482 489 if not self._cgfilespos:
483 490 self._cgunpacker.seek(self.filestart)
484 491 self._cgfilespos = _getfilestarts(self._cgunpacker)
485 492
486 493 if f in self._cgfilespos:
487 494 self._cgunpacker.seek(self._cgfilespos[f])
488 495 linkmapper = self.unfiltered().changelog.rev
489 496 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
490 497 else:
491 498 return super(bundlerepository, self).file(f)
492 499
493 500 def close(self):
494 501 """Close assigned bundle file immediately."""
495 502 self._bundlefile.close()
496 503 if self.tempfile is not None:
497 504 self.vfs.unlink(self.tempfile)
498 505 if self._tempparent:
499 506 shutil.rmtree(self._tempparent, True)
500 507
501 508 def cancopy(self):
502 509 return False
503 510
504 511 def peer(self, path=None, remotehidden=False):
505 512 return bundlepeer(self, path=path, remotehidden=remotehidden)
506 513
507 514 def getcwd(self):
508 515 return encoding.getcwd() # always outside the repo
509 516
510 517 # Check if parents exist in localrepo before setting
511 518 def setparents(self, p1, p2=None):
512 519 if p2 is None:
513 520 p2 = self.nullid
514 521 p1rev = self.changelog.rev(p1)
515 522 p2rev = self.changelog.rev(p2)
516 523 msg = _(b"setting parent to node %s that only exists in the bundle\n")
517 524 if self.changelog.repotiprev < p1rev:
518 525 self.ui.warn(msg % hex(p1))
519 526 if self.changelog.repotiprev < p2rev:
520 527 self.ui.warn(msg % hex(p2))
521 528 return super(bundlerepository, self).setparents(p1, p2)
522 529
523 530
524 531 def instance(ui, path, create, intents=None, createopts=None):
525 532 if create:
526 533 raise error.Abort(_(b'cannot create new bundle repository'))
527 534 # internal config: bundle.mainreporoot
528 535 parentpath = ui.config(b"bundle", b"mainreporoot")
529 536 if not parentpath:
530 537 # try to find the correct path to the working directory repo
531 538 parentpath = cmdutil.findrepo(encoding.getcwd())
532 539 if parentpath is None:
533 540 parentpath = b''
534 541 if parentpath:
535 542 # Try to make the full path relative so we get a nice, short URL.
536 543 # In particular, we don't want temp dir names in test outputs.
537 544 cwd = encoding.getcwd()
538 545 if parentpath == cwd:
539 546 parentpath = b''
540 547 else:
541 548 cwd = pathutil.normasprefix(cwd)
542 549 if parentpath.startswith(cwd):
543 550 parentpath = parentpath[len(cwd) :]
544 551 u = urlutil.url(path)
545 552 path = u.localpath()
546 553 if u.scheme == b'bundle':
547 554 s = path.split(b"+", 1)
548 555 if len(s) == 1:
549 556 repopath, bundlename = parentpath, s[0]
550 557 else:
551 558 repopath, bundlename = s
552 559 else:
553 560 repopath, bundlename = parentpath, path
554 561
555 562 return makebundlerepository(ui, repopath, bundlename)
556 563
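# For illustration: given the parsing above, instance() accepts either a
# plain bundle path (the parent repository is then located via the
# bundle.mainreporoot config or by looking for a repository from the current
# working directory) or a combined URL such as
#
#   bundle:/path/to/parent/repo+/path/to/changes.hg
#
# where everything before the first '+' is the base repository path and the
# remainder is the bundle file handed to makebundlerepository().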
557 564
558 565 def makebundlerepository(ui, repopath, bundlepath):
559 566 """Make a bundle repository object based on repo and bundle paths."""
560 567 if repopath:
561 568 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
562 569 else:
563 570 url = b'bundle:%s' % bundlepath
564 571
565 572 # Because we can't make any guarantees about the type of the base
566 573 # repository, we can't have a static class representing the bundle
567 574 # repository. We also can't make any guarantees about how to even
568 575 # call the base repository's constructor!
569 576 #
570 577 # So, our strategy is to go through ``localrepo.instance()`` to construct
571 578 # a repo instance. Then, we dynamically create a new type derived from
572 579 # both it and our ``bundlerepository`` class which overrides some
573 580 # functionality. We then change the type of the constructed repository
574 581 # to this new type and initialize the bundle-specific bits of it.
575 582
576 583 try:
577 584 repo = localrepo.instance(ui, repopath, create=False)
578 585 tempparent = None
579 586 except error.RequirementError:
580 587 raise # no fallback if the backing repo is unsupported
581 588 except error.RepoError:
582 589 tempparent = pycompat.mkdtemp()
583 590 try:
584 591 repo = localrepo.instance(ui, tempparent, create=True)
585 592 except Exception:
586 593 shutil.rmtree(tempparent)
587 594 raise
588 595
589 596 class derivedbundlerepository(bundlerepository, repo.__class__):
590 597 pass
591 598
592 599 repo.__class__ = derivedbundlerepository
593 600 bundlerepository.__init__(repo, bundlepath, url, tempparent)
594 601
595 602 return repo
596 603
597 604
598 605 class bundletransactionmanager:
599 606 def transaction(self):
600 607 return None
601 608
602 609 def close(self):
603 610 raise NotImplementedError
604 611
605 612 def release(self):
606 613 raise NotImplementedError
607 614
608 615
609 616 def getremotechanges(
610 617 ui, repo, peer, onlyheads=None, bundlename=None, force=False
611 618 ):
612 619 """obtains a bundle of changes incoming from peer
613 620
614 621 "onlyheads" restricts the returned changes to those reachable from the
615 622 specified heads.
616 623 "bundlename", if given, stores the bundle to this file path permanently;
617 624 otherwise it's stored to a temp file and gets deleted again when you call
618 625 the returned "cleanupfn".
619 626 "force" indicates whether to proceed on unrelated repos.
620 627
621 628 Returns a tuple (local, csets, cleanupfn):
622 629
623 630 "local" is a local repo from which to obtain the actual incoming
624 631 changesets; it is a bundlerepo for the obtained bundle when the
625 632 original "peer" is remote.
626 633 "csets" lists the incoming changeset node ids.
627 634 "cleanupfn" must be called without arguments when you're done processing
628 635 the changes; it closes both the original "peer" and the one returned
629 636 here.
630 637 """
631 638 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
632 639 common, incoming, rheads = tmp
633 640 if not incoming:
634 641 try:
635 642 if bundlename:
636 643 os.unlink(bundlename)
637 644 except OSError:
638 645 pass
639 646 return repo, [], peer.close
640 647
641 648 commonset = set(common)
642 649 rheads = [x for x in rheads if x not in commonset]
643 650
644 651 bundle = None
645 652 bundlerepo = None
646 653 localrepo = peer.local()
647 654 if bundlename or not localrepo:
648 655 # create a bundle (uncompressed if peer repo is not local)
649 656
650 657 # developer config: devel.legacy.exchange
651 658 legexc = ui.configlist(b'devel', b'legacy.exchange')
652 659 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
653 660 canbundle2 = (
654 661 not forcebundle1
655 662 and peer.capable(b'getbundle')
656 663 and peer.capable(b'bundle2')
657 664 )
658 665 if canbundle2:
659 666 with peer.commandexecutor() as e:
660 667 b2 = e.callcommand(
661 668 b'getbundle',
662 669 {
663 670 b'source': b'incoming',
664 671 b'common': common,
665 672 b'heads': rheads,
666 673 b'bundlecaps': exchange.caps20to10(
667 674 repo, role=b'client'
668 675 ),
669 676 b'cg': True,
670 677 },
671 678 ).result()
672 679
673 680 fname = bundle = changegroup.writechunks(
674 681 ui, b2._forwardchunks(), bundlename
675 682 )
676 683 else:
677 684 if peer.capable(b'getbundle'):
678 685 with peer.commandexecutor() as e:
679 686 cg = e.callcommand(
680 687 b'getbundle',
681 688 {
682 689 b'source': b'incoming',
683 690 b'common': common,
684 691 b'heads': rheads,
685 692 },
686 693 ).result()
687 694 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
688 695 # compat with older servers when pulling all remote heads
689 696
690 697 with peer.commandexecutor() as e:
691 698 cg = e.callcommand(
692 699 b'changegroup',
693 700 {
694 701 b'nodes': incoming,
695 702 b'source': b'incoming',
696 703 },
697 704 ).result()
698 705
699 706 rheads = None
700 707 else:
701 708 with peer.commandexecutor() as e:
702 709 cg = e.callcommand(
703 710 b'changegroupsubset',
704 711 {
705 712 b'bases': incoming,
706 713 b'heads': rheads,
707 714 b'source': b'incoming',
708 715 },
709 716 ).result()
710 717
711 718 if localrepo:
712 719 bundletype = b"HG10BZ"
713 720 else:
714 721 bundletype = b"HG10UN"
715 722 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
716 723 # keep written bundle?
717 724 if bundlename:
718 725 bundle = None
719 726 if not localrepo:
720 727 # use the created uncompressed bundlerepo
721 728 localrepo = bundlerepo = makebundlerepository(
722 729 repo.baseui, repo.root, fname
723 730 )
724 731
725 732 # this repo contains local and peer now, so filter out local again
726 733 common = repo.heads()
727 734 if localrepo:
728 735 # Part of common may be remotely filtered
729 736 # So use an unfiltered version
730 737 # The discovery process probably needs cleanup to avoid that
731 738 localrepo = localrepo.unfiltered()
732 739
733 740 csets = localrepo.changelog.findmissing(common, rheads)
734 741
735 742 if bundlerepo:
736 743 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
737 744
738 745 with peer.commandexecutor() as e:
739 746 remotephases = e.callcommand(
740 747 b'listkeys',
741 748 {
742 749 b'namespace': b'phases',
743 750 },
744 751 ).result()
745 752
746 753 pullop = exchange.pulloperation(
747 754 bundlerepo, peer, path=None, heads=reponodes
748 755 )
749 756 pullop.trmanager = bundletransactionmanager()
750 757 exchange._pullapplyphases(pullop, remotephases)
751 758
752 759 def cleanup():
753 760 if bundlerepo:
754 761 bundlerepo.close()
755 762 if bundle:
756 763 os.unlink(bundle)
757 764 peer.close()
758 765
759 766 return (localrepo, csets, cleanup)
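A minimal usage sketch of `getremotechanges()` following the contract documented in its docstring; the `ui`, `repo` and `peer` objects are assumed to already exist in the caller, and this is illustrative rather than tested code:

local, csets, cleanupfn = getremotechanges(ui, repo, peer)
try:
    for node in csets:                 # incoming changeset node ids
        ctx = local[node]              # readable through the (bundle) repository
        ui.status(b'%s\n' % ctx.hex())
finally:
    cleanupfn()                        # closes the peer and any temporary bundle repo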