index: use `index.has_node` in `exchangev2._pullchangesetdiscovery`...
marmoute
r43852:e461d2e6 default draft
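The change swaps a membership test against the changelog's nodemap for the index.has_node() method. A minimal sketch of the before/after forms, assuming `repo` is a local repository object and `somenode` is a 20-byte changeset node (both names are placeholders for illustration, not part of this changeset):

    cl = repo.unfiltered().changelog

    # Old form: membership test against the node -> rev mapping.
    known = somenode in cl.nodemap

    # New form: ask the revlog index directly whether the node exists.
    known = cl.index.has_node(somenode)

Both evaluate to True when the node is known locally; the second form queries the index directly instead of going through the nodemap mapping, which is the pattern this cleanup applies to _pullchangesetdiscovery below.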
@@ -1,775 +1,775 @@
1 1 # exchangev2.py - repository exchange for wire protocol version 2
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import weakref
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 nullid,
16 16 short,
17 17 )
18 18 from . import (
19 19 bookmarks,
20 20 error,
21 21 mdiff,
22 22 narrowspec,
23 23 phases,
24 24 pycompat,
25 25 setdiscovery,
26 26 )
27 27 from .interfaces import repository
28 28
29 29
30 30 def pull(pullop):
31 31 """Pull using wire protocol version 2."""
32 32 repo = pullop.repo
33 33 remote = pullop.remote
34 34
35 35 usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
36 36
37 37 # If this is a clone and it was requested to perform a "stream clone",
38 38 # we obtain the raw files data from the remote then fall back to an
39 39 # incremental pull. This is somewhat hacky and is not nearly robust enough
40 40 # for long-term usage.
41 41 if usingrawchangelogandmanifest:
42 42 with repo.transaction(b'clone'):
43 43 _fetchrawstorefiles(repo, remote)
44 44 repo.invalidate(clearfilecache=True)
45 45
46 46 tr = pullop.trmanager.transaction()
47 47
48 48 # We don't use the repo's narrow matcher here because the patterns passed
49 49 # to exchange.pull() could be different.
50 50 narrowmatcher = narrowspec.match(
51 51 repo.root,
52 52 # Empty maps to nevermatcher. So always
53 53 # set includes if missing.
54 54 pullop.includepats or {b'path:.'},
55 55 pullop.excludepats,
56 56 )
57 57
58 58 if pullop.includepats or pullop.excludepats:
59 59 pathfilter = {}
60 60 if pullop.includepats:
61 61 pathfilter[b'include'] = sorted(pullop.includepats)
62 62 if pullop.excludepats:
63 63 pathfilter[b'exclude'] = sorted(pullop.excludepats)
64 64 else:
65 65 pathfilter = None
66 66
67 67 # Figure out what needs to be fetched.
68 68 common, fetch, remoteheads = _pullchangesetdiscovery(
69 69 repo, remote, pullop.heads, abortwhenunrelated=pullop.force
70 70 )
71 71
72 72 # And fetch the data.
73 73 pullheads = pullop.heads or remoteheads
74 74 csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
75 75
76 76 # New revisions are written to the changelog. But all other updates
77 77 # are deferred. Do those now.
78 78
79 79 # Ensure all new changesets are draft by default. If the repo is
80 80 # publishing, the phase will be adjusted by the loop below.
81 81 if csetres[b'added']:
82 82 phases.registernew(repo, tr, phases.draft, csetres[b'added'])
83 83
84 84 # And adjust the phase of all changesets accordingly.
85 85 for phase in phases.phasenames:
86 86 if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
87 87 continue
88 88
89 89 phases.advanceboundary(
90 90 repo,
91 91 tr,
92 92 phases.phasenames.index(phase),
93 93 csetres[b'nodesbyphase'][phase],
94 94 )
95 95
96 96 # Write bookmark updates.
97 97 bookmarks.updatefromremote(
98 98 repo.ui,
99 99 repo,
100 100 csetres[b'bookmarks'],
101 101 remote.url(),
102 102 pullop.gettransaction,
103 103 explicit=pullop.explicitbookmarks,
104 104 )
105 105
106 106 manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
107 107
108 108 # We don't properly support shallow changesets and manifests yet. So we apply
109 109 # depth limiting locally.
110 110 if pullop.depth:
111 111 relevantcsetnodes = set()
112 112 clnode = repo.changelog.node
113 113
114 114 for rev in repo.revs(
115 115 b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
116 116 ):
117 117 relevantcsetnodes.add(clnode(rev))
118 118
119 119 csetrelevantfilter = lambda n: n in relevantcsetnodes
120 120
121 121 else:
122 122 csetrelevantfilter = lambda n: True
123 123
124 124 # If obtaining the raw store files, we need to scan the full repo to
125 125 # derive all the changesets, manifests, and linkrevs.
126 126 if usingrawchangelogandmanifest:
127 127 csetsforfiles = []
128 128 mnodesforfiles = []
129 129 manifestlinkrevs = {}
130 130
131 131 for rev in repo:
132 132 ctx = repo[rev]
133 133 node = ctx.node()
134 134
135 135 if not csetrelevantfilter(node):
136 136 continue
137 137
138 138 mnode = ctx.manifestnode()
139 139
140 140 csetsforfiles.append(node)
141 141 mnodesforfiles.append(mnode)
142 142 manifestlinkrevs[mnode] = rev
143 143
144 144 else:
145 145 csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
146 146 mnodesforfiles = manres[b'added']
147 147 manifestlinkrevs = manres[b'linkrevs']
148 148
149 149 # Find all file nodes referenced by added manifests and fetch those
150 150 # revisions.
151 151 fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
152 152 _fetchfilesfromcsets(
153 153 repo,
154 154 tr,
155 155 remote,
156 156 pathfilter,
157 157 fnodes,
158 158 csetsforfiles,
159 159 manifestlinkrevs,
160 160 shallow=bool(pullop.depth),
161 161 )
162 162
163 163
164 164 def _checkuserawstorefiledata(pullop):
165 165 """Check whether we should use rawstorefiledata command to retrieve data."""
166 166
167 167 repo = pullop.repo
168 168 remote = pullop.remote
169 169
170 170 # Command to obtain raw store data isn't available.
171 171 if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
172 172 return False
173 173
174 174 # Only honor if user requested stream clone operation.
175 175 if not pullop.streamclonerequested:
176 176 return False
177 177
178 178 # Only works on empty repos.
179 179 if len(repo):
180 180 return False
181 181
182 182 # TODO This is super hacky. There needs to be a storage API for this. We
183 183 # also need to check for compatibility with the remote.
184 184 if b'revlogv1' not in repo.requirements:
185 185 return False
186 186
187 187 return True
188 188
189 189
190 190 def _fetchrawstorefiles(repo, remote):
191 191 with remote.commandexecutor() as e:
192 192 objs = e.callcommand(
193 193 b'rawstorefiledata', {b'files': [b'changelog', b'manifestlog'],}
194 194 ).result()
195 195
196 196 # First object is a summary of files data that follows.
197 197 overall = next(objs)
198 198
199 199 progress = repo.ui.makeprogress(
200 200 _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
201 201 )
202 202 with progress:
203 203 progress.update(0)
204 204
205 205 # Next are pairs of file metadata, data.
206 206 while True:
207 207 try:
208 208 filemeta = next(objs)
209 209 except StopIteration:
210 210 break
211 211
212 212 for k in (b'location', b'path', b'size'):
213 213 if k not in filemeta:
214 214 raise error.Abort(
215 215 _(b'remote file data missing key: %s') % k
216 216 )
217 217
218 218 if filemeta[b'location'] == b'store':
219 219 vfs = repo.svfs
220 220 else:
221 221 raise error.Abort(
222 222 _(b'invalid location for raw file data: %s')
223 223 % filemeta[b'location']
224 224 )
225 225
226 226 bytesremaining = filemeta[b'size']
227 227
228 228 with vfs.open(filemeta[b'path'], b'wb') as fh:
229 229 while True:
230 230 try:
231 231 chunk = next(objs)
232 232 except StopIteration:
233 233 break
234 234
235 235 bytesremaining -= len(chunk)
236 236
237 237 if bytesremaining < 0:
238 238 raise error.Abort(
239 239 _(
240 240 b'received invalid number of bytes for file '
241 241 b'data; expected %d, got extra'
242 242 )
243 243 % filemeta[b'size']
244 244 )
245 245
246 246 progress.increment(step=len(chunk))
247 247 fh.write(chunk)
248 248
249 249 try:
250 250 if chunk.islast:
251 251 break
252 252 except AttributeError:
253 253 raise error.Abort(
254 254 _(
255 255 b'did not receive indefinite length bytestring '
256 256 b'for file data'
257 257 )
258 258 )
259 259
260 260 if bytesremaining:
261 261 raise error.Abort(
262 262 _(
263 263 b'received invalid number of bytes for '
264 264 b'file data; expected %d got %d'
265 265 )
266 266 % (
267 267 filemeta[b'size'],
268 268 filemeta[b'size'] - bytesremaining,
269 269 )
270 270 )
271 271
272 272
273 273 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
274 274 """Determine which changesets need to be pulled."""
275 275
276 276 if heads:
277 277 knownnode = repo.changelog.hasnode
278 278 if all(knownnode(head) for head in heads):
279 279 return heads, False, heads
280 280
281 281 # TODO wire protocol version 2 is capable of more efficient discovery
282 282 # than setdiscovery. Consider implementing something better.
283 283 common, fetch, remoteheads = setdiscovery.findcommonheads(
284 284 repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
285 285 )
286 286
287 287 common = set(common)
288 288 remoteheads = set(remoteheads)
289 289
290 290 # If a remote head is filtered locally, put it back in the common set.
291 291 # See the comment in exchange._pulldiscoverychangegroup() for more.
292 292
293 293 if fetch and remoteheads:
294 - nodemap = repo.unfiltered().changelog.nodemap
294 + has_node = repo.unfiltered().changelog.index.has_node
295 295
296 - common |= {head for head in remoteheads if head in nodemap}
296 + common |= {head for head in remoteheads if has_node(head)}
297 297
298 298 if set(remoteheads).issubset(common):
299 299 fetch = []
300 300
301 301 common.discard(nullid)
302 302
303 303 return common, fetch, remoteheads
304 304
305 305
306 306 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
307 307 # TODO consider adding a step here where we obtain the DAG shape first
308 308 # (or ask the server to slice changesets into chunks for us) so that
309 309 # we can perform multiple fetches in batches. This will facilitate
310 310 # resuming interrupted clones, higher server-side cache hit rates due
311 311 # to smaller segments, etc.
312 312 with remote.commandexecutor() as e:
313 313 objs = e.callcommand(
314 314 b'changesetdata',
315 315 {
316 316 b'revisions': [
317 317 {
318 318 b'type': b'changesetdagrange',
319 319 b'roots': sorted(common),
320 320 b'heads': sorted(remoteheads),
321 321 }
322 322 ],
323 323 b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
324 324 },
325 325 ).result()
326 326
327 327 # The context manager waits on all response data when exiting. So
328 328 # we need to remain in the context manager in order to stream data.
329 329 return _processchangesetdata(repo, tr, objs)
330 330
331 331
332 332 def _processchangesetdata(repo, tr, objs):
333 333 repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))
334 334
335 335 urepo = repo.unfiltered()
336 336 cl = urepo.changelog
337 337
338 338 cl.delayupdate(tr)
339 339
340 340 # The first emitted object is a header describing the data that
341 341 # follows.
342 342 meta = next(objs)
343 343
344 344 progress = repo.ui.makeprogress(
345 345 _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
346 346 )
347 347
348 348 manifestnodes = {}
349 349
350 350 def linkrev(node):
351 351 repo.ui.debug(b'add changeset %s\n' % short(node))
352 352 # Linkrev for changelog is always self.
353 353 return len(cl)
354 354
355 355 def onchangeset(cl, node):
356 356 progress.increment()
357 357
358 358 revision = cl.changelogrevision(node)
359 359
360 360 # We need to preserve the mapping of changelog revision to node
361 361 # so we can set the linkrev accordingly when manifests are added.
362 362 manifestnodes[cl.rev(node)] = revision.manifest
363 363
364 364 nodesbyphase = {phase: set() for phase in phases.phasenames}
365 365 remotebookmarks = {}
366 366
367 367 # addgroup() expects a 7-tuple describing revisions. This normalizes
368 368 # the wire data to that format.
369 369 #
370 370 # This loop also aggregates non-revision metadata, such as phase
371 371 # data.
372 372 def iterrevisions():
373 373 for cset in objs:
374 374 node = cset[b'node']
375 375
376 376 if b'phase' in cset:
377 377 nodesbyphase[cset[b'phase']].add(node)
378 378
379 379 for mark in cset.get(b'bookmarks', []):
380 380 remotebookmarks[mark] = node
381 381
382 382 # TODO add mechanism for extensions to examine records so they
383 383 # can siphon off custom data fields.
384 384
385 385 extrafields = {}
386 386
387 387 for field, size in cset.get(b'fieldsfollowing', []):
388 388 extrafields[field] = next(objs)
389 389
390 390 # Some entries might be metadata-only updates.
391 391 if b'revision' not in extrafields:
392 392 continue
393 393
394 394 data = extrafields[b'revision']
395 395
396 396 yield (
397 397 node,
398 398 cset[b'parents'][0],
399 399 cset[b'parents'][1],
400 400 # Linknode is always itself for changesets.
401 401 cset[b'node'],
402 402 # We always send full revisions. So delta base is not set.
403 403 nullid,
404 404 mdiff.trivialdiffheader(len(data)) + data,
405 405 # Flags not yet supported.
406 406 0,
407 407 )
408 408
409 409 added = cl.addgroup(
410 410 iterrevisions(), linkrev, weakref.proxy(tr), addrevisioncb=onchangeset
411 411 )
412 412
413 413 progress.complete()
414 414
415 415 return {
416 416 b'added': added,
417 417 b'nodesbyphase': nodesbyphase,
418 418 b'bookmarks': remotebookmarks,
419 419 b'manifestnodes': manifestnodes,
420 420 }
421 421
422 422
423 423 def _fetchmanifests(repo, tr, remote, manifestnodes):
424 424 rootmanifest = repo.manifestlog.getstorage(b'')
425 425
426 426 # Some manifests can be shared between changesets. Filter out revisions
427 427 # we already know about.
428 428 fetchnodes = []
429 429 linkrevs = {}
430 430 seen = set()
431 431
432 432 for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
433 433 if node in seen:
434 434 continue
435 435
436 436 try:
437 437 rootmanifest.rev(node)
438 438 except error.LookupError:
439 439 fetchnodes.append(node)
440 440 linkrevs[node] = clrev
441 441
442 442 seen.add(node)
443 443
444 444 # TODO handle tree manifests
445 445
446 446 # addgroup() expects a 7-tuple describing revisions. This normalizes
447 447 # the wire data to that format.
448 448 def iterrevisions(objs, progress):
449 449 for manifest in objs:
450 450 node = manifest[b'node']
451 451
452 452 extrafields = {}
453 453
454 454 for field, size in manifest.get(b'fieldsfollowing', []):
455 455 extrafields[field] = next(objs)
456 456
457 457 if b'delta' in extrafields:
458 458 basenode = manifest[b'deltabasenode']
459 459 delta = extrafields[b'delta']
460 460 elif b'revision' in extrafields:
461 461 basenode = nullid
462 462 revision = extrafields[b'revision']
463 463 delta = mdiff.trivialdiffheader(len(revision)) + revision
464 464 else:
465 465 continue
466 466
467 467 yield (
468 468 node,
469 469 manifest[b'parents'][0],
470 470 manifest[b'parents'][1],
471 471 # The value passed in is passed to the lookup function passed
472 472 # to addgroup(). We already have a map of manifest node to
473 473 # changelog revision number. So we just pass in the
474 474 # manifest node here and use linkrevs.__getitem__ as the
475 475 # resolution function.
476 476 node,
477 477 basenode,
478 478 delta,
479 479 # Flags not yet supported.
480 480 0,
481 481 )
482 482
483 483 progress.increment()
484 484
485 485 progress = repo.ui.makeprogress(
486 486 _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
487 487 )
488 488
489 489 commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
490 490 batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
491 491 # TODO make size configurable on client?
492 492
493 493 # We send commands 1 at a time to the remote. This is not the most
494 494 # efficient because we incur a round trip at the end of each batch.
495 495 # However, the existing frame-based reactor keeps consuming server
496 496 # data in the background. And this results in response data buffering
497 497 # in memory. This can consume gigabytes of memory.
498 498 # TODO send multiple commands in a request once background buffering
499 499 # issues are resolved.
500 500
501 501 added = []
502 502
503 503 for i in pycompat.xrange(0, len(fetchnodes), batchsize):
504 504 batch = [node for node in fetchnodes[i : i + batchsize]]
505 505 if not batch:
506 506 continue
507 507
508 508 with remote.commandexecutor() as e:
509 509 objs = e.callcommand(
510 510 b'manifestdata',
511 511 {
512 512 b'tree': b'',
513 513 b'nodes': batch,
514 514 b'fields': {b'parents', b'revision'},
515 515 b'haveparents': True,
516 516 },
517 517 ).result()
518 518
519 519 # Chomp off header object.
520 520 next(objs)
521 521
522 522 added.extend(
523 523 rootmanifest.addgroup(
524 524 iterrevisions(objs, progress),
525 525 linkrevs.__getitem__,
526 526 weakref.proxy(tr),
527 527 )
528 528 )
529 529
530 530 progress.complete()
531 531
532 532 return {
533 533 b'added': added,
534 534 b'linkrevs': linkrevs,
535 535 }
536 536
537 537
538 538 def _derivefilesfrommanifests(repo, matcher, manifestnodes):
539 539 """Determine what file nodes are relevant given a set of manifest nodes.
540 540
541 541 Returns a dict mapping file paths to dicts of file node to first manifest
542 542 node.
543 543 """
544 544 ml = repo.manifestlog
545 545 fnodes = collections.defaultdict(dict)
546 546
547 547 progress = repo.ui.makeprogress(
548 548 _(b'scanning manifests'), total=len(manifestnodes)
549 549 )
550 550
551 551 with progress:
552 552 for manifestnode in manifestnodes:
553 553 m = ml.get(b'', manifestnode)
554 554
555 555 # TODO this will pull in unwanted nodes because it takes the storage
556 556 # delta into consideration. What we really want is something that
557 557 # takes the delta between the manifest's parents. And ideally we
558 558 # would ignore file nodes that are known locally. For now, ignore
559 559 # both these limitations. This will result in incremental fetches
560 560 # requesting data we already have. So this is far from ideal.
561 561 md = m.readfast()
562 562
563 563 for path, fnode in md.items():
564 564 if matcher(path):
565 565 fnodes[path].setdefault(fnode, manifestnode)
566 566
567 567 progress.increment()
568 568
569 569 return fnodes
570 570
571 571
572 572 def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
573 573 """Fetch file data from explicit file revisions."""
574 574
575 575 def iterrevisions(objs, progress):
576 576 for filerevision in objs:
577 577 node = filerevision[b'node']
578 578
579 579 extrafields = {}
580 580
581 581 for field, size in filerevision.get(b'fieldsfollowing', []):
582 582 extrafields[field] = next(objs)
583 583
584 584 if b'delta' in extrafields:
585 585 basenode = filerevision[b'deltabasenode']
586 586 delta = extrafields[b'delta']
587 587 elif b'revision' in extrafields:
588 588 basenode = nullid
589 589 revision = extrafields[b'revision']
590 590 delta = mdiff.trivialdiffheader(len(revision)) + revision
591 591 else:
592 592 continue
593 593
594 594 yield (
595 595 node,
596 596 filerevision[b'parents'][0],
597 597 filerevision[b'parents'][1],
598 598 node,
599 599 basenode,
600 600 delta,
601 601 # Flags not yet supported.
602 602 0,
603 603 )
604 604
605 605 progress.increment()
606 606
607 607 progress = repo.ui.makeprogress(
608 608 _(b'files'),
609 609 unit=_(b'chunks'),
610 610 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
611 611 )
612 612
613 613 # TODO make batch size configurable
614 614 batchsize = 10000
615 615 fnodeslist = [x for x in sorted(fnodes.items())]
616 616
617 617 for i in pycompat.xrange(0, len(fnodeslist), batchsize):
618 618 batch = [x for x in fnodeslist[i : i + batchsize]]
619 619 if not batch:
620 620 continue
621 621
622 622 with remote.commandexecutor() as e:
623 623 fs = []
624 624 locallinkrevs = {}
625 625
626 626 for path, nodes in batch:
627 627 fs.append(
628 628 (
629 629 path,
630 630 e.callcommand(
631 631 b'filedata',
632 632 {
633 633 b'path': path,
634 634 b'nodes': sorted(nodes),
635 635 b'fields': {b'parents', b'revision'},
636 636 b'haveparents': True,
637 637 },
638 638 ),
639 639 )
640 640 )
641 641
642 642 locallinkrevs[path] = {
643 643 node: linkrevs[manifestnode]
644 644 for node, manifestnode in pycompat.iteritems(nodes)
645 645 }
646 646
647 647 for path, f in fs:
648 648 objs = f.result()
649 649
650 650 # Chomp off header objects.
651 651 next(objs)
652 652
653 653 store = repo.file(path)
654 654 store.addgroup(
655 655 iterrevisions(objs, progress),
656 656 locallinkrevs[path].__getitem__,
657 657 weakref.proxy(tr),
658 658 )
659 659
660 660
661 661 def _fetchfilesfromcsets(
662 662 repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
663 663 ):
664 664 """Fetch file data from explicit changeset revisions."""
665 665
666 666 def iterrevisions(objs, remaining, progress):
667 667 while remaining:
668 668 filerevision = next(objs)
669 669
670 670 node = filerevision[b'node']
671 671
672 672 extrafields = {}
673 673
674 674 for field, size in filerevision.get(b'fieldsfollowing', []):
675 675 extrafields[field] = next(objs)
676 676
677 677 if b'delta' in extrafields:
678 678 basenode = filerevision[b'deltabasenode']
679 679 delta = extrafields[b'delta']
680 680 elif b'revision' in extrafields:
681 681 basenode = nullid
682 682 revision = extrafields[b'revision']
683 683 delta = mdiff.trivialdiffheader(len(revision)) + revision
684 684 else:
685 685 continue
686 686
687 687 if b'linknode' in filerevision:
688 688 linknode = filerevision[b'linknode']
689 689 else:
690 690 linknode = node
691 691
692 692 yield (
693 693 node,
694 694 filerevision[b'parents'][0],
695 695 filerevision[b'parents'][1],
696 696 linknode,
697 697 basenode,
698 698 delta,
699 699 # Flags not yet supported.
700 700 0,
701 701 )
702 702
703 703 progress.increment()
704 704 remaining -= 1
705 705
706 706 progress = repo.ui.makeprogress(
707 707 _(b'files'),
708 708 unit=_(b'chunks'),
709 709 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
710 710 )
711 711
712 712 commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
713 713 batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
714 714
715 715 shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
716 716 fields = {b'parents', b'revision'}
717 717 clrev = repo.changelog.rev
718 718
719 719 # There are no guarantees that we'll have ancestor revisions if
720 720 # a) this repo has shallow file storage b) shallow data fetching is enabled.
721 721 # Force remote to not delta against possibly unknown revisions when these
722 722 # conditions hold.
723 723 haveparents = not (shallowfiles or shallow)
724 724
725 725 # Similarly, we may not have calculated linkrevs for all incoming file
726 726 # revisions. Ask the remote to do work for us in this case.
727 727 if not haveparents:
728 728 fields.add(b'linknode')
729 729
730 730 for i in pycompat.xrange(0, len(csets), batchsize):
731 731 batch = [x for x in csets[i : i + batchsize]]
732 732 if not batch:
733 733 continue
734 734
735 735 with remote.commandexecutor() as e:
736 736 args = {
737 737 b'revisions': [
738 738 {b'type': b'changesetexplicit', b'nodes': batch,}
739 739 ],
740 740 b'fields': fields,
741 741 b'haveparents': haveparents,
742 742 }
743 743
744 744 if pathfilter:
745 745 args[b'pathfilter'] = pathfilter
746 746
747 747 objs = e.callcommand(b'filesdata', args).result()
748 748
749 749 # First object is an overall header.
750 750 overall = next(objs)
751 751
752 752 # We have overall['totalpaths'] segments.
753 753 for i in pycompat.xrange(overall[b'totalpaths']):
754 754 header = next(objs)
755 755
756 756 path = header[b'path']
757 757 store = repo.file(path)
758 758
759 759 linkrevs = {
760 760 fnode: manlinkrevs[mnode]
761 761 for fnode, mnode in pycompat.iteritems(fnodes[path])
762 762 }
763 763
764 764 def getlinkrev(node):
765 765 if node in linkrevs:
766 766 return linkrevs[node]
767 767 else:
768 768 return clrev(node)
769 769
770 770 store.addgroup(
771 771 iterrevisions(objs, header[b'totalitems'], progress),
772 772 getlinkrev,
773 773 weakref.proxy(tr),
774 774 maybemissingparents=shallow,
775 775 )