changegroup: remove one special case from lookupmflinknode...
Augie Fackler
r27239:65c47779 default
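The diff below is a small refactoring of cg1packer.generate(): previously, lookupmflinknode was always the slow-path closure and re-tested fastpathlinkrev on every manifest node; after this commit, the fast path simply binds lookupmflinknode to mfs.__getitem__, and the closure (with its fnodes side effect) exists only on the slow path. A minimal sketch of the pattern, with identifiers mirroring the diff and the fnodes bookkeeping elided:

    # mfs maps manifest node -> changelog node that introduced it,
    # as collected by lookupcl() in generate() below.
    mfs = {b"mf-node": b"cl-node"}
    fastpathlinkrev = True

    # Before: one closure, flag checked on every call.
    def lookupmflinknode_old(x):
        clnode = mfs[x]
        if not fastpathlinkrev:
            pass  # populate fnodes as a side effect (elided)
        return clnode

    # After: the fast path is the dict's own lookup.
    if fastpathlinkrev:
        lookupmflinknode = mfs.__getitem__
    else:
        def lookupmflinknode(x):
            clnode = mfs[x]
            # populate fnodes as a side effect (elided)
            return clnode

    assert lookupmflinknode(b"mf-node") == lookupmflinknode_old(b"mf-node")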
@@ -1,971 +1,973
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 branchmap,
25 25 dagutil,
26 26 discovery,
27 27 error,
28 28 mdiff,
29 29 phases,
30 30 util,
31 31 )
32 32
33 33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 35
36 36 def readexactly(stream, n):
37 37 '''read n bytes from stream.read and abort if less was available'''
38 38 s = stream.read(n)
39 39 if len(s) < n:
40 40 raise error.Abort(_("stream ended unexpectedly"
41 41 " (got %d bytes, expected %d)")
42 42 % (len(s), n))
43 43 return s
44 44
45 45 def getchunk(stream):
46 46 """return the next chunk from stream as a string"""
47 47 d = readexactly(stream, 4)
48 48 l = struct.unpack(">l", d)[0]
49 49 if l <= 4:
50 50 if l:
51 51 raise error.Abort(_("invalid chunk length %d") % l)
52 52 return ""
53 53 return readexactly(stream, l - 4)
54 54
55 55 def chunkheader(length):
56 56 """return a changegroup chunk header (string)"""
57 57 return struct.pack(">l", length + 4)
58 58
59 59 def closechunk():
60 60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 61 return struct.pack(">l", 0)
62 62
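The three framing helpers above define the chunk wire format: a 4-byte big-endian length that counts itself, with a literal zero length marking a group boundary. A round-trip sketch of that framing (standalone, with the helpers reimplemented so it runs outside Mercurial; the error handling of readexactly is omitted):

    import struct
    from io import BytesIO

    def chunkheader(length):
        # 4-byte big-endian length, counting the header itself
        return struct.pack(">l", length + 4)

    def getchunk(stream):
        l = struct.unpack(">l", stream.read(4))[0]
        if l <= 4:
            return b""  # zero length: group boundary
        return stream.read(l - 4)

    payload = b"hello changegroup"
    stream = BytesIO(chunkheader(len(payload)) + payload
                     + struct.pack(">l", 0))
    assert getchunk(stream) == payload   # data chunk
    assert getchunk(stream) == b""       # closechunk() terminator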
63 63 def combineresults(results):
64 64 """logic to combine 0 or more addchangegroup results into one"""
65 65 changedheads = 0
66 66 result = 1
67 67 for ret in results:
68 68 # If any changegroup result is 0, return 0
69 69 if ret == 0:
70 70 result = 0
71 71 break
72 72 if ret < -1:
73 73 changedheads += ret + 1
74 74 elif ret > 1:
75 75 changedheads += ret - 1
76 76 if changedheads > 0:
77 77 result = 1 + changedheads
78 78 elif changedheads < 0:
79 79 result = -1 + changedheads
80 80 return result
81 81
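To make the head-count arithmetic above concrete: a single apply() result of 1+n means n heads were added, -1-n means n were removed, and 0 means nothing changed (see apply()'s docstring further down); combineresults() sums the deltas and re-encodes them the same way. A copy of the function with a few worked values:

    def combineresults(results):
        changedheads = 0
        result = 1
        for ret in results:
            if ret == 0:              # any zero result short-circuits
                result = 0
                break
            if ret < -1:
                changedheads += ret + 1
            elif ret > 1:
                changedheads += ret - 1
        if changedheads > 0:
            result = 1 + changedheads
        elif changedheads < 0:
            result = -1 + changedheads
        return result

    assert combineresults([]) == 1         # no results: heads unchanged
    assert combineresults([0]) == 0        # a zero result means no change
    assert combineresults([2, 3]) == 4     # +1 and +2 heads -> 1 + 3
    assert combineresults([-2, -3]) == -4  # -1 and -2 heads -> -1 - 3
    assert combineresults([2, -2]) == 1    # net zero: back to "unchanged"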
82 82 bundletypes = {
83 83 "": ("", None), # only when using unbundle on ssh and old http servers
84 84 # since the unification ssh accepts a header but there
85 85 # is no capability signaling it.
86 86 "HG20": (), # special-cased below
87 87 "HG10UN": ("HG10UN", None),
88 88 "HG10BZ": ("HG10", 'BZ'),
89 89 "HG10GZ": ("HG10GZ", 'GZ'),
90 90 }
91 91
92 92 # hgweb uses this list to communicate its preferred type
93 93 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
94 94
95 95 def writechunks(ui, chunks, filename, vfs=None):
96 96 """Write chunks to a file and return its filename.
97 97
98 98 The stream is assumed to be a bundle file.
99 99 Existing files will not be overwritten.
100 100 If no filename is specified, a temporary file is created.
101 101 """
102 102 fh = None
103 103 cleanup = None
104 104 try:
105 105 if filename:
106 106 if vfs:
107 107 fh = vfs.open(filename, "wb")
108 108 else:
109 109 fh = open(filename, "wb")
110 110 else:
111 111 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
112 112 fh = os.fdopen(fd, "wb")
113 113 cleanup = filename
114 114 for c in chunks:
115 115 fh.write(c)
116 116 cleanup = None
117 117 return filename
118 118 finally:
119 119 if fh is not None:
120 120 fh.close()
121 121 if cleanup is not None:
122 122 if filename and vfs:
123 123 vfs.unlink(cleanup)
124 124 else:
125 125 os.unlink(cleanup)
126 126
127 127 def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
128 128 """Write a bundle file and return its filename.
129 129
130 130 Existing files will not be overwritten.
131 131 If no filename is specified, a temporary file is created.
132 132 bz2 compression can be turned off.
133 133 The bundle file will be deleted in case of errors.
134 134 """
135 135
136 136 if bundletype == "HG20":
137 137 from . import bundle2
138 138 bundle = bundle2.bundle20(ui)
139 139 bundle.setcompression(compression)
140 140 part = bundle.newpart('changegroup', data=cg.getchunks())
141 141 part.addparam('version', cg.version)
142 142 chunkiter = bundle.getchunks()
143 143 else:
144 144 # compression argument is only for the bundle2 case
145 145 assert compression is None
146 146 if cg.version != '01':
147 147 raise error.Abort(_('old bundle types only support v1 '
148 148 'changegroups'))
149 149 header, comp = bundletypes[bundletype]
150 150 if comp not in util.compressors:
151 151 raise error.Abort(_('unknown stream compression type: %s')
152 152 % comp)
153 153 z = util.compressors[comp]()
154 154 subchunkiter = cg.getchunks()
155 155 def chunkiter():
156 156 yield header
157 157 for chunk in subchunkiter:
158 158 yield z.compress(chunk)
159 159 yield z.flush()
160 160 chunkiter = chunkiter()
161 161
162 162 # parse the changegroup data, otherwise we will block
163 163 # in case of sshrepo because we don't know the end of the stream
164 164
165 165 # an empty chunkgroup is the end of the changegroup
166 166 # a changegroup has at least 2 chunkgroups (changelog and manifest).
167 167 # after that, an empty chunkgroup is the end of the changegroup
168 168 return writechunks(ui, chunkiter, filename, vfs=vfs)
169 169
170 170 class cg1unpacker(object):
171 171 """Unpacker for cg1 changegroup streams.
172 172
173 173 A changegroup unpacker handles the framing of the revision data in
174 174 the wire format. Most consumers will want to use the apply()
175 175 method to add the changes from the changegroup to a repository.
176 176
177 177 If you're forwarding a changegroup unmodified to another consumer,
178 178 use getchunks(), which returns an iterator of changegroup
179 179 chunks. This is mostly useful for cases where you need to know the
180 180 data stream has ended by observing the end of the changegroup.
181 181
182 182 deltachunk() is useful only if you're applying delta data. Most
183 183 consumers should prefer apply() instead.
184 184
185 185 A few other public methods exist. Those are used only for
186 186 bundlerepo and some debug commands - their use is discouraged.
187 187 """
188 188 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
189 189 deltaheadersize = struct.calcsize(deltaheader)
190 190 version = '01'
191 191 def __init__(self, fh, alg):
192 192 if alg == 'UN':
193 193 alg = None # get more modern without breaking too much
194 194 if alg not in util.decompressors:
195 195 raise error.Abort(_('unknown stream compression type: %s')
196 196 % alg)
197 197 if alg == 'BZ':
198 198 alg = '_truncatedBZ'
199 199 self._stream = util.decompressors[alg](fh)
200 200 self._type = alg
201 201 self.callback = None
202 202
203 203 # These methods (compressed, read, seek, tell) all appear to only
204 204 # be used by bundlerepo, but it's a little hard to tell.
205 205 def compressed(self):
206 206 return self._type is not None
207 207 def read(self, l):
208 208 return self._stream.read(l)
209 209 def seek(self, pos):
210 210 return self._stream.seek(pos)
211 211 def tell(self):
212 212 return self._stream.tell()
213 213 def close(self):
214 214 return self._stream.close()
215 215
216 216 def _chunklength(self):
217 217 d = readexactly(self._stream, 4)
218 218 l = struct.unpack(">l", d)[0]
219 219 if l <= 4:
220 220 if l:
221 221 raise error.Abort(_("invalid chunk length %d") % l)
222 222 return 0
223 223 if self.callback:
224 224 self.callback()
225 225 return l - 4
226 226
227 227 def changelogheader(self):
228 228 """v10 does not have a changelog header chunk"""
229 229 return {}
230 230
231 231 def manifestheader(self):
232 232 """v10 does not have a manifest header chunk"""
233 233 return {}
234 234
235 235 def filelogheader(self):
236 236 """return the header of the filelogs chunk, v10 only has the filename"""
237 237 l = self._chunklength()
238 238 if not l:
239 239 return {}
240 240 fname = readexactly(self._stream, l)
241 241 return {'filename': fname}
242 242
243 243 def _deltaheader(self, headertuple, prevnode):
244 244 node, p1, p2, cs = headertuple
245 245 if prevnode is None:
246 246 deltabase = p1
247 247 else:
248 248 deltabase = prevnode
249 249 return node, p1, p2, deltabase, cs
250 250
251 251 def deltachunk(self, prevnode):
252 252 l = self._chunklength()
253 253 if not l:
254 254 return {}
255 255 headerdata = readexactly(self._stream, self.deltaheadersize)
256 256 header = struct.unpack(self.deltaheader, headerdata)
257 257 delta = readexactly(self._stream, l - self.deltaheadersize)
258 258 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
259 259 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
260 260 'deltabase': deltabase, 'delta': delta}
261 261
262 262 def getchunks(self):
263 263 """returns all the chunks contains in the bundle
264 264
265 265 Used when you need to forward the binary stream to a file or another
266 266 network API. To do so, it parses the changegroup data; otherwise it would
267 267 block in case of sshrepo because it doesn't know the end of the stream.
268 268 """
269 269 # an empty chunkgroup is the end of the changegroup
270 270 # a changegroup has at least 2 chunkgroups (changelog and manifest).
271 271 # after that, an empty chunkgroup is the end of the changegroup
272 272 empty = False
273 273 count = 0
274 274 while not empty or count <= 2:
275 275 empty = True
276 276 count += 1
277 277 while True:
278 278 chunk = getchunk(self)
279 279 if not chunk:
280 280 break
281 281 empty = False
282 282 yield chunkheader(len(chunk))
283 283 pos = 0
284 284 while pos < len(chunk):
285 285 next = pos + 2**20
286 286 yield chunk[pos:next]
287 287 pos = next
288 288 yield closechunk()
289 289
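The termination rule in getchunks() above can be seen with a synthetic stream: three bare group terminators form the smallest well-formed changegroup body (empty changelog group, empty manifest group, then the empty group that ends the stream). A standalone sketch of the same loop (getchunk simplified, no abort on bad lengths):

    import struct
    from io import BytesIO

    def getchunk(stream):
        l = struct.unpack(">l", stream.read(4))[0]
        return stream.read(l - 4) if l > 4 else b""

    # Empty changelog group, empty manifest group, end marker.
    stream = BytesIO(struct.pack(">l", 0) * 3)

    empty = False
    count = 0
    while not empty or count <= 2:   # same condition as getchunks()
        empty = True
        count += 1
        while True:
            chunk = getchunk(stream)
            if not chunk:
                break
            empty = False
    assert count == 3                # two mandatory groups + end marker
    assert stream.read() == b""      # stream fully consumed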
290 290 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
291 291 # We know that we'll never have more manifests than we had
292 292 # changesets.
293 293 self.callback = prog(_('manifests'), numchanges)
294 294 # no need to check for empty manifest group here:
295 295 # if the result of the merge of 1 and 2 is the same in 3 and 4,
296 296 # no new manifest will be created and the manifest group will
297 297 # be empty during the pull
298 298 self.manifestheader()
299 299 repo.manifest.addgroup(self, revmap, trp)
300 300 repo.ui.progress(_('manifests'), None)
301 301
302 302 def apply(self, repo, srctype, url, emptyok=False,
303 303 targetphase=phases.draft, expectedtotal=None):
304 304 """Add the changegroup returned by source.read() to this repo.
305 305 srctype is a string like 'push', 'pull', or 'unbundle'. url is
306 306 the URL of the repo where this changegroup is coming from.
307 307
308 308 Return an integer summarizing the change to this repo:
309 309 - nothing changed or no source: 0
310 310 - more heads than before: 1+added heads (2..n)
311 311 - fewer heads than before: -1-removed heads (-2..-n)
312 312 - number of heads stays the same: 1
313 313 """
314 314 repo = repo.unfiltered()
315 315 wasempty = (len(repo.changelog) == 0)
316 316 def csmap(x):
317 317 repo.ui.debug("add changeset %s\n" % short(x))
318 318 return len(cl)
319 319
320 320 def revmap(x):
321 321 return cl.rev(x)
322 322
323 323 changesets = files = revisions = 0
324 324
325 325 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
326 326 try:
327 327 # The transaction could have been created before and already
328 328 # carries source information. In this case we use the top
329 329 # level data. We overwrite the argument because we need to use
330 330 # the top level value (if they exist) in this function.
331 331 srctype = tr.hookargs.setdefault('source', srctype)
332 332 url = tr.hookargs.setdefault('url', url)
333 333 repo.hook('prechangegroup', throw=True, **tr.hookargs)
334 334
335 335 # write changelog data to temp files so concurrent readers
336 336 # will not see an inconsistent view
337 337 cl = repo.changelog
338 338 cl.delayupdate(tr)
339 339 oldheads = cl.heads()
340 340
341 341 trp = weakref.proxy(tr)
342 342 # pull off the changeset group
343 343 repo.ui.status(_("adding changesets\n"))
344 344 clstart = len(cl)
345 345 class prog(object):
346 346 def __init__(self, step, total):
347 347 self._step = step
348 348 self._total = total
349 349 self._count = 1
350 350 def __call__(self):
351 351 repo.ui.progress(self._step, self._count, unit=_('chunks'),
352 352 total=self._total)
353 353 self._count += 1
354 354 self.callback = prog(_('changesets'), expectedtotal)
355 355
356 356 efiles = set()
357 357 def onchangelog(cl, node):
358 358 efiles.update(cl.read(node)[3])
359 359
360 360 self.changelogheader()
361 361 srccontent = cl.addgroup(self, csmap, trp,
362 362 addrevisioncb=onchangelog)
363 363 efiles = len(efiles)
364 364
365 365 if not (srccontent or emptyok):
366 366 raise error.Abort(_("received changelog group is empty"))
367 367 clend = len(cl)
368 368 changesets = clend - clstart
369 369 repo.ui.progress(_('changesets'), None)
370 370
371 371 # pull off the manifest group
372 372 repo.ui.status(_("adding manifests\n"))
373 373 self._unpackmanifests(repo, revmap, trp, prog, changesets)
374 374
375 375 needfiles = {}
376 376 if repo.ui.configbool('server', 'validate', default=False):
377 377 # validate incoming csets have their manifests
378 378 for cset in xrange(clstart, clend):
379 379 mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
380 380 mfest = repo.manifest.readdelta(mfnode)
381 381 # store file nodes we must see
382 382 for f, n in mfest.iteritems():
383 383 needfiles.setdefault(f, set()).add(n)
384 384
385 385 # process the files
386 386 repo.ui.status(_("adding file changes\n"))
387 387 self.callback = None
388 388 pr = prog(_('files'), efiles)
389 389 newrevs, newfiles = _addchangegroupfiles(
390 390 repo, self, revmap, trp, pr, needfiles, wasempty)
391 391 revisions += newrevs
392 392 files += newfiles
393 393
394 394 dh = 0
395 395 if oldheads:
396 396 heads = cl.heads()
397 397 dh = len(heads) - len(oldheads)
398 398 for h in heads:
399 399 if h not in oldheads and repo[h].closesbranch():
400 400 dh -= 1
401 401 htext = ""
402 402 if dh:
403 403 htext = _(" (%+d heads)") % dh
404 404
405 405 repo.ui.status(_("added %d changesets"
406 406 " with %d changes to %d files%s\n")
407 407 % (changesets, revisions, files, htext))
408 408 repo.invalidatevolatilesets()
409 409
410 410 if changesets > 0:
411 411 if 'node' not in tr.hookargs:
412 412 tr.hookargs['node'] = hex(cl.node(clstart))
413 413 hookargs = dict(tr.hookargs)
414 414 else:
415 415 hookargs = dict(tr.hookargs)
416 416 hookargs['node'] = hex(cl.node(clstart))
417 417 repo.hook('pretxnchangegroup', throw=True, **hookargs)
418 418
419 419 added = [cl.node(r) for r in xrange(clstart, clend)]
420 420 publishing = repo.publishing()
421 421 if srctype in ('push', 'serve'):
422 422 # Old servers can not push the boundary themselves.
423 423 # New servers won't push the boundary if changeset already
424 424 # exists locally as secret
425 425 #
426 426 # We should not use added here but the list of all changes in
427 427 # the bundle
428 428 if publishing:
429 429 phases.advanceboundary(repo, tr, phases.public, srccontent)
430 430 else:
431 431 # Those changesets have been pushed from the outside, their
432 432 # phases are going to be pushed alongside. Therefore,
433 433 # `targetphase` is ignored.
434 434 phases.advanceboundary(repo, tr, phases.draft, srccontent)
435 435 phases.retractboundary(repo, tr, phases.draft, added)
436 436 elif srctype != 'strip':
437 437 # publishing only alters behavior during push
438 438 #
439 439 # strip should not touch boundary at all
440 440 phases.retractboundary(repo, tr, targetphase, added)
441 441
442 442 if changesets > 0:
443 443 if srctype != 'strip':
444 444 # During strip, branchcache is invalid but the upcoming call to
445 445 # `destroyed` will repair it.
446 446 # In other cases we can safely update the cache on disk.
447 447 branchmap.updatecache(repo.filtered('served'))
448 448
449 449 def runhooks():
450 450 # These hooks run when the lock releases, not when the
451 451 # transaction closes. So it's possible for the changelog
452 452 # to have changed since we last saw it.
453 453 if clstart >= len(repo):
454 454 return
455 455
456 456 # forcefully update the on-disk branch cache
457 457 repo.ui.debug("updating the branch cache\n")
458 458 repo.hook("changegroup", **hookargs)
459 459
460 460 for n in added:
461 461 args = hookargs.copy()
462 462 args['node'] = hex(n)
463 463 repo.hook("incoming", **args)
464 464
465 465 newheads = [h for h in repo.heads() if h not in oldheads]
466 466 repo.ui.log("incoming",
467 467 "%s incoming changes - new heads: %s\n",
468 468 len(added),
469 469 ', '.join([hex(c[:6]) for c in newheads]))
470 470
471 471 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
472 472 lambda tr: repo._afterlock(runhooks))
473 473
474 474 tr.close()
475 475
476 476 finally:
477 477 tr.release()
478 478 repo.ui.flush()
479 479 # never return 0 here:
480 480 if dh < 0:
481 481 return dh - 1
482 482 else:
483 483 return dh + 1
484 484
485 485 class cg2unpacker(cg1unpacker):
486 486 """Unpacker for cg2 streams.
487 487
488 488 cg2 streams add support for generaldelta, so the delta header
489 489 format is slightly different. All other features about the data
490 490 remain the same.
491 491 """
492 492 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
493 493 deltaheadersize = struct.calcsize(deltaheader)
494 494 version = '02'
495 495
496 496 def _deltaheader(self, headertuple, prevnode):
497 497 node, p1, p2, deltabase, cs = headertuple
498 498 return node, p1, p2, deltabase, cs
499 499
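The only wire-level difference between the two unpackers is the delta header: cg1 packs four 20-byte nodes (node, p1, p2, linknode) and infers the delta base, while cg2 carries the delta base as an explicit fifth field. A Python 3 sketch of both formats (synthetic node values; the format strings match the constants at the top of the file):

    import struct

    CG1_HEADER = "20s20s20s20s"       # node, p1, p2, linknode
    CG2_HEADER = "20s20s20s20s20s"    # node, p1, p2, deltabase, linknode

    assert struct.calcsize(CG1_HEADER) == 80
    assert struct.calcsize(CG2_HEADER) == 100

    node, p1, p2, base, link = (bytes([i]) * 20 for i in range(1, 6))

    # cg1: no deltabase on the wire; _deltaheader() falls back to p1
    # for the first chunk and to the previous node afterwards.
    hdr = struct.unpack(CG1_HEADER,
                        struct.pack(CG1_HEADER, node, p1, p2, link))
    assert hdr[3] == link             # cs/linknode is the 4th field
    prevnode = None
    deltabase = p1 if prevnode is None else prevnode
    assert deltabase == p1

    # cg2: deltabase travels explicitly in the header.
    hdr = struct.unpack(CG2_HEADER,
                        struct.pack(CG2_HEADER, node, p1, p2, base, link))
    assert hdr[3] == base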
500 500 class headerlessfixup(object):
501 501 def __init__(self, fh, h):
502 502 self._h = h
503 503 self._fh = fh
504 504 def read(self, n):
505 505 if self._h:
506 506 d, self._h = self._h[:n], self._h[n:]
507 507 if len(d) < n:
508 508 d += readexactly(self._fh, n - len(d))
509 509 return d
510 510 return readexactly(self._fh, n)
511 511
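headerlessfixup exists for callers that had to consume the magic header before deciding which unpacker to build: it glues the already-read bytes back in front of the remaining stream. A standalone sketch of the same idea (readexactly replaced by a plain read for brevity):

    from io import BytesIO

    class headerlessfixup(object):
        def __init__(self, fh, h):
            self._h = h      # bytes already consumed from fh
            self._fh = fh
        def read(self, n):
            if self._h:
                d, self._h = self._h[:n], self._h[n:]
                if len(d) < n:
                    d += self._fh.read(n - len(d))
                return d
            return self._fh.read(n)

    raw = BytesIO(b"HG10UNrest-of-stream")
    magic = raw.read(6)                  # peeked to pick an unpacker
    fixed = headerlessfixup(raw, magic)  # ...then handed back whole
    assert fixed.read(4) == b"HG10"
    assert fixed.read(6) == b"UNrest"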
512 512 class cg1packer(object):
513 513 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
514 514 version = '01'
515 515 def __init__(self, repo, bundlecaps=None):
516 516 """Given a source repo, construct a bundler.
517 517
518 518 bundlecaps is optional and can be used to specify the set of
519 519 capabilities which can be used to build the bundle.
520 520 """
521 521 # Set of capabilities we can use to build the bundle.
522 522 if bundlecaps is None:
523 523 bundlecaps = set()
524 524 self._bundlecaps = bundlecaps
525 525 # experimental config: bundle.reorder
526 526 reorder = repo.ui.config('bundle', 'reorder', 'auto')
527 527 if reorder == 'auto':
528 528 reorder = None
529 529 else:
530 530 reorder = util.parsebool(reorder)
531 531 self._repo = repo
532 532 self._reorder = reorder
533 533 self._progress = repo.ui.progress
534 534 if self._repo.ui.verbose and not self._repo.ui.debugflag:
535 535 self._verbosenote = self._repo.ui.note
536 536 else:
537 537 self._verbosenote = lambda s: None
538 538
539 539 def close(self):
540 540 return closechunk()
541 541
542 542 def fileheader(self, fname):
543 543 return chunkheader(len(fname)) + fname
544 544
545 545 def group(self, nodelist, revlog, lookup, units=None):
546 546 """Calculate a delta group, yielding a sequence of changegroup chunks
547 547 (strings).
548 548
549 549 Given a list of changeset revs, return a set of deltas and
550 550 metadata corresponding to nodes. The first delta is
551 551 first parent(nodelist[0]) -> nodelist[0], the receiver is
552 552 guaranteed to have this parent as it has all history before
553 553 these changesets. In the case firstparent is nullrev the
554 554 changegroup starts with a full revision.
555 555
556 556 If units is not None, progress detail will be generated; units specifies
557 557 the type of revlog that is touched (changelog, manifest, etc.).
558 558 """
559 559 # if we don't have any revisions touched by these changesets, bail
560 560 if len(nodelist) == 0:
561 561 yield self.close()
562 562 return
563 563
564 564 # for generaldelta revlogs, we linearize the revs; this will both be
565 565 # much quicker and generate a much smaller bundle
566 566 if (revlog._generaldelta and self._reorder is None) or self._reorder:
567 567 dag = dagutil.revlogdag(revlog)
568 568 revs = set(revlog.rev(n) for n in nodelist)
569 569 revs = dag.linearize(revs)
570 570 else:
571 571 revs = sorted([revlog.rev(n) for n in nodelist])
572 572
573 573 # add the parent of the first rev
574 574 p = revlog.parentrevs(revs[0])[0]
575 575 revs.insert(0, p)
576 576
577 577 # build deltas
578 578 total = len(revs) - 1
579 579 msgbundling = _('bundling')
580 580 for r in xrange(len(revs) - 1):
581 581 if units is not None:
582 582 self._progress(msgbundling, r + 1, unit=units, total=total)
583 583 prev, curr = revs[r], revs[r + 1]
584 584 linknode = lookup(revlog.node(curr))
585 585 for c in self.revchunk(revlog, curr, prev, linknode):
586 586 yield c
587 587
588 588 if units is not None:
589 589 self._progress(msgbundling, None)
590 590 yield self.close()
591 591
592 592 # filter any nodes that claim to be part of the known set
593 593 def prune(self, revlog, missing, commonrevs):
594 594 rr, rl = revlog.rev, revlog.linkrev
595 595 return [n for n in missing if rl(rr(n)) not in commonrevs]
596 596
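prune() above drops any node whose introducing changeset (its linkrev) is already known to the other side. A sketch with a dict-backed stand-in for a revlog (purely illustrative; real revlogs compute rev and linkrev from the index):

    class stubrevlog(object):
        """Dict-backed stand-in for a revlog, for illustration only."""
        def __init__(self, revs, linkrevs):
            self._revs = revs          # node -> rev
            self._linkrevs = linkrevs  # rev -> linkrev
        def rev(self, node):
            return self._revs[node]
        def linkrev(self, rev):
            return self._linkrevs[rev]

    def prune(revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    rl = stubrevlog({b"a": 0, b"b": 1}, {0: 10, 1: 20})
    # rev 0 was introduced by changeset 10, which the remote has;
    # rev 1 by changeset 20, which it lacks -> only b"b" survives.
    assert prune(rl, [b"a", b"b"], commonrevs={10}) == [b"b"]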
597 597 def _packmanifests(self, mfnodes, lookuplinknode):
598 598 """Pack flat manifests into a changegroup stream."""
599 599 ml = self._repo.manifest
600 600 size = 0
601 601 for chunk in self.group(
602 602 mfnodes, ml, lookuplinknode, units=_('manifests')):
603 603 size += len(chunk)
604 604 yield chunk
605 605 self._verbosenote(_('%8.i (manifests)\n') % size)
606 606
607 607 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
608 608 '''yield a sequence of changegroup chunks (strings)'''
609 609 repo = self._repo
610 610 cl = repo.changelog
611 611 ml = repo.manifest
612 612
613 613 clrevorder = {}
614 614 mfs = {} # needed manifests
615 615 fnodes = {} # needed file nodes
616 616 # maps manifest node id -> set(changed files)
617 617 mfchangedfiles = {}
618 618
619 619 # Callback for the changelog, used to collect changed files and manifest
620 620 # nodes.
621 621 # Returns the linkrev node (identity in the changelog case).
622 622 def lookupcl(x):
623 623 c = cl.read(x)
624 624 clrevorder[x] = len(clrevorder)
625 625 n = c[0]
626 626 # record the first changeset introducing this manifest version
627 627 mfs.setdefault(n, x)
628 628 # Record a complete list of potentially-changed files in
629 629 # this manifest.
630 630 mfchangedfiles.setdefault(n, set()).update(c[3])
631 631 return x
632 632
633 633 self._verbosenote(_('uncompressed size of bundle content:\n'))
634 634 size = 0
635 635 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
636 636 size += len(chunk)
637 637 yield chunk
638 638 self._verbosenote(_('%8.i (changelog)\n') % size)
639 639
640 640 # We need to make sure that the linkrev in the changegroup refers to
641 641 # the first changeset that introduced the manifest or file revision.
642 642 # The fastpath is usually safer than the slowpath, because the filelogs
643 643 # are walked in revlog order.
644 644 #
645 645 # When taking the slowpath with reorder=None and the manifest revlog
646 646 # uses generaldelta, the manifest may be walked in the "wrong" order.
647 647 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
648 648 # cc0ff93d0c0c).
649 649 #
650 650 # When taking the fastpath, we are only vulnerable to reordering
651 651 # of the changelog itself. The changelog never uses generaldelta, so
652 652 # it is only reordered when reorder=True. To handle this case, we
653 653 # simply take the slowpath, which already has the 'clrevorder' logic.
654 654 # This was also fixed in cc0ff93d0c0c.
655 655 fastpathlinkrev = fastpathlinkrev and not self._reorder
656 656 # Callback for the manifest, used to collect linkrevs for filelog
657 657 # revisions.
658 658 # Returns the linkrev node (collected in lookupcl).
659 if fastpathlinkrev:
660 lookupmflinknode = mfs.__getitem__
661 else:
659 662 def lookupmflinknode(x):
660 663 """Callback for looking up the linknode for manifests.
661 664
662 665 Returns the linkrev node for the specified manifest.
663 666
664 667 SIDE EFFECT:
665 668
666 669 fclnodes gets populated with the list of relevant
667 file nodes if we're not using fastpathlinkrev.
670 file nodes.
668 671
669 672 Note that this means you can't trust fclnodes until
670 673 after manifests have been sent to the client.
671 674 """
672 675 clnode = mfs[x]
673 if not fastpathlinkrev:
674 676 mdata = ml.readfast(x)
675 677 for f in mfchangedfiles[x]:
676 678 try:
677 679 n = mdata[f]
678 680 except KeyError:
679 681 continue
680 682 # record the first changeset introducing this filelog
681 683 # version
682 684 fclnodes = fnodes.setdefault(f, {})
683 685 fclnode = fclnodes.setdefault(n, clnode)
684 686 if clrevorder[clnode] < clrevorder[fclnode]:
685 687 fclnodes[n] = clnode
686 688 return clnode
687 689
688 690 mfnodes = self.prune(ml, mfs, commonrevs)
689 691 for x in self._packmanifests(mfnodes, lookupmflinknode):
690 692 yield x
691 693
692 694 mfs.clear()
693 695 clrevs = set(cl.rev(x) for x in clnodes)
694 696
695 697 def linknodes(filerevlog, fname):
696 698 if fastpathlinkrev:
697 699 llr = filerevlog.linkrev
698 700 def genfilenodes():
699 701 for r in filerevlog:
700 702 linkrev = llr(r)
701 703 if linkrev in clrevs:
702 704 yield filerevlog.node(r), cl.node(linkrev)
703 705 return dict(genfilenodes())
704 706 return fnodes.get(fname, {})
705 707
706 708 changedfiles = set()
707 709 for x in mfchangedfiles.itervalues():
708 710 changedfiles.update(x)
709 711 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
710 712 source):
711 713 yield chunk
712 714
713 715 yield self.close()
714 716
715 717 if clnodes:
716 718 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
717 719
718 720 # The 'source' parameter is useful for extensions
719 721 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
720 722 repo = self._repo
721 723 progress = self._progress
722 724 msgbundling = _('bundling')
723 725
724 726 total = len(changedfiles)
725 727 # for progress output
726 728 msgfiles = _('files')
727 729 for i, fname in enumerate(sorted(changedfiles)):
728 730 filerevlog = repo.file(fname)
729 731 if not filerevlog:
730 732 raise error.Abort(_("empty or missing revlog for %s") % fname)
731 733
732 734 linkrevnodes = linknodes(filerevlog, fname)
733 735 # Lookup for filenodes; we collected the linkrev nodes above in the
734 736 # fastpath case and with lookupmflinknode in the slowpath case.
735 737 def lookupfilelog(x):
736 738 return linkrevnodes[x]
737 739
738 740 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
739 741 if filenodes:
740 742 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
741 743 total=total)
742 744 h = self.fileheader(fname)
743 745 size = len(h)
744 746 yield h
745 747 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
746 748 size += len(chunk)
747 749 yield chunk
748 750 self._verbosenote(_('%8.i %s\n') % (size, fname))
749 751 progress(msgbundling, None)
750 752
751 753 def deltaparent(self, revlog, rev, p1, p2, prev):
752 754 return prev
753 755
754 756 def revchunk(self, revlog, rev, prev, linknode):
755 757 node = revlog.node(rev)
756 758 p1, p2 = revlog.parentrevs(rev)
757 759 base = self.deltaparent(revlog, rev, p1, p2, prev)
758 760
759 761 prefix = ''
760 762 if revlog.iscensored(base) or revlog.iscensored(rev):
761 763 try:
762 764 delta = revlog.revision(node)
763 765 except error.CensoredNodeError as e:
764 766 delta = e.tombstone
765 767 if base == nullrev:
766 768 prefix = mdiff.trivialdiffheader(len(delta))
767 769 else:
768 770 baselen = revlog.rawsize(base)
769 771 prefix = mdiff.replacediffheader(baselen, len(delta))
770 772 elif base == nullrev:
771 773 delta = revlog.revision(node)
772 774 prefix = mdiff.trivialdiffheader(len(delta))
773 775 else:
774 776 delta = revlog.revdiff(base, rev)
775 777 p1n, p2n = revlog.parents(node)
776 778 basenode = revlog.node(base)
777 779 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
778 780 meta += prefix
779 781 l = len(meta) + len(delta)
780 782 yield chunkheader(l)
781 783 yield meta
782 784 yield delta
783 785 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
784 786 # do nothing with basenode, it is implicitly the previous one in HG10
785 787 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
786 788
787 789 class cg2packer(cg1packer):
788 790 version = '02'
789 791 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
790 792
791 793 def __init__(self, repo, bundlecaps=None):
792 794 super(cg2packer, self).__init__(repo, bundlecaps)
793 795 if self._reorder is None:
794 796 # Since generaldelta is directly supported by cg2, reordering
795 797 # generally doesn't help, so we disable it by default (treating
796 798 # bundle.reorder=auto just like bundle.reorder=False).
797 799 self._reorder = False
798 800
799 801 def deltaparent(self, revlog, rev, p1, p2, prev):
800 802 dp = revlog.deltaparent(rev)
801 803 # avoid storing full revisions; pick prev in those cases
802 804 # also pick prev when we can't be sure remote has dp
803 805 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
804 806 return prev
805 807 return dp
806 808
807 809 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
808 810 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
809 811
810 812 packermap = {'01': (cg1packer, cg1unpacker),
811 813 # cg2 adds support for exchanging generaldelta
812 814 '02': (cg2packer, cg2unpacker),
813 815 }
814 816
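packermap is the single dispatch point between wire versions: everything downstream (getsubset, getlocalchangegroup, changegroupsubset) picks the packer or unpacker class by version string. A sketch of the lookup shape, using stub classes since constructing a real packer needs a repo:

    class cg1packer(object): version = '01'
    class cg1unpacker(object): version = '01'
    class cg2packer(cg1packer): version = '02'
    class cg2unpacker(cg1unpacker): version = '02'

    packermap = {'01': (cg1packer, cg1unpacker),
                 '02': (cg2packer, cg2unpacker)}

    packercls, unpackercls = packermap['02']
    assert packercls is cg2packer and unpackercls.version == '02'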
815 817 def _changegroupinfo(repo, nodes, source):
816 818 if repo.ui.verbose or source == 'bundle':
817 819 repo.ui.status(_("%d changesets found\n") % len(nodes))
818 820 if repo.ui.debugflag:
819 821 repo.ui.debug("list of changesets:\n")
820 822 for node in nodes:
821 823 repo.ui.debug("%s\n" % hex(node))
822 824
823 825 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
824 826 repo = repo.unfiltered()
825 827 commonrevs = outgoing.common
826 828 csets = outgoing.missing
827 829 heads = outgoing.missingheads
828 830 # We go through the fast path if we get told to, or if all (unfiltered)
829 831 # heads have been requested (since we then know all linkrevs will
830 832 # be pulled by the client).
831 833 heads.sort()
832 834 fastpathlinkrev = fastpath or (
833 835 repo.filtername is None and heads == sorted(repo.heads()))
834 836
835 837 repo.hook('preoutgoing', throw=True, source=source)
836 838 _changegroupinfo(repo, csets, source)
837 839 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
838 840
839 841 def getsubset(repo, outgoing, bundler, source, fastpath=False):
840 842 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
841 843 return packermap[bundler.version][1](util.chunkbuffer(gengroup), None)
842 844
843 845 def changegroupsubset(repo, roots, heads, source, version='01'):
844 846 """Compute a changegroup consisting of all the nodes that are
845 847 descendants of any of the roots and ancestors of any of the heads.
846 848 Return a chunkbuffer object whose read() method will return
847 849 successive changegroup chunks.
848 850
849 851 It is fairly complex as determining which filenodes and which
850 852 manifest nodes need to be included for the changeset to be complete
851 853 is non-trivial.
852 854
853 855 Another wrinkle is doing the reverse, figuring out which changeset in
854 856 the changegroup a particular filenode or manifestnode belongs to.
855 857 """
856 858 cl = repo.changelog
857 859 if not roots:
858 860 roots = [nullid]
859 861 discbases = []
860 862 for n in roots:
861 863 discbases.extend([p for p in cl.parents(n) if p != nullid])
862 864 # TODO: remove call to nodesbetween.
863 865 csets, roots, heads = cl.nodesbetween(roots, heads)
864 866 included = set(csets)
865 867 discbases = [n for n in discbases if n not in included]
866 868 outgoing = discovery.outgoing(cl, discbases, heads)
867 869 bundler = packermap[version][0](repo)
868 870 return getsubset(repo, outgoing, bundler, source)
869 871
870 872 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
871 873 version='01'):
872 874 """Like getbundle, but taking a discovery.outgoing as an argument.
873 875
874 876 This is only implemented for local repos and reuses potentially
875 877 precomputed sets in outgoing. Returns a raw changegroup generator."""
876 878 if not outgoing.missing:
877 879 return None
878 880 bundler = packermap[version][0](repo, bundlecaps)
879 881 return getsubsetraw(repo, outgoing, bundler, source)
880 882
881 883 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
882 884 version='01'):
883 885 """Like getbundle, but taking a discovery.outgoing as an argument.
884 886
885 887 This is only implemented for local repos and reuses potentially
886 888 precomputed sets in outgoing."""
887 889 if not outgoing.missing:
888 890 return None
889 891 bundler = packermap[version][0](repo, bundlecaps)
890 892 return getsubset(repo, outgoing, bundler, source)
891 893
892 894 def computeoutgoing(repo, heads, common):
893 895 """Computes which revs are outgoing given a set of common
894 896 and a set of heads.
895 897
896 898 This is a separate function so extensions can have access to
897 899 the logic.
898 900
899 901 Returns a discovery.outgoing object.
900 902 """
901 903 cl = repo.changelog
902 904 if common:
903 905 hasnode = cl.hasnode
904 906 common = [n for n in common if hasnode(n)]
905 907 else:
906 908 common = [nullid]
907 909 if not heads:
908 910 heads = cl.heads()
909 911 return discovery.outgoing(cl, common, heads)
910 912
911 913 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
912 914 version='01'):
913 915 """Like changegroupsubset, but returns the set difference between the
914 916 ancestors of heads and the ancestors common.
915 917
916 918 If heads is None, use the local heads. If common is None, use [nullid].
917 919
918 920 The nodes in common might not all be known locally due to the way the
919 921 current discovery protocol works.
920 922 """
921 923 outgoing = computeoutgoing(repo, heads, common)
922 924 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
923 925 version=version)
924 926
925 927 def changegroup(repo, basenodes, source):
926 928 # to avoid a race we use changegroupsubset() (issue1320)
927 929 return changegroupsubset(repo, basenodes, repo.heads(), source)
928 930
929 931 def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles, wasempty):
930 932 revisions = 0
931 933 files = 0
932 934 while True:
933 935 chunkdata = source.filelogheader()
934 936 if not chunkdata:
935 937 break
936 938 f = chunkdata["filename"]
937 939 repo.ui.debug("adding %s revisions\n" % f)
938 940 pr()
939 941 fl = repo.file(f)
940 942 o = len(fl)
941 943 try:
942 944 if not fl.addgroup(source, revmap, trp):
943 945 raise error.Abort(_("received file revlog group is empty"))
944 946 except error.CensoredBaseError as e:
945 947 raise error.Abort(_("received delta base is censored: %s") % e)
946 948 revisions += len(fl) - o
947 949 files += 1
948 950 if f in needfiles:
949 951 needs = needfiles[f]
950 952 for new in xrange(o, len(fl)):
951 953 n = fl.node(new)
952 954 if n in needs:
953 955 needs.remove(n)
954 956 else:
955 957 raise error.Abort(
956 958 _("received spurious file revlog entry"))
957 959 if not needs:
958 960 del needfiles[f]
959 961 repo.ui.progress(_('files'), None)
960 962
961 963 for f, needs in needfiles.iteritems():
962 964 fl = repo.file(f)
963 965 for n in needs:
964 966 try:
965 967 fl.rev(n)
966 968 except error.LookupError:
967 969 raise error.Abort(
968 970 _('missing file data for %s:%s - run hg verify') %
969 971 (f, hex(n)))
970 972
971 973 return revisions, files