changegroup: don't support versions 01 and 02 with treemanifests...
Martin von Zweigbergk
r27928:c0f11347 stable
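Before the diff, a brief illustration of the behavioral change: once a repository uses treemanifests, only changegroup version 03 is advertised, since versions 01 and 02 can carry only flat manifests. The following is a standalone sketch that approximates the new supportedversions() logic in the hunk below; FakeRepo and its configbool shortcut are illustrative stand-ins, not Mercurial APIs.

# Standalone sketch approximating the new supportedversions() behavior.
# FakeRepo is a hypothetical stand-in for a localrepository object.
class FakeRepo(object):
    def __init__(self, requirements, experimental):
        self.requirements = requirements       # e.g. set(['treemanifest'])
        self._experimental = experimental      # e.g. {'changegroup3': True}

    def configbool(self, section, name):
        # Simplified stand-in for repo.ui.configbool('experimental', ...).
        return self._experimental.get(name, False)

def supportedversions(repo):
    versions = set(['01', '02', '03'])
    if ('treemanifest' in repo.requirements or
            repo.configbool('experimental', 'treemanifest')):
        # 01 and 02 carry only flat manifests; converting between flat and
        # tree manifests on the fly would mean rehashing all of history.
        versions.discard('01')
        versions.discard('02')
    elif not repo.configbool('experimental', 'changegroup3'):
        versions.discard('03')
    return versions

# A tree-manifest repo advertises only 03; a plain repo keeps 01 and 02.
assert supportedversions(FakeRepo(set(['treemanifest']), {})) == set(['03'])
assert supportedversions(FakeRepo(set(), {})) == set(['01', '02'])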
@@ -1,1121 +1,1127 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 branchmap,
25 25 dagutil,
26 26 discovery,
27 27 error,
28 28 mdiff,
29 29 phases,
30 30 util,
31 31 )
32 32
33 33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36 36
37 37 def readexactly(stream, n):
38 38 '''read n bytes from stream.read and abort if less was available'''
39 39 s = stream.read(n)
40 40 if len(s) < n:
41 41 raise error.Abort(_("stream ended unexpectedly"
42 42 " (got %d bytes, expected %d)")
43 43 % (len(s), n))
44 44 return s
45 45
46 46 def getchunk(stream):
47 47 """return the next chunk from stream as a string"""
48 48 d = readexactly(stream, 4)
49 49 l = struct.unpack(">l", d)[0]
50 50 if l <= 4:
51 51 if l:
52 52 raise error.Abort(_("invalid chunk length %d") % l)
53 53 return ""
54 54 return readexactly(stream, l - 4)
55 55
56 56 def chunkheader(length):
57 57 """return a changegroup chunk header (string)"""
58 58 return struct.pack(">l", length + 4)
59 59
60 60 def closechunk():
61 61 """return a changegroup chunk header (string) for a zero-length chunk"""
62 62 return struct.pack(">l", 0)
63 63
64 64 def combineresults(results):
65 65 """logic to combine 0 or more addchangegroup results into one"""
66 66 changedheads = 0
67 67 result = 1
68 68 for ret in results:
69 69 # If any changegroup result is 0, return 0
70 70 if ret == 0:
71 71 result = 0
72 72 break
73 73 if ret < -1:
74 74 changedheads += ret + 1
75 75 elif ret > 1:
76 76 changedheads += ret - 1
77 77 if changedheads > 0:
78 78 result = 1 + changedheads
79 79 elif changedheads < 0:
80 80 result = -1 + changedheads
81 81 return result
82 82
83 83 bundletypes = {
84 84 "": ("", None), # only when using unbundle on ssh and old http servers
85 85 # since the unification ssh accepts a header but there
86 86 # is no capability signaling it.
87 87 "HG20": (), # special-cased below
88 88 "HG10UN": ("HG10UN", None),
89 89 "HG10BZ": ("HG10", 'BZ'),
90 90 "HG10GZ": ("HG10GZ", 'GZ'),
91 91 }
92 92
93 93 # hgweb uses this list to communicate its preferred type
94 94 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
95 95
96 96 def writechunks(ui, chunks, filename, vfs=None):
97 97 """Write chunks to a file and return its filename.
98 98
99 99 The stream is assumed to be a bundle file.
100 100 Existing files will not be overwritten.
101 101 If no filename is specified, a temporary file is created.
102 102 """
103 103 fh = None
104 104 cleanup = None
105 105 try:
106 106 if filename:
107 107 if vfs:
108 108 fh = vfs.open(filename, "wb")
109 109 else:
110 110 fh = open(filename, "wb")
111 111 else:
112 112 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
113 113 fh = os.fdopen(fd, "wb")
114 114 cleanup = filename
115 115 for c in chunks:
116 116 fh.write(c)
117 117 cleanup = None
118 118 return filename
119 119 finally:
120 120 if fh is not None:
121 121 fh.close()
122 122 if cleanup is not None:
123 123 if filename and vfs:
124 124 vfs.unlink(cleanup)
125 125 else:
126 126 os.unlink(cleanup)
127 127
128 128 def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
129 129 """Write a bundle file and return its filename.
130 130
131 131 Existing files will not be overwritten.
132 132 If no filename is specified, a temporary file is created.
133 133 bz2 compression can be turned off.
134 134 The bundle file will be deleted in case of errors.
135 135 """
136 136
137 137 if bundletype == "HG20":
138 138 from . import bundle2
139 139 bundle = bundle2.bundle20(ui)
140 140 bundle.setcompression(compression)
141 141 part = bundle.newpart('changegroup', data=cg.getchunks())
142 142 part.addparam('version', cg.version)
143 143 chunkiter = bundle.getchunks()
144 144 else:
145 145 # compression argument is only for the bundle2 case
146 146 assert compression is None
147 147 if cg.version != '01':
148 148 raise error.Abort(_('old bundle types only support v1 '
149 149 'changegroups'))
150 150 header, comp = bundletypes[bundletype]
151 151 if comp not in util.compressors:
152 152 raise error.Abort(_('unknown stream compression type: %s')
153 153 % comp)
154 154 z = util.compressors[comp]()
155 155 subchunkiter = cg.getchunks()
156 156 def chunkiter():
157 157 yield header
158 158 for chunk in subchunkiter:
159 159 yield z.compress(chunk)
160 160 yield z.flush()
161 161 chunkiter = chunkiter()
162 162
163 163 # parse the changegroup data, otherwise we will block
164 164 # in case of sshrepo because we don't know the end of the stream
165 165
166 166 # an empty chunkgroup is the end of the changegroup
167 167 # a changegroup has at least 2 chunkgroups (changelog and manifest).
168 168 # after that, an empty chunkgroup is the end of the changegroup
169 169 return writechunks(ui, chunkiter, filename, vfs=vfs)
170 170
171 171 class cg1unpacker(object):
172 172 """Unpacker for cg1 changegroup streams.
173 173
174 174 A changegroup unpacker handles the framing of the revision data in
175 175 the wire format. Most consumers will want to use the apply()
176 176 method to add the changes from the changegroup to a repository.
177 177
178 178 If you're forwarding a changegroup unmodified to another consumer,
179 179 use getchunks(), which returns an iterator of changegroup
180 180 chunks. This is mostly useful for cases where you need to know the
181 181 data stream has ended by observing the end of the changegroup.
182 182
183 183 deltachunk() is useful only if you're applying delta data. Most
184 184 consumers should prefer apply() instead.
185 185
186 186 A few other public methods exist. Those are used only for
187 187 bundlerepo and some debug commands - their use is discouraged.
188 188 """
189 189 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
190 190 deltaheadersize = struct.calcsize(deltaheader)
191 191 version = '01'
192 192 _grouplistcount = 1 # One list of files after the manifests
193 193
194 194 def __init__(self, fh, alg):
195 195 if alg == 'UN':
196 196 alg = None # get more modern without breaking too much
197 197 if alg not in util.decompressors:
198 198 raise error.Abort(_('unknown stream compression type: %s')
199 199 % alg)
200 200 if alg == 'BZ':
201 201 alg = '_truncatedBZ'
202 202 self._stream = util.decompressors[alg](fh)
203 203 self._type = alg
204 204 self.callback = None
205 205
206 206 # These methods (compressed, read, seek, tell) all appear to only
207 207 # be used by bundlerepo, but it's a little hard to tell.
208 208 def compressed(self):
209 209 return self._type is not None
210 210 def read(self, l):
211 211 return self._stream.read(l)
212 212 def seek(self, pos):
213 213 return self._stream.seek(pos)
214 214 def tell(self):
215 215 return self._stream.tell()
216 216 def close(self):
217 217 return self._stream.close()
218 218
219 219 def _chunklength(self):
220 220 d = readexactly(self._stream, 4)
221 221 l = struct.unpack(">l", d)[0]
222 222 if l <= 4:
223 223 if l:
224 224 raise error.Abort(_("invalid chunk length %d") % l)
225 225 return 0
226 226 if self.callback:
227 227 self.callback()
228 228 return l - 4
229 229
230 230 def changelogheader(self):
231 231 """v10 does not have a changelog header chunk"""
232 232 return {}
233 233
234 234 def manifestheader(self):
235 235 """v10 does not have a manifest header chunk"""
236 236 return {}
237 237
238 238 def filelogheader(self):
239 239 """return the header of the filelogs chunk, v10 only has the filename"""
240 240 l = self._chunklength()
241 241 if not l:
242 242 return {}
243 243 fname = readexactly(self._stream, l)
244 244 return {'filename': fname}
245 245
246 246 def _deltaheader(self, headertuple, prevnode):
247 247 node, p1, p2, cs = headertuple
248 248 if prevnode is None:
249 249 deltabase = p1
250 250 else:
251 251 deltabase = prevnode
252 252 flags = 0
253 253 return node, p1, p2, deltabase, cs, flags
254 254
255 255 def deltachunk(self, prevnode):
256 256 l = self._chunklength()
257 257 if not l:
258 258 return {}
259 259 headerdata = readexactly(self._stream, self.deltaheadersize)
260 260 header = struct.unpack(self.deltaheader, headerdata)
261 261 delta = readexactly(self._stream, l - self.deltaheadersize)
262 262 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
263 263 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
264 264 'deltabase': deltabase, 'delta': delta, 'flags': flags}
265 265
266 266 def getchunks(self):
267 267 """returns all the chunks contained in the bundle
268 268
269 269 Used when you need to forward the binary stream to a file or another
270 270 network API. To do so, it parses the changegroup data; otherwise it would
271 271 block in the case of sshrepo because it doesn't know the end of the stream.
272 272 """
273 273 # an empty chunkgroup is the end of the changegroup
274 274 # a changegroup has at least 2 chunkgroups (changelog and manifest).
275 275 # after that, changegroup versions 1 and 2 have a series of groups
276 276 # with one group per file. changegroup 3 has a series of directory
277 277 # manifests before the files.
278 278 count = 0
279 279 emptycount = 0
280 280 while emptycount < self._grouplistcount:
281 281 empty = True
282 282 count += 1
283 283 while True:
284 284 chunk = getchunk(self)
285 285 if not chunk:
286 286 if empty and count > 2:
287 287 emptycount += 1
288 288 break
289 289 empty = False
290 290 yield chunkheader(len(chunk))
291 291 pos = 0
292 292 while pos < len(chunk):
293 293 next = pos + 2**20
294 294 yield chunk[pos:next]
295 295 pos = next
296 296 yield closechunk()
297 297
298 298 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
299 299 # We know that we'll never have more manifests than we had
300 300 # changesets.
301 301 self.callback = prog(_('manifests'), numchanges)
302 302 # no need to check for empty manifest group here:
303 303 # if the result of the merge of 1 and 2 is the same in 3 and 4,
304 304 # no new manifest will be created and the manifest group will
305 305 # be empty during the pull
306 306 self.manifestheader()
307 307 repo.manifest.addgroup(self, revmap, trp)
308 308 repo.ui.progress(_('manifests'), None)
309 309
310 310 def apply(self, repo, srctype, url, emptyok=False,
311 311 targetphase=phases.draft, expectedtotal=None):
312 312 """Add the changegroup returned by source.read() to this repo.
313 313 srctype is a string like 'push', 'pull', or 'unbundle'. url is
314 314 the URL of the repo where this changegroup is coming from.
315 315
316 316 Return an integer summarizing the change to this repo:
317 317 - nothing changed or no source: 0
318 318 - more heads than before: 1+added heads (2..n)
319 319 - fewer heads than before: -1-removed heads (-2..-n)
320 320 - number of heads stays the same: 1
321 321 """
322 322 repo = repo.unfiltered()
323 323 def csmap(x):
324 324 repo.ui.debug("add changeset %s\n" % short(x))
325 325 return len(cl)
326 326
327 327 def revmap(x):
328 328 return cl.rev(x)
329 329
330 330 changesets = files = revisions = 0
331 331
332 332 try:
333 333 with repo.transaction("\n".join([srctype,
334 334 util.hidepassword(url)])) as tr:
335 335 # The transaction could have been created before and already
336 336 # carries source information. In this case we use the top
337 337 # level data. We overwrite the argument because we need to use
338 338 # the top level value (if they exist) in this function.
339 339 srctype = tr.hookargs.setdefault('source', srctype)
340 340 url = tr.hookargs.setdefault('url', url)
341 341 repo.hook('prechangegroup', throw=True, **tr.hookargs)
342 342
343 343 # write changelog data to temp files so concurrent readers
344 344 # will not see an inconsistent view
345 345 cl = repo.changelog
346 346 cl.delayupdate(tr)
347 347 oldheads = cl.heads()
348 348
349 349 trp = weakref.proxy(tr)
350 350 # pull off the changeset group
351 351 repo.ui.status(_("adding changesets\n"))
352 352 clstart = len(cl)
353 353 class prog(object):
354 354 def __init__(self, step, total):
355 355 self._step = step
356 356 self._total = total
357 357 self._count = 1
358 358 def __call__(self):
359 359 repo.ui.progress(self._step, self._count,
360 360 unit=_('chunks'), total=self._total)
361 361 self._count += 1
362 362 self.callback = prog(_('changesets'), expectedtotal)
363 363
364 364 efiles = set()
365 365 def onchangelog(cl, node):
366 366 efiles.update(cl.read(node)[3])
367 367
368 368 self.changelogheader()
369 369 srccontent = cl.addgroup(self, csmap, trp,
370 370 addrevisioncb=onchangelog)
371 371 efiles = len(efiles)
372 372
373 373 if not (srccontent or emptyok):
374 374 raise error.Abort(_("received changelog group is empty"))
375 375 clend = len(cl)
376 376 changesets = clend - clstart
377 377 repo.ui.progress(_('changesets'), None)
378 378
379 379 # pull off the manifest group
380 380 repo.ui.status(_("adding manifests\n"))
381 381 self._unpackmanifests(repo, revmap, trp, prog, changesets)
382 382
383 383 needfiles = {}
384 384 if repo.ui.configbool('server', 'validate', default=False):
385 385 # validate incoming csets have their manifests
386 386 for cset in xrange(clstart, clend):
387 387 mfnode = repo.changelog.read(
388 388 repo.changelog.node(cset))[0]
389 389 mfest = repo.manifest.readdelta(mfnode)
390 390 # store file nodes we must see
391 391 for f, n in mfest.iteritems():
392 392 needfiles.setdefault(f, set()).add(n)
393 393
394 394 # process the files
395 395 repo.ui.status(_("adding file changes\n"))
396 396 self.callback = None
397 397 pr = prog(_('files'), efiles)
398 398 newrevs, newfiles = _addchangegroupfiles(
399 399 repo, self, revmap, trp, pr, needfiles)
400 400 revisions += newrevs
401 401 files += newfiles
402 402
403 403 dh = 0
404 404 if oldheads:
405 405 heads = cl.heads()
406 406 dh = len(heads) - len(oldheads)
407 407 for h in heads:
408 408 if h not in oldheads and repo[h].closesbranch():
409 409 dh -= 1
410 410 htext = ""
411 411 if dh:
412 412 htext = _(" (%+d heads)") % dh
413 413
414 414 repo.ui.status(_("added %d changesets"
415 415 " with %d changes to %d files%s\n")
416 416 % (changesets, revisions, files, htext))
417 417 repo.invalidatevolatilesets()
418 418
419 419 if changesets > 0:
420 420 if 'node' not in tr.hookargs:
421 421 tr.hookargs['node'] = hex(cl.node(clstart))
422 422 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
423 423 hookargs = dict(tr.hookargs)
424 424 else:
425 425 hookargs = dict(tr.hookargs)
426 426 hookargs['node'] = hex(cl.node(clstart))
427 427 hookargs['node_last'] = hex(cl.node(clend - 1))
428 428 repo.hook('pretxnchangegroup', throw=True, **hookargs)
429 429
430 430 added = [cl.node(r) for r in xrange(clstart, clend)]
431 431 publishing = repo.publishing()
432 432 if srctype in ('push', 'serve'):
433 433 # Old servers can not push the boundary themselves.
434 434 # New servers won't push the boundary if changeset already
435 435 # exists locally as secret
436 436 #
437 437 # We should not use `added` here but rather the list of all
438 438 # changes in the bundle
439 439 if publishing:
440 440 phases.advanceboundary(repo, tr, phases.public,
441 441 srccontent)
442 442 else:
443 443 # Those changesets have been pushed from the
444 444 # outside; their phases are going to be pushed
445 445 # alongside. Therefore `targetphase` is
446 446 # ignored.
447 447 phases.advanceboundary(repo, tr, phases.draft,
448 448 srccontent)
449 449 phases.retractboundary(repo, tr, phases.draft, added)
450 450 elif srctype != 'strip':
451 451 # publishing only alters behavior during push
452 452 #
453 453 # strip should not touch boundary at all
454 454 phases.retractboundary(repo, tr, targetphase, added)
455 455
456 456 if changesets > 0:
457 457 if srctype != 'strip':
458 458 # During strip, the branchcache is invalid but
459 459 # the coming call to `destroyed` will repair it.
460 460 # In other cases we can safely update the cache
461 461 # on disk.
462 462 branchmap.updatecache(repo.filtered('served'))
463 463
464 464 def runhooks():
465 465 # These hooks run when the lock releases, not when the
466 466 # transaction closes. So it's possible for the changelog
467 467 # to have changed since we last saw it.
468 468 if clstart >= len(repo):
469 469 return
470 470
471 471 # forcefully update the on-disk branch cache
472 472 repo.ui.debug("updating the branch cache\n")
473 473 repo.hook("changegroup", **hookargs)
474 474
475 475 for n in added:
476 476 args = hookargs.copy()
477 477 args['node'] = hex(n)
478 478 del args['node_last']
479 479 repo.hook("incoming", **args)
480 480
481 481 newheads = [h for h in repo.heads()
482 482 if h not in oldheads]
483 483 repo.ui.log("incoming",
484 484 "%s incoming changes - new heads: %s\n",
485 485 len(added),
486 486 ', '.join([hex(c[:6]) for c in newheads]))
487 487
488 488 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
489 489 lambda tr: repo._afterlock(runhooks))
490 490 finally:
491 491 repo.ui.flush()
492 492 # never return 0 here:
493 493 if dh < 0:
494 494 return dh - 1
495 495 else:
496 496 return dh + 1
497 497
498 498 class cg2unpacker(cg1unpacker):
499 499 """Unpacker for cg2 streams.
500 500
501 501 cg2 streams add support for generaldelta, so the delta header
502 502 format is slightly different. All other features about the data
503 503 remain the same.
504 504 """
505 505 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
506 506 deltaheadersize = struct.calcsize(deltaheader)
507 507 version = '02'
508 508
509 509 def _deltaheader(self, headertuple, prevnode):
510 510 node, p1, p2, deltabase, cs = headertuple
511 511 flags = 0
512 512 return node, p1, p2, deltabase, cs, flags
513 513
514 514 class cg3unpacker(cg2unpacker):
515 515 """Unpacker for cg3 streams.
516 516
517 517 cg3 streams add support for exchanging treemanifests and revlog
518 518 flags. It adds the revlog flags to the delta header and an empty chunk
519 519 separating manifests and files.
520 520 """
521 521 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
522 522 deltaheadersize = struct.calcsize(deltaheader)
523 523 version = '03'
524 524 _grouplistcount = 2 # One list of manifests and one list of files
525 525
526 526 def _deltaheader(self, headertuple, prevnode):
527 527 node, p1, p2, deltabase, cs, flags = headertuple
528 528 return node, p1, p2, deltabase, cs, flags
529 529
530 530 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
531 531 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
532 532 numchanges)
533 533 while True:
534 534 chunkdata = self.filelogheader()
535 535 if not chunkdata:
536 536 break
537 537 # If we get here, there are directory manifests in the changegroup
538 538 d = chunkdata["filename"]
539 539 repo.ui.debug("adding %s revisions\n" % d)
540 540 dirlog = repo.manifest.dirlog(d)
541 541 if not dirlog.addgroup(self, revmap, trp):
542 542 raise error.Abort(_("received dir revlog group is empty"))
543 543
544 544 class headerlessfixup(object):
545 545 def __init__(self, fh, h):
546 546 self._h = h
547 547 self._fh = fh
548 548 def read(self, n):
549 549 if self._h:
550 550 d, self._h = self._h[:n], self._h[n:]
551 551 if len(d) < n:
552 552 d += readexactly(self._fh, n - len(d))
553 553 return d
554 554 return readexactly(self._fh, n)
555 555
556 556 def _moddirs(files):
557 557 """Given a set of modified files, find the list of modified directories.
558 558
559 559 This returns a list of (path to changed dir, changed dir) tuples,
560 560 as that's what the one client needs anyway.
561 561
562 562 >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ])
563 563 [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')]
564 564
565 565 """
566 566 alldirs = set()
567 567 for f in files:
568 568 path = f.split('/')[:-1]
569 569 for i in xrange(len(path) - 1, -1, -1):
570 570 dn = '/'.join(path[:i])
571 571 current = dn + '/', path[i] + '/'
572 572 if current in alldirs:
573 573 break
574 574 alldirs.add(current)
575 575 return sorted(alldirs)
576 576
577 577 class cg1packer(object):
578 578 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
579 579 version = '01'
580 580 def __init__(self, repo, bundlecaps=None):
581 581 """Given a source repo, construct a bundler.
582 582
583 583 bundlecaps is optional and can be used to specify the set of
584 584 capabilities which can be used to build the bundle.
585 585 """
586 586 # Set of capabilities we can use to build the bundle.
587 587 if bundlecaps is None:
588 588 bundlecaps = set()
589 589 self._bundlecaps = bundlecaps
590 590 # experimental config: bundle.reorder
591 591 reorder = repo.ui.config('bundle', 'reorder', 'auto')
592 592 if reorder == 'auto':
593 593 reorder = None
594 594 else:
595 595 reorder = util.parsebool(reorder)
596 596 self._repo = repo
597 597 self._reorder = reorder
598 598 self._progress = repo.ui.progress
599 599 if self._repo.ui.verbose and not self._repo.ui.debugflag:
600 600 self._verbosenote = self._repo.ui.note
601 601 else:
602 602 self._verbosenote = lambda s: None
603 603
604 604 def close(self):
605 605 return closechunk()
606 606
607 607 def fileheader(self, fname):
608 608 return chunkheader(len(fname)) + fname
609 609
610 610 def group(self, nodelist, revlog, lookup, units=None):
611 611 """Calculate a delta group, yielding a sequence of changegroup chunks
612 612 (strings).
613 613
614 614 Given a list of changeset revs, return a set of deltas and
615 615 metadata corresponding to nodes. The first delta is
616 616 first parent(nodelist[0]) -> nodelist[0], the receiver is
617 617 guaranteed to have this parent as it has all history before
618 618 these changesets. In the case firstparent is nullrev the
619 619 changegroup starts with a full revision.
620 620
621 621 If units is not None, progress detail will be generated; units specifies
622 622 the type of revlog that is touched (changelog, manifest, etc.).
623 623 """
624 624 # if we don't have any revisions touched by these changesets, bail
625 625 if len(nodelist) == 0:
626 626 yield self.close()
627 627 return
628 628
629 629 # for generaldelta revlogs, we linearize the revs; this will both be
630 630 # much quicker and generate a much smaller bundle
631 631 if (revlog._generaldelta and self._reorder is None) or self._reorder:
632 632 dag = dagutil.revlogdag(revlog)
633 633 revs = set(revlog.rev(n) for n in nodelist)
634 634 revs = dag.linearize(revs)
635 635 else:
636 636 revs = sorted([revlog.rev(n) for n in nodelist])
637 637
638 638 # add the parent of the first rev
639 639 p = revlog.parentrevs(revs[0])[0]
640 640 revs.insert(0, p)
641 641
642 642 # build deltas
643 643 total = len(revs) - 1
644 644 msgbundling = _('bundling')
645 645 for r in xrange(len(revs) - 1):
646 646 if units is not None:
647 647 self._progress(msgbundling, r + 1, unit=units, total=total)
648 648 prev, curr = revs[r], revs[r + 1]
649 649 linknode = lookup(revlog.node(curr))
650 650 for c in self.revchunk(revlog, curr, prev, linknode):
651 651 yield c
652 652
653 653 if units is not None:
654 654 self._progress(msgbundling, None)
655 655 yield self.close()
656 656
657 657 # filter any nodes that claim to be part of the known set
658 658 def prune(self, revlog, missing, commonrevs):
659 659 rr, rl = revlog.rev, revlog.linkrev
660 660 return [n for n in missing if rl(rr(n)) not in commonrevs]
661 661
662 662 def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
663 663 """Pack flat manifests into a changegroup stream."""
664 664 ml = self._repo.manifest
665 665 size = 0
666 666 for chunk in self.group(
667 667 mfnodes, ml, lookuplinknode, units=_('manifests')):
668 668 size += len(chunk)
669 669 yield chunk
670 670 self._verbosenote(_('%8.i (manifests)\n') % size)
671 671 # It looks odd to assert this here, but tmfnodes doesn't get
672 672 # filled in until after we've called lookuplinknode for
673 673 # sending root manifests, so the only way to tell the streams
674 674 # got crossed is to check after we've done all the work.
675 675 assert not tmfnodes
676 676
677 677 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
678 678 '''yield a sequence of changegroup chunks (strings)'''
679 679 repo = self._repo
680 680 cl = repo.changelog
681 681 ml = repo.manifest
682 682
683 683 clrevorder = {}
684 684 mfs = {} # needed manifests
685 685 tmfnodes = {}
686 686 fnodes = {} # needed file nodes
687 687 # maps manifest node id -> set(changed files)
688 688 mfchangedfiles = {}
689 689
690 690 # Callback for the changelog, used to collect changed files and manifest
691 691 # nodes.
692 692 # Returns the linkrev node (identity in the changelog case).
693 693 def lookupcl(x):
694 694 c = cl.read(x)
695 695 clrevorder[x] = len(clrevorder)
696 696 n = c[0]
697 697 # record the first changeset introducing this manifest version
698 698 mfs.setdefault(n, x)
699 699 # Record a complete list of potentially-changed files in
700 700 # this manifest.
701 701 mfchangedfiles.setdefault(n, set()).update(c[3])
702 702 return x
703 703
704 704 self._verbosenote(_('uncompressed size of bundle content:\n'))
705 705 size = 0
706 706 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
707 707 size += len(chunk)
708 708 yield chunk
709 709 self._verbosenote(_('%8.i (changelog)\n') % size)
710 710
711 711 # We need to make sure that the linkrev in the changegroup refers to
712 712 # the first changeset that introduced the manifest or file revision.
713 713 # The fastpath is usually safer than the slowpath, because the filelogs
714 714 # are walked in revlog order.
715 715 #
716 716 # When taking the slowpath with reorder=None and the manifest revlog
717 717 # uses generaldelta, the manifest may be walked in the "wrong" order.
718 718 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
719 719 # cc0ff93d0c0c).
720 720 #
721 721 # When taking the fastpath, we are only vulnerable to reordering
722 722 # of the changelog itself. The changelog never uses generaldelta, so
723 723 # it is only reordered when reorder=True. To handle this case, we
724 724 # simply take the slowpath, which already has the 'clrevorder' logic.
725 725 # This was also fixed in cc0ff93d0c0c.
726 726 fastpathlinkrev = fastpathlinkrev and not self._reorder
727 727 # Treemanifests don't work correctly with fastpathlinkrev
728 728 # either, because we don't discover which directory nodes to
729 729 # send along with files. This could probably be fixed.
730 730 fastpathlinkrev = fastpathlinkrev and (
731 731 'treemanifest' not in repo.requirements)
732 732 # Callback for the manifest, used to collect linkrevs for filelog
733 733 # revisions.
734 734 # Returns the linkrev node (collected in lookupcl).
735 735 if fastpathlinkrev:
736 736 lookupmflinknode = mfs.__getitem__
737 737 else:
738 738 def lookupmflinknode(x):
739 739 """Callback for looking up the linknode for manifests.
740 740
741 741 Returns the linkrev node for the specified manifest.
742 742
743 743 SIDE EFFECT:
744 744
745 745 1) fclnodes gets populated with the list of relevant
746 746 file nodes if we're not using fastpathlinkrev
747 747 2) When treemanifests are in use, collects treemanifest nodes
748 748 to send
749 749
750 750 Note that this means manifests must be completely sent to
751 751 the client before you can trust the list of files and
752 752 treemanifests to send.
753 753 """
754 754 clnode = mfs[x]
755 755 # We no longer actually care about reading deltas of
756 756 # the manifest here, because we already know the list
757 757 # of changed files, so for treemanifests (which
758 758 # lazily-load anyway to *generate* a readdelta) we can
759 759 # just load them with read() and then we'll actually
760 760 # be able to correctly load node IDs from the
761 761 # submanifest entries.
762 762 if 'treemanifest' in repo.requirements:
763 763 mdata = ml.read(x)
764 764 else:
765 765 mdata = ml.readfast(x)
766 766 for f in mfchangedfiles[x]:
767 767 try:
768 768 n = mdata[f]
769 769 except KeyError:
770 770 continue
771 771 # record the first changeset introducing this filelog
772 772 # version
773 773 fclnodes = fnodes.setdefault(f, {})
774 774 fclnode = fclnodes.setdefault(n, clnode)
775 775 if clrevorder[clnode] < clrevorder[fclnode]:
776 776 fclnodes[n] = clnode
777 777 # gather list of changed treemanifest nodes
778 778 if 'treemanifest' in repo.requirements:
779 779 submfs = {'/': mdata}
780 780 for dn, bn in _moddirs(mfchangedfiles[x]):
781 781 submf = submfs[dn]
782 782 submf = submf._dirs[bn]
783 783 submfs[submf.dir()] = submf
784 784 tmfclnodes = tmfnodes.setdefault(submf.dir(), {})
785 785 tmfclnode = tmfclnodes.setdefault(submf._node, clnode)
786 786 if clrevorder[clnode] < clrevorder[tmfclnode]:
787 787 tmfclnodes[submf._node] = clnode
788 788 return clnode
789 789
790 790 mfnodes = self.prune(ml, mfs, commonrevs)
791 791 for x in self._packmanifests(
792 792 mfnodes, tmfnodes, lookupmflinknode):
793 793 yield x
794 794
795 795 mfs.clear()
796 796 clrevs = set(cl.rev(x) for x in clnodes)
797 797
798 798 if not fastpathlinkrev:
799 799 def linknodes(unused, fname):
800 800 return fnodes.get(fname, {})
801 801 else:
802 802 cln = cl.node
803 803 def linknodes(filerevlog, fname):
804 804 llr = filerevlog.linkrev
805 805 fln = filerevlog.node
806 806 revs = ((r, llr(r)) for r in filerevlog)
807 807 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
808 808
809 809 changedfiles = set()
810 810 for x in mfchangedfiles.itervalues():
811 811 changedfiles.update(x)
812 812 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
813 813 source):
814 814 yield chunk
815 815
816 816 yield self.close()
817 817
818 818 if clnodes:
819 819 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
820 820
821 821 # The 'source' parameter is useful for extensions
822 822 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
823 823 repo = self._repo
824 824 progress = self._progress
825 825 msgbundling = _('bundling')
826 826
827 827 total = len(changedfiles)
828 828 # for progress output
829 829 msgfiles = _('files')
830 830 for i, fname in enumerate(sorted(changedfiles)):
831 831 filerevlog = repo.file(fname)
832 832 if not filerevlog:
833 833 raise error.Abort(_("empty or missing revlog for %s") % fname)
834 834
835 835 linkrevnodes = linknodes(filerevlog, fname)
836 836 # Look up filenodes; we collected the linkrev nodes above in the
837 837 # fastpath case and with lookupmflinknode in the slowpath case.
838 838 def lookupfilelog(x):
839 839 return linkrevnodes[x]
840 840
841 841 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
842 842 if filenodes:
843 843 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
844 844 total=total)
845 845 h = self.fileheader(fname)
846 846 size = len(h)
847 847 yield h
848 848 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
849 849 size += len(chunk)
850 850 yield chunk
851 851 self._verbosenote(_('%8.i %s\n') % (size, fname))
852 852 progress(msgbundling, None)
853 853
854 854 def deltaparent(self, revlog, rev, p1, p2, prev):
855 855 return prev
856 856
857 857 def revchunk(self, revlog, rev, prev, linknode):
858 858 node = revlog.node(rev)
859 859 p1, p2 = revlog.parentrevs(rev)
860 860 base = self.deltaparent(revlog, rev, p1, p2, prev)
861 861
862 862 prefix = ''
863 863 if revlog.iscensored(base) or revlog.iscensored(rev):
864 864 try:
865 865 delta = revlog.revision(node)
866 866 except error.CensoredNodeError as e:
867 867 delta = e.tombstone
868 868 if base == nullrev:
869 869 prefix = mdiff.trivialdiffheader(len(delta))
870 870 else:
871 871 baselen = revlog.rawsize(base)
872 872 prefix = mdiff.replacediffheader(baselen, len(delta))
873 873 elif base == nullrev:
874 874 delta = revlog.revision(node)
875 875 prefix = mdiff.trivialdiffheader(len(delta))
876 876 else:
877 877 delta = revlog.revdiff(base, rev)
878 878 p1n, p2n = revlog.parents(node)
879 879 basenode = revlog.node(base)
880 880 flags = revlog.flags(rev)
881 881 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
882 882 meta += prefix
883 883 l = len(meta) + len(delta)
884 884 yield chunkheader(l)
885 885 yield meta
886 886 yield delta
887 887 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
888 888 # do nothing with basenode, it is implicitly the previous one in HG10
889 889 # do nothing with flags, it is implicitly 0 for cg1 and cg2
890 890 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
891 891
892 892 class cg2packer(cg1packer):
893 893 version = '02'
894 894 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
895 895
896 896 def __init__(self, repo, bundlecaps=None):
897 897 super(cg2packer, self).__init__(repo, bundlecaps)
898 898 if self._reorder is None:
899 899 # Since generaldelta is directly supported by cg2, reordering
900 900 # generally doesn't help, so we disable it by default (treating
901 901 # bundle.reorder=auto just like bundle.reorder=False).
902 902 self._reorder = False
903 903
904 904 def deltaparent(self, revlog, rev, p1, p2, prev):
905 905 dp = revlog.deltaparent(rev)
906 906 # avoid storing full revisions; pick prev in those cases
907 907 # also pick prev when we can't be sure remote has dp
908 908 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
909 909 return prev
910 910 return dp
911 911
912 912 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
913 913 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
914 914 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
915 915
916 916 class cg3packer(cg2packer):
917 917 version = '03'
918 918 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
919 919
920 920 def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
921 921 # Note that debug prints are super confusing in this code, as
922 922 # tmfnodes gets populated by the calls to lookuplinknode in
923 923 # the superclass's manifest packer. In the future we should
924 924 # probably see if we can refactor this somehow to be less
925 925 # confusing.
926 926 for x in super(cg3packer, self)._packmanifests(
927 927 mfnodes, {}, lookuplinknode):
928 928 yield x
929 929 dirlog = self._repo.manifest.dirlog
930 930 for name, nodes in tmfnodes.iteritems():
931 931 # For now, directory headers are simply file headers with
932 932 # a trailing '/' on the path (already in the name).
933 933 yield self.fileheader(name)
934 934 for chunk in self.group(nodes, dirlog(name), nodes.get):
935 935 yield chunk
936 936 yield self.close()
937 937
938 938 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
939 939 return struct.pack(
940 940 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
941 941
942 942 _packermap = {'01': (cg1packer, cg1unpacker),
943 943 # cg2 adds support for exchanging generaldelta
944 944 '02': (cg2packer, cg2unpacker),
945 945 # cg3 adds support for exchanging revlog flags and treemanifests
946 946 '03': (cg3packer, cg3unpacker),
947 947 }
948 948
949 949 def supportedversions(repo):
950 versions = _packermap.keys()
951 cg3 = ('treemanifest' in repo.requirements or
952 repo.ui.configbool('experimental', 'changegroup3') or
953 repo.ui.configbool('experimental', 'treemanifest'))
954 if not cg3:
955 versions.remove('03')
950 versions = set(_packermap.keys())
951 if ('treemanifest' in repo.requirements or
952 repo.ui.configbool('experimental', 'treemanifest')):
953 # Versions 01 and 02 support only flat manifests and it's just too
954 # expensive to convert between the flat manifest and tree manifest on
955 # the fly. Since tree manifests are hashed differently, all of history
956 # would have to be converted. Instead, we simply don't even pretend to
957 # support versions 01 and 02.
958 versions.discard('01')
959 versions.discard('02')
960 elif not repo.ui.configbool('experimental', 'changegroup3'):
961 versions.discard('03')
956 962 return versions
957 963
958 964 def getbundler(version, repo, bundlecaps=None):
959 965 assert version in supportedversions(repo)
960 966 return _packermap[version][0](repo, bundlecaps)
961 967
962 968 def getunbundler(version, fh, alg):
963 969 return _packermap[version][1](fh, alg)
964 970
965 971 def _changegroupinfo(repo, nodes, source):
966 972 if repo.ui.verbose or source == 'bundle':
967 973 repo.ui.status(_("%d changesets found\n") % len(nodes))
968 974 if repo.ui.debugflag:
969 975 repo.ui.debug("list of changesets:\n")
970 976 for node in nodes:
971 977 repo.ui.debug("%s\n" % hex(node))
972 978
973 979 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
974 980 repo = repo.unfiltered()
975 981 commonrevs = outgoing.common
976 982 csets = outgoing.missing
977 983 heads = outgoing.missingheads
978 984 # We go through the fast path if we get told to, or if all (unfiltered)
979 985 # heads have been requested (since we then know all linkrevs will
980 986 # be pulled by the client).
981 987 heads.sort()
982 988 fastpathlinkrev = fastpath or (
983 989 repo.filtername is None and heads == sorted(repo.heads()))
984 990
985 991 repo.hook('preoutgoing', throw=True, source=source)
986 992 _changegroupinfo(repo, csets, source)
987 993 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
988 994
989 995 def getsubset(repo, outgoing, bundler, source, fastpath=False):
990 996 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
991 997 return getunbundler(bundler.version, util.chunkbuffer(gengroup), None)
992 998
993 999 def changegroupsubset(repo, roots, heads, source, version='01'):
994 1000 """Compute a changegroup consisting of all the nodes that are
995 1001 descendants of any of the roots and ancestors of any of the heads.
996 1002 Return a chunkbuffer object whose read() method will return
997 1003 successive changegroup chunks.
998 1004
999 1005 It is fairly complex as determining which filenodes and which
1000 1006 manifest nodes need to be included for the changeset to be complete
1001 1007 is non-trivial.
1002 1008
1003 1009 Another wrinkle is doing the reverse, figuring out which changeset in
1004 1010 the changegroup a particular filenode or manifestnode belongs to.
1005 1011 """
1006 1012 cl = repo.changelog
1007 1013 if not roots:
1008 1014 roots = [nullid]
1009 1015 discbases = []
1010 1016 for n in roots:
1011 1017 discbases.extend([p for p in cl.parents(n) if p != nullid])
1012 1018 # TODO: remove call to nodesbetween.
1013 1019 csets, roots, heads = cl.nodesbetween(roots, heads)
1014 1020 included = set(csets)
1015 1021 discbases = [n for n in discbases if n not in included]
1016 1022 outgoing = discovery.outgoing(cl, discbases, heads)
1017 1023 bundler = getbundler(version, repo)
1018 1024 return getsubset(repo, outgoing, bundler, source)
1019 1025
1020 1026 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
1021 1027 version='01'):
1022 1028 """Like getbundle, but taking a discovery.outgoing as an argument.
1023 1029
1024 1030 This is only implemented for local repos and reuses potentially
1025 1031 precomputed sets in outgoing. Returns a raw changegroup generator."""
1026 1032 if not outgoing.missing:
1027 1033 return None
1028 1034 bundler = getbundler(version, repo, bundlecaps)
1029 1035 return getsubsetraw(repo, outgoing, bundler, source)
1030 1036
1031 1037 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
1032 1038 version='01'):
1033 1039 """Like getbundle, but taking a discovery.outgoing as an argument.
1034 1040
1035 1041 This is only implemented for local repos and reuses potentially
1036 1042 precomputed sets in outgoing."""
1037 1043 if not outgoing.missing:
1038 1044 return None
1039 1045 bundler = getbundler(version, repo, bundlecaps)
1040 1046 return getsubset(repo, outgoing, bundler, source)
1041 1047
1042 1048 def computeoutgoing(repo, heads, common):
1043 1049 """Computes which revs are outgoing given a set of common
1044 1050 and a set of heads.
1045 1051
1046 1052 This is a separate function so extensions can have access to
1047 1053 the logic.
1048 1054
1049 1055 Returns a discovery.outgoing object.
1050 1056 """
1051 1057 cl = repo.changelog
1052 1058 if common:
1053 1059 hasnode = cl.hasnode
1054 1060 common = [n for n in common if hasnode(n)]
1055 1061 else:
1056 1062 common = [nullid]
1057 1063 if not heads:
1058 1064 heads = cl.heads()
1059 1065 return discovery.outgoing(cl, common, heads)
1060 1066
1061 1067 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
1062 1068 version='01'):
1063 1069 """Like changegroupsubset, but returns the set difference between the
1064 1070 ancestors of heads and the ancestors common.
1065 1071
1066 1072 If heads is None, use the local heads. If common is None, use [nullid].
1067 1073
1068 1074 The nodes in common might not all be known locally due to the way the
1069 1075 current discovery protocol works.
1070 1076 """
1071 1077 outgoing = computeoutgoing(repo, heads, common)
1072 1078 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
1073 1079 version=version)
1074 1080
1075 1081 def changegroup(repo, basenodes, source):
1076 1082 # to avoid a race we use changegroupsubset() (issue1320)
1077 1083 return changegroupsubset(repo, basenodes, repo.heads(), source)
1078 1084
1079 1085 def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
1080 1086 revisions = 0
1081 1087 files = 0
1082 1088 while True:
1083 1089 chunkdata = source.filelogheader()
1084 1090 if not chunkdata:
1085 1091 break
1086 1092 f = chunkdata["filename"]
1087 1093 repo.ui.debug("adding %s revisions\n" % f)
1088 1094 pr()
1089 1095 fl = repo.file(f)
1090 1096 o = len(fl)
1091 1097 try:
1092 1098 if not fl.addgroup(source, revmap, trp):
1093 1099 raise error.Abort(_("received file revlog group is empty"))
1094 1100 except error.CensoredBaseError as e:
1095 1101 raise error.Abort(_("received delta base is censored: %s") % e)
1096 1102 revisions += len(fl) - o
1097 1103 files += 1
1098 1104 if f in needfiles:
1099 1105 needs = needfiles[f]
1100 1106 for new in xrange(o, len(fl)):
1101 1107 n = fl.node(new)
1102 1108 if n in needs:
1103 1109 needs.remove(n)
1104 1110 else:
1105 1111 raise error.Abort(
1106 1112 _("received spurious file revlog entry"))
1107 1113 if not needs:
1108 1114 del needfiles[f]
1109 1115 repo.ui.progress(_('files'), None)
1110 1116
1111 1117 for f, needs in needfiles.iteritems():
1112 1118 fl = repo.file(f)
1113 1119 for n in needs:
1114 1120 try:
1115 1121 fl.rev(n)
1116 1122 except error.LookupError:
1117 1123 raise error.Abort(
1118 1124 _('missing file data for %s:%s - run hg verify') %
1119 1125 (f, hex(n)))
1120 1126
1121 1127 return revisions, files
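To situate the exchange-facing helpers defined above (supportedversions, computeoutgoing, getlocalchangegroup, writebundle), here is a minimal usage sketch. The bundleoutgoing helper, its arguments (a local repo object and the peer's advertised version strings), and the output filename are assumptions for illustration, not part of this module.

from mercurial import changegroup

def bundleoutgoing(repo, remoteversions, heads=None, common=None):
    # Pick the newest changegroup version both sides understand
    # ('03' > '02' > '01' lexicographically).
    shared = changegroup.supportedversions(repo) & set(remoteversions)
    version = max(shared)
    # Compute what the peer is missing and build the changegroup for it.
    outgoing = changegroup.computeoutgoing(repo, heads, common)
    cg = changegroup.getlocalchangegroup(repo, 'bundle', outgoing,
                                         version=version)
    if cg is None:  # nothing to send
        return None
    # Wrap the changegroup in a bundle2 ("HG20") container on disk.
    return changegroup.writebundle(repo.ui, cg, 'outgoing.hg', 'HG20')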