##// END OF EJS Templates
changegroup: add an option to create bundles with full snapshots only...
Boris Feld -
r40459:808b7626 default
parent child Browse files
Show More
@@ -1,1392 +1,1394
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import weakref
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 short,
20 20 )
21 21
22 22 from . import (
23 23 error,
24 24 match as matchmod,
25 25 mdiff,
26 26 phases,
27 27 pycompat,
28 28 repository,
29 29 util,
30 30 )
31 31
32 32 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
33 33 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
34 34 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
35 35
36 36 LFS_REQUIREMENT = 'lfs'
37 37
38 38 readexactly = util.readexactly
39 39
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # The first four bytes are a big-endian length that counts itself.
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # Lengths 1-4 (or negative) cannot describe a valid chunk.
        raise error.Abort(_("invalid chunk length %d") % length)
    # A zero length marks the end of a chunk sequence.
    return ""
49 49
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The on-the-wire length is big-endian and includes the 4 bytes
    # of the header itself.
    return struct.pack(">l", 4 + length)
53 53
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # An explicit zero length terminates a chunk sequence.
    return struct.pack('>l', 0)
57 57
58 58 def _fileheader(path):
59 59 """Obtain a changegroup chunk header for a named path."""
60 60 return chunkheader(len(path)) + path
61 61
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            # No filename given: write to a fresh temp file and return
            # its (generated) name instead.
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        # All chunks were written successfully; disarm the cleanup so the
        # finally block keeps the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # Writing failed part-way through: remove the partial file.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
95 95
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    # Wire layout of one delta header; subclasses override for newer
    # changegroup versions.
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # 'BZ' bundle payloads carry a truncated bzip2 stream;
            # select the engine variant that can decode it.
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Invoked (when set) each time a non-empty chunk length is read;
        # used for progress reporting.
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read the next chunk length, returning the payload size.

        Returns 0 for a terminating (empty) chunk; raises on a
        malformed length.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # The stored length includes the 4 length bytes themselves.
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 does not transmit a delta base: deltas are implicitly
        # against the previous node in the stream, or p1 for the first
        # delta of a group. Revlog flags do not exist in cg1.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta entry; returns {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """Return all the chunks contained in the bundle.

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit the chunk payload in at most 1MB pieces.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            # efiles collects the union of files touched, used to size the
            # "adding file changes" progress below.
            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changelog from changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # New heads that close a branch don't count toward the
                    # user-visible head delta.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        # NOTE(review): deltaheads is only bound once the try body reaches
        # the head computation; an exception before that point would raise
        # NameError here rather than propagate cleanly -- confirm intended.
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        # Each chunk's node becomes the implicit delta base candidate for
        # the next one (see cg1's _deltaheader).
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
438 438
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 transmits the delta base explicitly, so prevnode is not
        # needed. Revlog flags still don't exist in this version.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
454 454
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # The cg3 header already carries every field, including flags,
        # in the order callers expect.
        return headertuple

    def _unpackmanifests(self, repo, revmap, trp, prog):
        # Unpack the flat (root) manifest group exactly as cg2 would.
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        # Any remaining groups describe tree (directory) manifests.
        for chunkdata in iter(self.filelogheader, {}):
            dirname = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % dirname)
            deltas = self.deltaiter()
            storage = repo.manifestlog.getstorage(dirname)
            if not storage.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
480 480
class headerlessfixup(object):
    """File-like reader that re-prepends already-consumed header bytes.

    Wraps a stream ``fh`` whose leading bytes ``h`` were already read
    (e.g. to sniff the format); read() serves those bytes first, then
    falls through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        d = buffered[:n]
        self._h = buffered[n:]
        if len(d) < n:
            # Header exhausted mid-request: top up from the real stream.
            d += readexactly(self._fh, n - len(d))
        return d
492 492
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The changegroup wire format can only express deltas. When the
    # captured revision is stored as a full text rather than a delta,
    # we synthesize a diff header that rewrites the entire content.
    if delta.delta is not None:
        prefix = b''
        data = delta.delta
    else:
        data = delta.revision
        if delta.basenode == nullid:
            prefix = mdiff.trivialdiffheader(len(data))
        else:
            prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                             len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
519 519
520 520 def _sortnodesellipsis(store, nodes, cl, lookup):
521 521 """Sort nodes for changegroup generation."""
522 522 # Ellipses serving mode.
523 523 #
524 524 # In a perfect world, we'd generate better ellipsis-ified graphs
525 525 # for non-changelog revlogs. In practice, we haven't started doing
526 526 # that yet, so the resulting DAGs for the manifestlog and filelogs
527 527 # are actually full of bogus parentage on all the ellipsis
528 528 # nodes. This has the side effect that, while the contents are
529 529 # correct, the individual DAGs might be completely out of whack in
530 530 # a case like 882681bc3166 and its ancestors (back about 10
531 531 # revisions or so) in the main hg repo.
532 532 #
533 533 # The one invariant we *know* holds is that the new (potentially
534 534 # bogus) DAG shape will be valid if we order the nodes in the
535 535 # order that they're introduced in dramatis personae by the
536 536 # changelog, so what we do is we sort the non-changelog histories
537 537 # by the order in which they are used by the changelog.
538 538 key = lambda n: cl.rev(lookup(n))
539 539 return sorted(nodes, key=key)
540 540
def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
                               linknode, clrevtolocalrev, fullclnodes,
                               precomputedellipsis):
    """Compute adjusted parents for an ellipsis revision.

    Returns a ``(p1node, p2node, linknode)`` tuple where the parents have
    been remapped onto revisions of ``store`` that actually exist in the
    ellipsis-ized outgoing data.
    """
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            # For the changelog, revnums are already local.
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p)
                             if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend([pp for pp in precomputedellipsis[p]
                             if pp != nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
619 619
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               topic=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = 'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = 'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl, store, ischangelog, rev, linkrev, linknode,
                    clrevtolocalrev, fullclnodes, precomputedellipsis)

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
                                        total=len(nodes))

    configtarget = repo.ui.config('devel', 'bundle.delta')
    if configtarget not in ('', 'p1', 'full'):
        msg = _("""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == 'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == 'full':
        # Force every revision to be emitted as a full snapshot.
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode)

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
739 741
740 742 class cgpacker(object):
    def __init__(self, repo, oldmatcher, matcher, version,
                 builddeltaheader, manifestsend,
                 forcedeltaparentprev=False,
                 bundlecaps=None, ellipses=False,
                 shallow=False, ellipsisroots=None, fullnodes=None):
        """Given a source repo, construct a bundler.

        oldmatcher is a matcher that matches on files the client already has.
        These will not be included in the changegroup.

        matcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.
        """
        assert oldmatcher
        assert matcher
        self._oldmatcher = oldmatcher
        self._matcher = matcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        self._repo = repo

        # Only emit per-part size notes when verbose (but not debug,
        # which has its own output).
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
805 807
def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
             changelog=True):
    """Yield a sequence of changegroup byte chunks.

    If changelog is False, changelog data won't be added to changegroup
    """
    repo = self._repo
    cl = repo.changelog

    self._verbosenote(_('uncompressed size of bundle content:\n'))
    size = 0

    clstate, deltas = self._generatechangelog(cl, clnodes)
    # NOTE: the deltas generator must be fully consumed even when
    # changelog=False, because iterating it populates clstate as a
    # side effect.
    for delta in deltas:
        if changelog:
            for chunk in _revisiondeltatochunks(delta,
                                                self._builddeltaheader):
                size += len(chunk)
                yield chunk

    close = closechunk()
    size += len(close)
    # Fix: yield the close chunk we already built instead of calling
    # closechunk() a second time (same bytes; consistent with the
    # manifest and file sections below).
    yield close

    self._verbosenote(_('%8.i (changelog)\n') % size)

    clrevorder = clstate['clrevorder']
    manifests = clstate['manifests']
    changedfiles = clstate['changedfiles']

    # We need to make sure that the linkrev in the changegroup refers to
    # the first changeset that introduced the manifest or file revision.
    # The fastpath is usually safer than the slowpath, because the filelogs
    # are walked in revlog order.
    #
    # When taking the slowpath when the manifest revlog uses generaldelta,
    # the manifest may be walked in the "wrong" order. Without 'clrevorder',
    # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
    #
    # When taking the fastpath, we are only vulnerable to reordering
    # of the changelog itself. The changelog never uses generaldelta and is
    # never reordered. To handle this case, we simply take the slowpath,
    # which already has the 'clrevorder' logic. This was also fixed in
    # cc0ff93d0c0c.

    # Treemanifests don't work correctly with fastpathlinkrev
    # either, because we don't discover which directory nodes to
    # send along with files. This could probably be fixed.
    fastpathlinkrev = fastpathlinkrev and (
        'treemanifest' not in repo.requirements)

    fnodes = {}  # needed file nodes

    size = 0
    it = self.generatemanifests(
        commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
        clstate['clrevtomanifestrev'])

    for tree, deltas in it:
        if tree:
            # Sub-directory manifests only exist in cg3 bundles.
            assert self.version == b'03'
            chunk = _fileheader(tree)
            size += len(chunk)
            yield chunk

        for delta in deltas:
            chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
            for chunk in chunks:
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield close

    self._verbosenote(_('%8.i (manifests)\n') % size)
    yield self._manifestsend

    mfdicts = None
    if self._ellipses and self._isshallow:
        mfdicts = [(self._repo.manifestlog[n].read(), lr)
                   for (n, lr) in manifests.iteritems()]

    manifests.clear()
    clrevs = set(cl.rev(x) for x in clnodes)

    it = self.generatefiles(changedfiles, commonrevs,
                            source, mfdicts, fastpathlinkrev,
                            fnodes, clrevs)

    for path, deltas in it:
        h = _fileheader(path)
        # Per-file size counter (intentionally reset for each file).
        size = len(h)
        yield h

        for delta in deltas:
            chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
            for chunk in chunks:
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield close

        self._verbosenote(_('%8.i %s\n') % (size, path))

    # Terminates the file section of the changegroup.
    yield closechunk()

    if clnodes:
        repo.hook('outgoing', node=hex(clnodes[0]), source=source)
917 919
def _generatechangelog(self, cl, nodes):
    """Generate data for changelog chunks.

    Returns a 2-tuple of a dict containing state and an iterable of
    byte chunks. The state will not be fully populated until the
    chunk stream has been fully consumed.
    """
    clrevorder = {}
    manifests = {}
    mfl = self._repo.manifestlog
    changedfiles = set()
    clrevtomanifestrev = {}

    def lookupcl(x):
        # Callback for the changelog, used to collect changed files and
        # manifest nodes. Returns the linkrev node (identity in the
        # changelog case).
        cset = cl.changelogrevision(x)
        clrevorder[x] = len(clrevorder)

        if not self._ellipses:
            # record the first changeset introducing this manifest version
            manifests.setdefault(cset.manifest, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(cset.files)
            return x

        # Ellipsis mode: only update manifests if x is going to be sent.
        # Otherwise we end up with bogus linkrevs specified for manifests
        # and we skip some manifest nodes that we should otherwise
        # have sent.
        if (x in self._fullclnodes
            or cl.rev(x) in self._precomputedellipsis):
            mfnode = cset.manifest
            # Record the first changeset introducing this manifest
            # version.
            manifests.setdefault(mfnode, x)
            # Set this narrow-specific dict so we have the lowest
            # manifest revnum to look up for this cl revnum. (Part of
            # mapping changelog ellipsis parents to manifest ellipsis
            # parents)
            clrevtomanifestrev.setdefault(cl.rev(x), mfl.rev(mfnode))
        # We can't trust the changed files list in the changeset if the
        # client requested a shallow clone.
        if self._isshallow:
            changedfiles.update(mfl[cset.manifest].read().keys())
        else:
            changedfiles.update(cset.files)

        return x

    state = {
        'clrevorder': clrevorder,
        'manifests': manifests,
        'changedfiles': changedfiles,
        'clrevtomanifestrev': clrevtomanifestrev,
    }

    gen = deltagroup(
        self._repo, cl, nodes, True, lookupcl,
        self._forcedeltaparentprev,
        ellipses=self._ellipses,
        topic=_('changesets'),
        clrevtolocalrev={},
        fullclnodes=self._fullclnodes,
        precomputedellipsis=self._precomputedellipsis)

    return state, gen
988 990
def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
                      manifests, fnodes, source, clrevtolocalrev):
    """Returns an iterator of changegroup chunks containing manifests.

    `source` is unused here, but is used by extensions like remotefilelog to
    change what is sent based in pulls vs pushes, etc.
    """
    repo = self._repo
    mfl = repo.manifestlog
    # Directory path -> {manifest node -> linkrev node}. Seeded with the
    # root manifest; tree manifests add subdirectories as they are walked.
    pending = {'': manifests}

    def makelookupmflinknode(tree, nodemap):
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions. Returns the linkrev node (collected in lookupcl).
        if fastpathlinkrev:
            assert not tree
            return manifests.__getitem__

        def lookupmflinknode(x):
            """Callback for looking up the linknode for manifests.

            Returns the linkrev node for the specified manifest.

            SIDE EFFECT:

            1) fclnodes gets populated with the list of relevant
            file nodes if we're not using fastpathlinkrev
            2) When treemanifests are in use, collects treemanifest nodes
            to send

            Note that this means manifests must be completely sent to
            the client before you can trust the list of files and
            treemanifests to send.
            """
            clnode = nodemap[x]
            mdata = mfl.get(tree, x).readfast(shallow=True)
            for p, n, fl in mdata.iterentries():
                if fl == 't':  # subdirectory manifest
                    subtree = tree + p + '/'
                    tmfclnodes = pending.setdefault(subtree, {})
                    tmfclnode = tmfclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[tmfclnode]:
                        tmfclnodes[n] = clnode
                else:
                    f = tree + p
                    fclnodes = fnodes.setdefault(f, {})
                    fclnode = fclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[fclnode]:
                        fclnodes[n] = clnode
            return clnode

        return lookupmflinknode

    while pending:
        tree, nodemap = pending.popitem()
        store = mfl.getstorage(tree)

        if not self._matcher.visitdir(store.tree[:-1] or '.'):
            # No nodes to send because this directory is out of
            # the client's view of the repository (probably
            # because of narrow clones).
            prunednodes = []
        else:
            # Avoid sending any manifest nodes we can prove the
            # client already has by checking linkrevs. See the
            # related comment in generatefiles().
            prunednodes = self._prunemanifests(store, nodemap, commonrevs)
        if tree and not prunednodes:
            continue

        lookupfn = makelookupmflinknode(tree, nodemap)

        deltas = deltagroup(
            self._repo, store, prunednodes, False, lookupfn,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_('manifests'),
            clrevtolocalrev=clrevtolocalrev,
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        if not self._oldmatcher.visitdir(store.tree[:-1] or '.'):
            yield tree, deltas
        else:
            # 'deltas' is a generator and we need to consume it even if
            # we are not going to send it because a side-effect is that
            # it updates 'pending' (via lookupfn)
            for d in deltas:
                pass
            if not tree:
                yield tree, []
1080 1082
1081 1083 def _prunemanifests(self, store, nodes, commonrevs):
1082 1084 # This is split out as a separate method to allow filtering
1083 1085 # commonrevs in extension code.
1084 1086 #
1085 1087 # TODO(augie): this shouldn't be required, instead we should
1086 1088 # make filtering of revisions to send delegated to the store
1087 1089 # layer.
1088 1090 frev, flr = store.rev, store.linkrev
1089 1091 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1090 1092
# The 'source' parameter is useful for extensions
def generatefiles(self, changedfiles, commonrevs, source,
                  mfdicts, fastpathlinkrev, fnodes, clrevs):
    """Yield (path, deltas) pairs for every file to send.

    Only files matched by the new matcher but not the old one are
    considered; filenodes the client provably already has are skipped.
    """
    changedfiles = [f for f in changedfiles
                    if self._matcher(f) and not self._oldmatcher(f)]

    if fastpathlinkrev:
        cln = self._repo.changelog.node

        def normallinknodes(store, fname):
            flinkrev = store.linkrev
            fnode = store.node
            revs = ((r, flinkrev(r)) for r in store)
            return dict((fnode(r), cln(lr))
                        for r, lr in revs if lr in clrevs)
    else:
        def normallinknodes(unused, fname):
            return fnodes.get(fname, {})

    clrevtolocalrev = {}

    if not self._isshallow:
        linknodes = normallinknodes
    else:
        # In a shallow clone, the linknodes callback needs to also include
        # those file nodes that are in the manifests we sent but weren't
        # introduced by those manifests.
        commonctxs = [self._repo[c] for c in commonrevs]
        clrev = self._repo.changelog.rev

        def linknodes(flog, fname):
            for c in commonctxs:
                try:
                    fnode = c.filenode(fname)
                    clrevtolocalrev[c.rev()] = flog.rev(fnode)
                except error.ManifestLookupError:
                    pass
            links = normallinknodes(flog, fname)
            if len(links) != len(mfdicts):
                for mf, lr in mfdicts:
                    fnode = mf.get(fname, None)
                    if fnode in links:
                        links[fnode] = min(links[fnode], lr, key=clrev)
                    elif fnode:
                        links[fnode] = lr
            return links

    repo = self._repo
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=len(changedfiles))
    for i, fname in enumerate(sorted(changedfiles)):
        filerevlog = repo.file(fname)
        if not filerevlog:
            raise error.Abort(_("empty or missing file data for %s") %
                              fname)

        clrevtolocalrev.clear()

        linkrevnodes = linknodes(filerevlog, fname)

        # Lookup for filenodes, we collected the linkrev nodes above in the
        # fastpath case and with lookupmf in the slowpath case.
        def lookupfilelog(x):
            return linkrevnodes[x]

        frev, flr = filerevlog.rev, filerevlog.linkrev
        # Skip sending any filenode we know the client already
        # has. This avoids over-sending files relatively
        # inexpensively, so it's not a problem if we under-filter
        # here.
        filenodes = [n for n in linkrevnodes
                     if flr(frev(n)) not in commonrevs]

        if not filenodes:
            continue

        progress.update(i + 1, item=fname)

        deltas = deltagroup(
            self._repo, filerevlog, filenodes, False, lookupfilelog,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            clrevtolocalrev=clrevtolocalrev,
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        yield fname, deltas

    progress.complete()
1179 1181
def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Build a cgpacker emitting version 01 changegroups."""
    def builddeltaheader(d):
        # cg1 headers carry no explicit delta base (prev is implied).
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, oldmatcher, matcher, b'01',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    forcedeltaparentprev=True,
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1195 1197
def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Build a cgpacker emitting version 02 changegroups."""
    def builddeltaheader(d):
        # cg2 adds an explicit delta base node to the header.
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    return cgpacker(repo, oldmatcher, matcher, b'02',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1210 1212
def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Build a cgpacker emitting version 03 changegroups."""
    def builddeltaheader(d):
        # cg3 additionally carries revlog flags in the header.
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, oldmatcher, matcher, b'03',
                    builddeltaheader=builddeltaheader,
                    # cg3 terminates the manifest section with an extra
                    # empty chunk (for tree manifests).
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1225 1227
# Maps changegroup version string to a (packer factory, unpacker class)
# pair.
_packermap = {
    '01': (_makecg1packer, cg1unpacker),
    # cg2 adds support for exchanging generaldelta
    '02': (_makecg2packer, cg2unpacker),
    # cg3 adds support for exchanging revlog flags and treemanifests
    '03': (_makecg3packer, cg3unpacker),
}
1232 1234
def allsupportedversions(repo):
    """Return the set of changegroup versions this repo supports at all.

    '03' is only offered when the experimental config or the repo's
    requirements call for it.
    """
    versions = set(_packermap)
    wants03 = (repo.ui.configbool('experimental', 'changegroup3')
               or repo.ui.configbool('experimental', 'treemanifest')
               or 'treemanifest' in repo.requirements)
    if not wants03:
        versions.discard('03')
    return versions
1240 1242
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return changegroup versions that can be applied to ``repo``.

    Currently identical to the full supported set.
    """
    return allsupportedversions(repo)
1244 1246
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return changegroup versions that can be created from ``repo``."""
    versions = allsupportedversions(repo)
    requirements = repo.requirements

    # Several repo features rule out the flat-manifest, flag-less
    # versions 01 and 02:
    #
    # - treemanifest: 01/02 support only flat manifests and it's just too
    #   expensive to convert between the flat manifest and tree manifest
    #   on the fly. Since tree manifests are hashed differently, all of
    #   history would have to be converted. Instead, we simply don't even
    #   pretend to support versions 01 and 02.
    # - narrow: 01/02 don't support revlog flags, and we need to support
    #   that for stripping and unbundling to work.
    # - lfs: 01/02 don't support revlog flags, and we need to mark LFS
    #   entries with REVIDX_EXTSTORED.
    if ('treemanifest' in requirements
            or repository.NARROW_REQUIREMENT in requirements
            or LFS_REQUIREMENT in requirements):
        versions.discard('01')
        versions.discard('02')

    return versions
1268 1270
def localversion(repo):
    """Return the best version for bundles meant to be used locally.

    Used for bundles such as those from strip and shelve, and temporary
    bundles.
    """
    best = max(supportedoutgoingversions(repo))
    return best
1273 1275
def safeversion(repo):
    """Return the smallest version clients of the repo can be assumed
    to support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions -= {'01'}
    assert versions
    return min(versions)
1283 1285
def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
               matcher=None, ellipses=False, shallow=False,
               ellipsisroots=None, fullnodes=None):
    """Obtain a changegroup packer for the negotiated ``version``.

    Raises ProgrammingError/Abort when the version cannot honor the
    requested matcher or ellipsis options.
    """
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.alwaysmatcher(repo.root, '')
    if oldmatcher is None:
        oldmatcher = matchmod.nevermatcher(repo.root, '')

    if version == '01' and not matcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    makepacker, _unpacker = _packermap[version]
    return makepacker(repo, oldmatcher, matcher, bundlecaps,
                      ellipses=ellipses, shallow=shallow,
                      ellipsisroots=ellipsisroots,
                      fullnodes=fullnodes)
1311 1313
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker registered for ``version`` over ``fh``."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
1314 1316
def _changegroupinfo(repo, nodes, source):
    """Report how many (and, in debug mode, which) changesets are sent."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
1322 1324
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup stream and wrap it in the matching unbundler."""
    stream = makestream(repo, outgoing, version, source,
                        fastpath=fastpath, bundlecaps=bundlecaps)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(stream), None, extras)
1329 1331
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, matcher=None):
    """Return a generator of changegroup chunks for ``outgoing``."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         matcher=matcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all
    # (unfiltered) heads have been requested (since we then know all
    # linkrevs will be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1349 1351
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the file sections of an incoming changegroup.

    Consumes filelog groups from ``source``, adds them to the repo under
    transaction ``trp``, checks every node listed in ``needfiles`` showed
    up, and returns a ``(revisions, files)`` tuple of what was added.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # source.filelogheader() returns {} once the file section is done.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        progress.increment()
        fl = repo.file(fname)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if fname in needfiles:
            needs = needfiles[fname]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[fname]
    progress.complete()

    # Anything still listed in needfiles never arrived: the changegroup
    # was incomplete.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,907 +1,911
1 1 Setting up test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo 0 > afile
6 6 $ hg add afile
7 7 $ hg commit -m "0.0"
8 8 $ echo 1 >> afile
9 9 $ hg commit -m "0.1"
10 10 $ echo 2 >> afile
11 11 $ hg commit -m "0.2"
12 12 $ echo 3 >> afile
13 13 $ hg commit -m "0.3"
14 14 $ hg update -C 0
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 $ echo 1 >> afile
17 17 $ hg commit -m "1.1"
18 18 created new head
19 19 $ echo 2 >> afile
20 20 $ hg commit -m "1.2"
21 21 $ echo "a line" > fred
22 22 $ echo 3 >> afile
23 23 $ hg add fred
24 24 $ hg commit -m "1.3"
25 25 $ hg mv afile adifferentfile
26 26 $ hg commit -m "1.3m"
27 27 $ hg update -C 3
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 31 $ hg verify
32 32 checking changesets
33 33 checking manifests
34 34 crosschecking files in changesets and manifests
35 35 checking files
36 36 checked 9 changesets with 7 changes to 4 files
37 37 $ cd ..
38 38 $ hg init empty
39 39
40 40 Bundle and phase
41 41
42 42 $ hg -R test phase --force --secret 0
43 43 $ hg -R test bundle phase.hg empty
44 44 searching for changes
45 45 no changes found (ignored 9 secret changesets)
46 46 [1]
47 47 $ hg -R test phase --draft -r 'head()'
48 48
49 49 Bundle --all
50 50
51 51 $ hg -R test bundle --all all.hg
52 52 9 changesets found
53 53
54 54 Bundle test to full.hg
55 55
56 56 $ hg -R test bundle full.hg empty
57 57 searching for changes
58 58 9 changesets found
59 59
60 60 Unbundle full.hg in test
61 61
62 62 $ hg -R test unbundle full.hg
63 63 adding changesets
64 64 adding manifests
65 65 adding file changes
66 66 added 0 changesets with 0 changes to 4 files
67 67 (run 'hg update' to get a working copy)
68 68
69 69 Verify empty
70 70
71 71 $ hg -R empty heads
72 72 [1]
73 73 $ hg -R empty verify
74 74 checking changesets
75 75 checking manifests
76 76 crosschecking files in changesets and manifests
77 77 checking files
78 78 checked 0 changesets with 0 changes to 0 files
79 79
80 80 #if repobundlerepo
81 81
82 82 Pull full.hg into test (using --cwd)
83 83
84 84 $ hg --cwd test pull ../full.hg
85 85 pulling from ../full.hg
86 86 searching for changes
87 87 no changes found
88 88
89 89 Verify that there are no leaked temporary files after pull (issue2797)
90 90
91 91 $ ls test/.hg | grep .hg10un
92 92 [1]
93 93
94 94 Pull full.hg into empty (using --cwd)
95 95
96 96 $ hg --cwd empty pull ../full.hg
97 97 pulling from ../full.hg
98 98 requesting all changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 9 changesets with 7 changes to 4 files (+1 heads)
103 103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
104 104 (run 'hg heads' to see heads, 'hg merge' to merge)
105 105
106 106 Rollback empty
107 107
108 108 $ hg -R empty rollback
109 109 repository tip rolled back to revision -1 (undo pull)
110 110
111 111 Pull full.hg into empty again (using --cwd)
112 112
113 113 $ hg --cwd empty pull ../full.hg
114 114 pulling from ../full.hg
115 115 requesting all changes
116 116 adding changesets
117 117 adding manifests
118 118 adding file changes
119 119 added 9 changesets with 7 changes to 4 files (+1 heads)
120 120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
121 121 (run 'hg heads' to see heads, 'hg merge' to merge)
122 122
123 123 Pull full.hg into test (using -R)
124 124
125 125 $ hg -R test pull full.hg
126 126 pulling from full.hg
127 127 searching for changes
128 128 no changes found
129 129
130 130 Pull full.hg into empty (using -R)
131 131
132 132 $ hg -R empty pull full.hg
133 133 pulling from full.hg
134 134 searching for changes
135 135 no changes found
136 136
137 137 Rollback empty
138 138
139 139 $ hg -R empty rollback
140 140 repository tip rolled back to revision -1 (undo pull)
141 141
142 142 Pull full.hg into empty again (using -R)
143 143
144 144 $ hg -R empty pull full.hg
145 145 pulling from full.hg
146 146 requesting all changes
147 147 adding changesets
148 148 adding manifests
149 149 adding file changes
150 150 added 9 changesets with 7 changes to 4 files (+1 heads)
151 151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
152 152 (run 'hg heads' to see heads, 'hg merge' to merge)
153 153
154 154 Log -R full.hg in fresh empty
155 155
156 156 $ rm -r empty
157 157 $ hg init empty
158 158 $ cd empty
159 159 $ hg -R bundle://../full.hg log
160 160 changeset: 8:aa35859c02ea
161 161 tag: tip
162 162 parent: 3:eebf5a27f8ca
163 163 user: test
164 164 date: Thu Jan 01 00:00:00 1970 +0000
165 165 summary: 0.3m
166 166
167 167 changeset: 7:a6a34bfa0076
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 summary: 1.3m
171 171
172 172 changeset: 6:7373c1169842
173 173 user: test
174 174 date: Thu Jan 01 00:00:00 1970 +0000
175 175 summary: 1.3
176 176
177 177 changeset: 5:1bb50a9436a7
178 178 user: test
179 179 date: Thu Jan 01 00:00:00 1970 +0000
180 180 summary: 1.2
181 181
182 182 changeset: 4:095197eb4973
183 183 parent: 0:f9ee2f85a263
184 184 user: test
185 185 date: Thu Jan 01 00:00:00 1970 +0000
186 186 summary: 1.1
187 187
188 188 changeset: 3:eebf5a27f8ca
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: 0.3
192 192
193 193 changeset: 2:e38ba6f5b7e0
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 summary: 0.2
197 197
198 198 changeset: 1:34c2bf6b0626
199 199 user: test
200 200 date: Thu Jan 01 00:00:00 1970 +0000
201 201 summary: 0.1
202 202
203 203 changeset: 0:f9ee2f85a263
204 204 user: test
205 205 date: Thu Jan 01 00:00:00 1970 +0000
206 206 summary: 0.0
207 207
208 208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
209 209
210 210 $ ls .hg
211 211 00changelog.i
212 212 cache
213 213 requires
214 214 store
215 215
216 216 Pull ../full.hg into empty (with hook)
217 217
218 218 $ cat >> .hg/hgrc <<EOF
219 219 > [hooks]
220 220 > changegroup = sh -c "printenv.py changegroup"
221 221 > EOF
222 222
223 223 doesn't work (yet ?)
224 224
225 225 hg -R bundle://../full.hg verify
226 226
227 227 $ hg pull bundle://../full.hg
228 228 pulling from bundle:../full.hg
229 229 requesting all changes
230 230 adding changesets
231 231 adding manifests
232 232 adding file changes
233 233 added 9 changesets with 7 changes to 4 files (+1 heads)
234 234 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
235 235 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle*../full.hg (glob)
236 236 (run 'hg heads' to see heads, 'hg merge' to merge)
237 237
238 238 Rollback empty
239 239
240 240 $ hg rollback
241 241 repository tip rolled back to revision -1 (undo pull)
242 242 $ cd ..
243 243
244 244 Log -R bundle:empty+full.hg
245 245
246 246 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
247 247 8 7 6 5 4 3 2 1 0
248 248
249 249 Pull full.hg into empty again (using -R; with hook)
250 250
251 251 $ hg -R empty pull full.hg
252 252 pulling from full.hg
253 253 requesting all changes
254 254 adding changesets
255 255 adding manifests
256 256 adding file changes
257 257 added 9 changesets with 7 changes to 4 files (+1 heads)
258 258 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
259 259 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle:empty+full.hg
260 260 (run 'hg heads' to see heads, 'hg merge' to merge)
261 261
262 262 #endif
263 263
264 264 Cannot produce streaming clone bundles with "hg bundle"
265 265
266 266 $ hg -R test bundle -t packed1 packed.hg
267 267 abort: packed bundles cannot be produced by "hg bundle"
268 268 (use 'hg debugcreatestreamclonebundle')
269 269 [255]
270 270
271 271 packed1 is produced properly
272 272
273 273 #if reporevlogstore
274 274
275 275 $ hg -R test debugcreatestreamclonebundle packed.hg
276 276 writing 2664 bytes for 6 files
277 277 bundle requirements: generaldelta, revlogv1
278 278
279 279 $ f -B 64 --size --sha1 --hexdump packed.hg
280 280 packed.hg: size=2827, sha1=9d14cb90c66a21462d915ab33656f38b9deed686
281 281 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
282 282 0010: 00 00 00 00 0a 68 00 16 67 65 6e 65 72 61 6c 64 |.....h..generald|
283 283 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
284 284 0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
285 285
286 286 $ hg debugbundle --spec packed.hg
287 287 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1
288 288
289 289 generaldelta requirement is not listed in stream clone bundles unless used
290 290
291 291 $ hg --config format.usegeneraldelta=false init testnongd
292 292 $ cd testnongd
293 293 $ touch foo
294 294 $ hg -q commit -A -m initial
295 295 $ cd ..
296 296 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
297 297 writing 301 bytes for 3 files
298 298 bundle requirements: revlogv1
299 299
300 300 $ f -B 64 --size --sha1 --hexdump packednongd.hg
301 301 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
302 302 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
303 303 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
304 304 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
305 305 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
306 306
307 307 $ hg debugbundle --spec packednongd.hg
308 308 none-packed1;requirements%3Drevlogv1
309 309
310 310 Warning emitted when packed bundles contain secret changesets
311 311
312 312 $ hg init testsecret
313 313 $ cd testsecret
314 314 $ touch foo
315 315 $ hg -q commit -A -m initial
316 316 $ hg phase --force --secret -r .
317 317 $ cd ..
318 318
319 319 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
320 320 (warning: stream clone bundle will contain secret revisions)
321 321 writing 301 bytes for 3 files
322 322 bundle requirements: generaldelta, revlogv1
323 323
324 324 Unpacking packed1 bundles with "hg unbundle" isn't allowed
325 325
326 326 $ hg init packed
327 327 $ hg -R packed unbundle packed.hg
328 328 abort: packed bundles cannot be applied with "hg unbundle"
329 329 (use "hg debugapplystreamclonebundle")
330 330 [255]
331 331
332 332 packed1 can be consumed from debug command
333 333
334 334 (this also confirms that streamclone-ed changes are visible via
335 335 @filecache properties to in-process procedures before closing
336 336 transaction)
337 337
338 338 $ cat > $TESTTMP/showtip.py <<EOF
339 339 > from __future__ import absolute_import
340 340 >
341 341 > def showtip(ui, repo, hooktype, **kwargs):
342 342 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
343 343 >
344 344 > def reposetup(ui, repo):
345 345 > # this confirms (and ensures) that (empty) 00changelog.i
346 346 > # before streamclone is already cached as repo.changelog
347 347 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
348 348 >
349 349 > # this confirms that streamclone-ed changes are visible to
350 350 > # in-process procedures before closing transaction
351 351 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
352 352 >
353 353 > # this confirms that streamclone-ed changes are still visible
354 354 > # after closing transaction
355 355 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
356 356 > EOF
357 357 $ cat >> $HGRCPATH <<EOF
358 358 > [extensions]
359 359 > showtip = $TESTTMP/showtip.py
360 360 > EOF
361 361
362 362 $ hg -R packed debugapplystreamclonebundle packed.hg
363 363 6 files to transfer, 2.60 KB of data
364 364 pretxnopen: 000000000000
365 365 pretxnclose: aa35859c02ea
366 366 transferred 2.60 KB in *.* seconds (* */sec) (glob)
367 367 txnclose: aa35859c02ea
368 368
369 369 (for safety, confirm visibility of streamclone-ed changes by another
370 370 process, too)
371 371
372 372 $ hg -R packed tip -T "{node|short}\n"
373 373 aa35859c02ea
374 374
375 375 $ cat >> $HGRCPATH <<EOF
376 376 > [extensions]
377 377 > showtip = !
378 378 > EOF
379 379
380 380 Does not work on non-empty repo
381 381
382 382 $ hg -R packed debugapplystreamclonebundle packed.hg
383 383 abort: cannot apply stream clone bundle on non-empty repo
384 384 [255]
385 385
386 386 #endif
387 387
388 388 Create partial clones
389 389
390 390 $ rm -r empty
391 391 $ hg init empty
392 392 $ hg clone -r 3 test partial
393 393 adding changesets
394 394 adding manifests
395 395 adding file changes
396 396 added 4 changesets with 4 changes to 1 files
397 397 new changesets f9ee2f85a263:eebf5a27f8ca
398 398 updating to branch default
399 399 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
400 400 $ hg clone partial partial2
401 401 updating to branch default
402 402 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 403 $ cd partial
404 404
405 405 #if repobundlerepo
406 406
407 407 Log -R full.hg in partial
408 408
409 409 $ hg -R bundle://../full.hg log -T phases
410 410 changeset: 8:aa35859c02ea
411 411 tag: tip
412 412 phase: draft
413 413 parent: 3:eebf5a27f8ca
414 414 user: test
415 415 date: Thu Jan 01 00:00:00 1970 +0000
416 416 summary: 0.3m
417 417
418 418 changeset: 7:a6a34bfa0076
419 419 phase: draft
420 420 user: test
421 421 date: Thu Jan 01 00:00:00 1970 +0000
422 422 summary: 1.3m
423 423
424 424 changeset: 6:7373c1169842
425 425 phase: draft
426 426 user: test
427 427 date: Thu Jan 01 00:00:00 1970 +0000
428 428 summary: 1.3
429 429
430 430 changeset: 5:1bb50a9436a7
431 431 phase: draft
432 432 user: test
433 433 date: Thu Jan 01 00:00:00 1970 +0000
434 434 summary: 1.2
435 435
436 436 changeset: 4:095197eb4973
437 437 phase: draft
438 438 parent: 0:f9ee2f85a263
439 439 user: test
440 440 date: Thu Jan 01 00:00:00 1970 +0000
441 441 summary: 1.1
442 442
443 443 changeset: 3:eebf5a27f8ca
444 444 phase: public
445 445 user: test
446 446 date: Thu Jan 01 00:00:00 1970 +0000
447 447 summary: 0.3
448 448
449 449 changeset: 2:e38ba6f5b7e0
450 450 phase: public
451 451 user: test
452 452 date: Thu Jan 01 00:00:00 1970 +0000
453 453 summary: 0.2
454 454
455 455 changeset: 1:34c2bf6b0626
456 456 phase: public
457 457 user: test
458 458 date: Thu Jan 01 00:00:00 1970 +0000
459 459 summary: 0.1
460 460
461 461 changeset: 0:f9ee2f85a263
462 462 phase: public
463 463 user: test
464 464 date: Thu Jan 01 00:00:00 1970 +0000
465 465 summary: 0.0
466 466
467 467
468 468 Incoming full.hg in partial
469 469
470 470 $ hg incoming bundle://../full.hg
471 471 comparing with bundle:../full.hg
472 472 searching for changes
473 473 changeset: 4:095197eb4973
474 474 parent: 0:f9ee2f85a263
475 475 user: test
476 476 date: Thu Jan 01 00:00:00 1970 +0000
477 477 summary: 1.1
478 478
479 479 changeset: 5:1bb50a9436a7
480 480 user: test
481 481 date: Thu Jan 01 00:00:00 1970 +0000
482 482 summary: 1.2
483 483
484 484 changeset: 6:7373c1169842
485 485 user: test
486 486 date: Thu Jan 01 00:00:00 1970 +0000
487 487 summary: 1.3
488 488
489 489 changeset: 7:a6a34bfa0076
490 490 user: test
491 491 date: Thu Jan 01 00:00:00 1970 +0000
492 492 summary: 1.3m
493 493
494 494 changeset: 8:aa35859c02ea
495 495 tag: tip
496 496 parent: 3:eebf5a27f8ca
497 497 user: test
498 498 date: Thu Jan 01 00:00:00 1970 +0000
499 499 summary: 0.3m
500 500
501 501
502 502 Outgoing -R full.hg vs partial2 in partial
503 503
504 504 $ hg -R bundle://../full.hg outgoing ../partial2
505 505 comparing with ../partial2
506 506 searching for changes
507 507 changeset: 4:095197eb4973
508 508 parent: 0:f9ee2f85a263
509 509 user: test
510 510 date: Thu Jan 01 00:00:00 1970 +0000
511 511 summary: 1.1
512 512
513 513 changeset: 5:1bb50a9436a7
514 514 user: test
515 515 date: Thu Jan 01 00:00:00 1970 +0000
516 516 summary: 1.2
517 517
518 518 changeset: 6:7373c1169842
519 519 user: test
520 520 date: Thu Jan 01 00:00:00 1970 +0000
521 521 summary: 1.3
522 522
523 523 changeset: 7:a6a34bfa0076
524 524 user: test
525 525 date: Thu Jan 01 00:00:00 1970 +0000
526 526 summary: 1.3m
527 527
528 528 changeset: 8:aa35859c02ea
529 529 tag: tip
530 530 parent: 3:eebf5a27f8ca
531 531 user: test
532 532 date: Thu Jan 01 00:00:00 1970 +0000
533 533 summary: 0.3m
534 534
535 535
536 536 Outgoing -R does-not-exist.hg vs partial2 in partial
537 537
538 538 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
539 539 abort: *../does-not-exist.hg* (glob)
540 540 [255]
541 541
542 542 #endif
543 543
544 544 $ cd ..
545 545
546 546 hide outer repo
547 547 $ hg init
548 548
549 549 Direct clone from bundle (all-history)
550 550
551 551 #if repobundlerepo
552 552
553 553 $ hg clone full.hg full-clone
554 554 requesting all changes
555 555 adding changesets
556 556 adding manifests
557 557 adding file changes
558 558 added 9 changesets with 7 changes to 4 files (+1 heads)
559 559 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
560 560 updating to branch default
561 561 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
562 562 $ hg -R full-clone heads
563 563 changeset: 8:aa35859c02ea
564 564 tag: tip
565 565 parent: 3:eebf5a27f8ca
566 566 user: test
567 567 date: Thu Jan 01 00:00:00 1970 +0000
568 568 summary: 0.3m
569 569
570 570 changeset: 7:a6a34bfa0076
571 571 user: test
572 572 date: Thu Jan 01 00:00:00 1970 +0000
573 573 summary: 1.3m
574 574
575 575 $ rm -r full-clone
576 576
577 577 When cloning from a non-copiable repository into '', do not
578 578 recurse infinitely (issue2528)
579 579
580 580 $ hg clone full.hg ''
581 581 abort: empty destination path is not valid
582 582 [255]
583 583
584 584 test for https://bz.mercurial-scm.org/216
585 585
586 586 Unbundle incremental bundles into fresh empty in one go
587 587
588 588 $ rm -r empty
589 589 $ hg init empty
590 590 $ hg -R test bundle --base null -r 0 ../0.hg
591 591 1 changesets found
592 592 $ hg -R test bundle --base 0 -r 1 ../1.hg
593 593 1 changesets found
594 594 $ hg -R empty unbundle -u ../0.hg ../1.hg
595 595 adding changesets
596 596 adding manifests
597 597 adding file changes
598 598 added 1 changesets with 1 changes to 1 files
599 599 new changesets f9ee2f85a263 (1 drafts)
600 600 adding changesets
601 601 adding manifests
602 602 adding file changes
603 603 added 1 changesets with 1 changes to 1 files
604 604 new changesets 34c2bf6b0626 (1 drafts)
605 605 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
606 606
607 607 View full contents of the bundle
608 608 $ hg -R test bundle --base null -r 3 ../partial.hg
609 609 4 changesets found
610 610 $ cd test
611 611 $ hg -R ../../partial.hg log -r "bundle()"
612 612 changeset: 0:f9ee2f85a263
613 613 user: test
614 614 date: Thu Jan 01 00:00:00 1970 +0000
615 615 summary: 0.0
616 616
617 617 changeset: 1:34c2bf6b0626
618 618 user: test
619 619 date: Thu Jan 01 00:00:00 1970 +0000
620 620 summary: 0.1
621 621
622 622 changeset: 2:e38ba6f5b7e0
623 623 user: test
624 624 date: Thu Jan 01 00:00:00 1970 +0000
625 625 summary: 0.2
626 626
627 627 changeset: 3:eebf5a27f8ca
628 628 user: test
629 629 date: Thu Jan 01 00:00:00 1970 +0000
630 630 summary: 0.3
631 631
632 632 $ cd ..
633 633
634 634 #endif
635 635
636 636 test for 540d1059c802
637 637
638 638 $ hg init orig
639 639 $ cd orig
640 640 $ echo foo > foo
641 641 $ hg add foo
642 642 $ hg ci -m 'add foo'
643 643
644 644 $ hg clone . ../copy
645 645 updating to branch default
646 646 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
647 647 $ hg tag foo
648 648
649 649 $ cd ../copy
650 650 $ echo >> foo
651 651 $ hg ci -m 'change foo'
652 652 $ hg bundle ../bundle.hg ../orig
653 653 searching for changes
654 654 1 changesets found
655 655
656 656 $ cd ..
657 657
658 658 #if repobundlerepo
659 659 $ cd orig
660 660 $ hg incoming ../bundle.hg
661 661 comparing with ../bundle.hg
662 662 searching for changes
663 663 changeset: 2:ed1b79f46b9a
664 664 tag: tip
665 665 parent: 0:bbd179dfa0a7
666 666 user: test
667 667 date: Thu Jan 01 00:00:00 1970 +0000
668 668 summary: change foo
669 669
670 670 $ cd ..
671 671
672 672 test bundle with # in the filename (issue2154):
673 673
674 674 $ cp bundle.hg 'test#bundle.hg'
675 675 $ cd orig
676 676 $ hg incoming '../test#bundle.hg'
677 677 comparing with ../test
678 678 abort: unknown revision 'bundle.hg'!
679 679 [255]
680 680
681 681 note that percent encoding is not handled:
682 682
683 683 $ hg incoming ../test%23bundle.hg
684 684 abort: repository ../test%23bundle.hg not found!
685 685 [255]
686 686 $ cd ..
687 687
688 688 #endif
689 689
690 690 test to bundle revisions on the newly created branch (issue3828):
691 691
692 692 $ hg -q clone -U test test-clone
693 693 $ cd test
694 694
695 695 $ hg -q branch foo
696 696 $ hg commit -m "create foo branch"
697 697 $ hg -q outgoing ../test-clone
698 698 9:b4f5acb1ee27
699 699 $ hg -q bundle --branch foo foo.hg ../test-clone
700 700 #if repobundlerepo
701 701 $ hg -R foo.hg -q log -r "bundle()"
702 702 9:b4f5acb1ee27
703 703 #endif
704 704
705 705 $ cd ..
706 706
707 707 test for https://bz.mercurial-scm.org/1144
708 708
709 709 test that verify bundle does not traceback
710 710
711 711 partial history bundle, fails w/ unknown parent
712 712
713 713 $ hg -R bundle.hg verify
714 714 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
715 715 [255]
716 716
717 717 full history bundle, refuses to verify non-local repo
718 718
719 719 #if repobundlerepo
720 720 $ hg -R all.hg verify
721 721 abort: cannot verify bundle or remote repos
722 722 [255]
723 723 #endif
724 724
725 725 but, regular verify must continue to work
726 726
727 727 $ hg -R orig verify
728 728 checking changesets
729 729 checking manifests
730 730 crosschecking files in changesets and manifests
731 731 checking files
732 732 checked 2 changesets with 2 changes to 2 files
733 733
734 734 #if repobundlerepo
735 735 diff against bundle
736 736
737 737 $ hg init b
738 738 $ cd b
739 739 $ hg -R ../all.hg diff -r tip
740 740 diff -r aa35859c02ea anotherfile
741 741 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
742 742 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
743 743 @@ -1,4 +0,0 @@
744 744 -0
745 745 -1
746 746 -2
747 747 -3
748 748 $ cd ..
749 749 #endif
750 750
751 751 bundle single branch
752 752
753 753 $ hg init branchy
754 754 $ cd branchy
755 755 $ echo a >a
756 756 $ echo x >x
757 757 $ hg ci -Ama
758 758 adding a
759 759 adding x
760 760 $ echo c >c
761 761 $ echo xx >x
762 762 $ hg ci -Amc
763 763 adding c
764 764 $ echo c1 >c1
765 765 $ hg ci -Amc1
766 766 adding c1
767 767 $ hg up 0
768 768 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
769 769 $ echo b >b
770 770 $ hg ci -Amb
771 771 adding b
772 772 created new head
773 773 $ echo b1 >b1
774 774 $ echo xx >x
775 775 $ hg ci -Amb1
776 776 adding b1
777 777 $ hg clone -q -r2 . part
778 778
779 779 == bundling via incoming
780 780
781 781 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
782 782 comparing with .
783 783 searching for changes
784 784 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
785 785 057f4db07f61970e1c11e83be79e9d08adc4dc31
786 786
787 787 == bundling
788 788
789 789 $ hg bundle bundle.hg part --debug --config progress.debug=true
790 790 query 1; heads
791 791 searching for changes
792 792 all remote heads known locally
793 793 2 changesets found
794 794 list of changesets:
795 795 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
796 796 057f4db07f61970e1c11e83be79e9d08adc4dc31
797 797 bundle2-output-bundle: "HG20", (1 params) 2 parts total
798 798 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
799 799 changesets: 1/2 chunks (50.00%)
800 800 changesets: 2/2 chunks (100.00%)
801 801 manifests: 1/2 chunks (50.00%)
802 802 manifests: 2/2 chunks (100.00%)
803 803 files: b 1/3 files (33.33%)
804 804 files: b1 2/3 files (66.67%)
805 805 files: x 3/3 files (100.00%)
806 806 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
807 807
808 808 #if repobundlerepo
809 809 == Test for issue3441
810 810
811 811 $ hg clone -q -r0 . part2
812 812 $ hg -q -R part2 pull bundle.hg
813 813 $ hg -R part2 verify
814 814 checking changesets
815 815 checking manifests
816 816 crosschecking files in changesets and manifests
817 817 checking files
818 818 checked 3 changesets with 5 changes to 4 files
819 819 #endif
820 820
821 821 == Test bundling no commits
822 822
823 823 $ hg bundle -r 'public()' no-output.hg
824 824 abort: no commits to bundle
825 825 [255]
826 826
827 827 $ cd ..
828 828
829 829 When user merges to the revision existing only in the bundle,
830 830 it should show warning that second parent of the working
831 831 directory does not exist
832 832
833 833 $ hg init update2bundled
834 834 $ cd update2bundled
835 835 $ cat <<EOF >> .hg/hgrc
836 836 > [extensions]
837 837 > strip =
838 838 > EOF
839 839 $ echo "aaa" >> a
840 840 $ hg commit -A -m 0
841 841 adding a
842 842 $ echo "bbb" >> b
843 843 $ hg commit -A -m 1
844 844 adding b
845 845 $ echo "ccc" >> c
846 846 $ hg commit -A -m 2
847 847 adding c
848 848 $ hg update -r 1
849 849 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
850 850 $ echo "ddd" >> d
851 851 $ hg commit -A -m 3
852 852 adding d
853 853 created new head
854 854 $ hg update -r 2
855 855 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
856 856 $ hg log -G
857 857 o changeset: 3:8bd3e1f196af
858 858 | tag: tip
859 859 | parent: 1:a01eca7af26d
860 860 | user: test
861 861 | date: Thu Jan 01 00:00:00 1970 +0000
862 862 | summary: 3
863 863 |
864 864 | @ changeset: 2:4652c276ac4f
865 865 |/ user: test
866 866 | date: Thu Jan 01 00:00:00 1970 +0000
867 867 | summary: 2
868 868 |
869 869 o changeset: 1:a01eca7af26d
870 870 | user: test
871 871 | date: Thu Jan 01 00:00:00 1970 +0000
872 872 | summary: 1
873 873 |
874 874 o changeset: 0:4fe08cd4693e
875 875 user: test
876 876 date: Thu Jan 01 00:00:00 1970 +0000
877 877 summary: 0
878 878
879 879
880 880 #if repobundlerepo
881 881 $ hg bundle --base 1 -r 3 ../update2bundled.hg
882 882 1 changesets found
883 883 $ hg strip -r 3
884 884 saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
885 885 $ hg merge -R ../update2bundled.hg -r 3
886 886 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
887 887 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
888 888 (branch merge, don't forget to commit)
889 889
890 890 When user updates to the revision existing only in the bundle,
891 891 it should show warning
892 892
893 893 $ hg update -R ../update2bundled.hg --clean -r 3
894 894 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
895 895 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
896 896
897 897 When user updates to the revision existing in the local repository
898 898 the warning shouldn't be emitted
899 899
900 900 $ hg update -R ../update2bundled.hg -r 0
901 901 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
902 902 #endif
903 903
904 904 Test the option that creates a slim bundle
905 905
906 906 $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
907 907 3 changesets found
908
909 Test the option that creates a bundle with full snapshots only (no deltas)
910 $ hg bundle -a --config devel.bundle.delta=full ./full.hg
911 3 changesets found
General Comments 0
You need to be logged in to leave comments. Login now