changegroup: don't send empty subdirectory manifest groups...
Martin von Zweigbergk
r29371:1b699c7e default
@@ -1,1062 +1,1063
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 branchmap,
25 25 dagutil,
26 26 discovery,
27 27 error,
28 28 mdiff,
29 29 phases,
30 30 util,
31 31 )
32 32
33 33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36 36
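
For orientation, here is a minimal standalone sketch (not part of this patch) of how these delta header formats are used with the struct module; the all-zero node ids below are placeholders for real 20-byte binary hashes.

    import struct

    _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

    # placeholder 20-byte binary node ids; real headers carry revlog hashes
    node = p1 = p2 = deltabase = linknode = b"\x00" * 20
    flags = 0

    header = struct.pack(_CHANGEGROUPV3_DELTA_HEADER,
                         node, p1, p2, deltabase, linknode, flags)
    # cg1 headers are 80 bytes, cg2 100 bytes, cg3 102 bytes (the extra
    # 2 bytes are the big-endian revlog flags field)
    assert len(header) == struct.calcsize(_CHANGEGROUPV3_DELTA_HEADER) == 102
    assert struct.unpack(_CHANGEGROUPV3_DELTA_HEADER, header)[-1] == flags
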
37 37 def readexactly(stream, n):
38 38 '''read n bytes from stream.read and abort if less was available'''
39 39 s = stream.read(n)
40 40 if len(s) < n:
41 41 raise error.Abort(_("stream ended unexpectedly"
42 42 " (got %d bytes, expected %d)")
43 43 % (len(s), n))
44 44 return s
45 45
46 46 def getchunk(stream):
47 47 """return the next chunk from stream as a string"""
48 48 d = readexactly(stream, 4)
49 49 l = struct.unpack(">l", d)[0]
50 50 if l <= 4:
51 51 if l:
52 52 raise error.Abort(_("invalid chunk length %d") % l)
53 53 return ""
54 54 return readexactly(stream, l - 4)
55 55
56 56 def chunkheader(length):
57 57 """return a changegroup chunk header (string)"""
58 58 return struct.pack(">l", length + 4)
59 59
60 60 def closechunk():
61 61 """return a changegroup chunk header (string) for a zero-length chunk"""
62 62 return struct.pack(">l", 0)
63 63
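
The three helpers above define the chunk framing used throughout the changegroup wire format: each chunk is prefixed with a 4-byte big-endian length that counts itself, and a zero-length chunk, as produced by closechunk(), terminates a group. A minimal standalone sketch of that framing, independent of this module and with illustrative names only:

    import io
    import struct

    def frame(payload):
        # equivalent of chunkheader(len(payload)) + payload
        return struct.pack(">l", len(payload) + 4) + payload

    def readgroup(stream):
        # yield payloads until the zero-length terminator written by closechunk()
        while True:
            length = struct.unpack(">l", stream.read(4))[0]
            if length <= 4:
                break
            yield stream.read(length - 4)

    stream = io.BytesIO(frame(b"first") + frame(b"second") + struct.pack(">l", 0))
    assert list(readgroup(stream)) == [b"first", b"second"]
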
64 64 def combineresults(results):
65 65 """logic to combine 0 or more addchangegroup results into one"""
66 66 changedheads = 0
67 67 result = 1
68 68 for ret in results:
69 69 # If any changegroup result is 0, return 0
70 70 if ret == 0:
71 71 result = 0
72 72 break
73 73 if ret < -1:
74 74 changedheads += ret + 1
75 75 elif ret > 1:
76 76 changedheads += ret - 1
77 77 if changedheads > 0:
78 78 result = 1 + changedheads
79 79 elif changedheads < 0:
80 80 result = -1 + changedheads
81 81 return result
82 82
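
A small hypothetical example of the head-count encoding that combineresults() folds together, using the convention documented in apply() further down (1 + added heads for growth, -1 - removed heads for shrinkage, 0 for no change), and calling the function defined above:

    # one result added two heads (3 == 1 + 2), another removed one head (-2 == -1 - 1)
    assert combineresults([3, -2]) == 2      # net effect: one extra head
    # any single result of 0 (nothing changed) makes the combined result 0
    assert combineresults([3, 0, -2]) == 0
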
83 83 def writechunks(ui, chunks, filename, vfs=None):
84 84 """Write chunks to a file and return its filename.
85 85
86 86 The stream is assumed to be a bundle file.
87 87 Existing files will not be overwritten.
88 88 If no filename is specified, a temporary file is created.
89 89 """
90 90 fh = None
91 91 cleanup = None
92 92 try:
93 93 if filename:
94 94 if vfs:
95 95 fh = vfs.open(filename, "wb")
96 96 else:
97 97 fh = open(filename, "wb")
98 98 else:
99 99 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
100 100 fh = os.fdopen(fd, "wb")
101 101 cleanup = filename
102 102 for c in chunks:
103 103 fh.write(c)
104 104 cleanup = None
105 105 return filename
106 106 finally:
107 107 if fh is not None:
108 108 fh.close()
109 109 if cleanup is not None:
110 110 if filename and vfs:
111 111 vfs.unlink(cleanup)
112 112 else:
113 113 os.unlink(cleanup)
114 114
115 115 class cg1unpacker(object):
116 116 """Unpacker for cg1 changegroup streams.
117 117
118 118 A changegroup unpacker handles the framing of the revision data in
119 119 the wire format. Most consumers will want to use the apply()
120 120 method to add the changes from the changegroup to a repository.
121 121
122 122 If you're forwarding a changegroup unmodified to another consumer,
123 123 use getchunks(), which returns an iterator of changegroup
124 124 chunks. This is mostly useful for cases where you need to know the
125 125 data stream has ended by observing the end of the changegroup.
126 126
127 127 deltachunk() is useful only if you're applying delta data. Most
128 128 consumers should prefer apply() instead.
129 129
130 130 A few other public methods exist. Those are used only for
131 131 bundlerepo and some debug commands - their use is discouraged.
132 132 """
133 133 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
134 134 deltaheadersize = struct.calcsize(deltaheader)
135 135 version = '01'
136 136 _grouplistcount = 1 # One list of files after the manifests
137 137
138 138 def __init__(self, fh, alg):
139 139 if alg == 'UN':
140 140 alg = None # get more modern without breaking too much
141 141 if not alg in util.decompressors:
142 142 raise error.Abort(_('unknown stream compression type: %s')
143 143 % alg)
144 144 if alg == 'BZ':
145 145 alg = '_truncatedBZ'
146 146 self._stream = util.decompressors[alg](fh)
147 147 self._type = alg
148 148 self.callback = None
149 149
150 150 # These methods (compressed, read, seek, tell) all appear to only
151 151 # be used by bundlerepo, but it's a little hard to tell.
152 152 def compressed(self):
153 153 return self._type is not None
154 154 def read(self, l):
155 155 return self._stream.read(l)
156 156 def seek(self, pos):
157 157 return self._stream.seek(pos)
158 158 def tell(self):
159 159 return self._stream.tell()
160 160 def close(self):
161 161 return self._stream.close()
162 162
163 163 def _chunklength(self):
164 164 d = readexactly(self._stream, 4)
165 165 l = struct.unpack(">l", d)[0]
166 166 if l <= 4:
167 167 if l:
168 168 raise error.Abort(_("invalid chunk length %d") % l)
169 169 return 0
170 170 if self.callback:
171 171 self.callback()
172 172 return l - 4
173 173
174 174 def changelogheader(self):
175 175 """v10 does not have a changelog header chunk"""
176 176 return {}
177 177
178 178 def manifestheader(self):
179 179 """v10 does not have a manifest header chunk"""
180 180 return {}
181 181
182 182 def filelogheader(self):
183 183 """return the header of the filelogs chunk, v10 only has the filename"""
184 184 l = self._chunklength()
185 185 if not l:
186 186 return {}
187 187 fname = readexactly(self._stream, l)
188 188 return {'filename': fname}
189 189
190 190 def _deltaheader(self, headertuple, prevnode):
191 191 node, p1, p2, cs = headertuple
192 192 if prevnode is None:
193 193 deltabase = p1
194 194 else:
195 195 deltabase = prevnode
196 196 flags = 0
197 197 return node, p1, p2, deltabase, cs, flags
198 198
199 199 def deltachunk(self, prevnode):
200 200 l = self._chunklength()
201 201 if not l:
202 202 return {}
203 203 headerdata = readexactly(self._stream, self.deltaheadersize)
204 204 header = struct.unpack(self.deltaheader, headerdata)
205 205 delta = readexactly(self._stream, l - self.deltaheadersize)
206 206 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
207 207 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
208 208 'deltabase': deltabase, 'delta': delta, 'flags': flags}
209 209
210 210 def getchunks(self):
211 211 """returns all the chunks contains in the bundle
212 212
213 213 Used when you need to forward the binary stream to a file or another
214 214 network API. To do so, it parses the changegroup data; otherwise it would
215 215 block in the sshrepo case because it doesn't know where the stream ends.
216 216 """
217 217 # an empty chunkgroup is the end of the changegroup
218 218 # a changegroup has at least 2 chunkgroups (changelog and manifest).
219 219 # after that, changegroup versions 1 and 2 have a series of groups
220 220 # with one group per file. changegroup 3 has a series of directory
221 221 # manifests before the files.
222 222 count = 0
223 223 emptycount = 0
224 224 while emptycount < self._grouplistcount:
225 225 empty = True
226 226 count += 1
227 227 while True:
228 228 chunk = getchunk(self)
229 229 if not chunk:
230 230 if empty and count > 2:
231 231 emptycount += 1
232 232 break
233 233 empty = False
234 234 yield chunkheader(len(chunk))
235 235 pos = 0
236 236 while pos < len(chunk):
237 237 next = pos + 2**20
238 238 yield chunk[pos:next]
239 239 pos = next
240 240 yield closechunk()
241 241
242 242 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
243 243 # We know that we'll never have more manifests than we had
244 244 # changesets.
245 245 self.callback = prog(_('manifests'), numchanges)
246 246 # no need to check for empty manifest group here:
247 247 # if the result of the merge of 1 and 2 is the same in 3 and 4,
248 248 # no new manifest will be created and the manifest group will
249 249 # be empty during the pull
250 250 self.manifestheader()
251 251 repo.manifest.addgroup(self, revmap, trp)
252 252 repo.ui.progress(_('manifests'), None)
253 253 self.callback = None
254 254
255 255 def apply(self, repo, srctype, url, emptyok=False,
256 256 targetphase=phases.draft, expectedtotal=None):
257 257 """Add the changegroup returned by source.read() to this repo.
258 258 srctype is a string like 'push', 'pull', or 'unbundle'. url is
259 259 the URL of the repo where this changegroup is coming from.
260 260
261 261 Return an integer summarizing the change to this repo:
262 262 - nothing changed or no source: 0
263 263 - more heads than before: 1+added heads (2..n)
264 264 - fewer heads than before: -1-removed heads (-2..-n)
265 265 - number of heads stays the same: 1
266 266 """
267 267 repo = repo.unfiltered()
268 268 def csmap(x):
269 269 repo.ui.debug("add changeset %s\n" % short(x))
270 270 return len(cl)
271 271
272 272 def revmap(x):
273 273 return cl.rev(x)
274 274
275 275 changesets = files = revisions = 0
276 276
277 277 try:
278 278 with repo.transaction("\n".join([srctype,
279 279 util.hidepassword(url)])) as tr:
280 280 # The transaction could have been created before and already
281 281 # carries source information. In this case we use the top
282 282 # level data. We overwrite the argument because we need to use
283 283 # the top level values (if they exist) in this function.
284 284 srctype = tr.hookargs.setdefault('source', srctype)
285 285 url = tr.hookargs.setdefault('url', url)
286 286 repo.hook('prechangegroup', throw=True, **tr.hookargs)
287 287
288 288 # write changelog data to temp files so concurrent readers
289 289 # will not see an inconsistent view
290 290 cl = repo.changelog
291 291 cl.delayupdate(tr)
292 292 oldheads = cl.heads()
293 293
294 294 trp = weakref.proxy(tr)
295 295 # pull off the changeset group
296 296 repo.ui.status(_("adding changesets\n"))
297 297 clstart = len(cl)
298 298 class prog(object):
299 299 def __init__(self, step, total):
300 300 self._step = step
301 301 self._total = total
302 302 self._count = 1
303 303 def __call__(self):
304 304 repo.ui.progress(self._step, self._count,
305 305 unit=_('chunks'), total=self._total)
306 306 self._count += 1
307 307 self.callback = prog(_('changesets'), expectedtotal)
308 308
309 309 efiles = set()
310 310 def onchangelog(cl, node):
311 311 efiles.update(cl.readfiles(node))
312 312
313 313 self.changelogheader()
314 314 srccontent = cl.addgroup(self, csmap, trp,
315 315 addrevisioncb=onchangelog)
316 316 efiles = len(efiles)
317 317
318 318 if not (srccontent or emptyok):
319 319 raise error.Abort(_("received changelog group is empty"))
320 320 clend = len(cl)
321 321 changesets = clend - clstart
322 322 repo.ui.progress(_('changesets'), None)
323 323 self.callback = None
324 324
325 325 # pull off the manifest group
326 326 repo.ui.status(_("adding manifests\n"))
327 327 self._unpackmanifests(repo, revmap, trp, prog, changesets)
328 328
329 329 needfiles = {}
330 330 if repo.ui.configbool('server', 'validate', default=False):
331 331 # validate incoming csets have their manifests
332 332 for cset in xrange(clstart, clend):
333 333 mfnode = repo.changelog.read(
334 334 repo.changelog.node(cset))[0]
335 335 mfest = repo.manifest.readdelta(mfnode)
336 336 # store file nodes we must see
337 337 for f, n in mfest.iteritems():
338 338 needfiles.setdefault(f, set()).add(n)
339 339
340 340 # process the files
341 341 repo.ui.status(_("adding file changes\n"))
342 342 newrevs, newfiles = _addchangegroupfiles(
343 343 repo, self, revmap, trp, efiles, needfiles)
344 344 revisions += newrevs
345 345 files += newfiles
346 346
347 347 dh = 0
348 348 if oldheads:
349 349 heads = cl.heads()
350 350 dh = len(heads) - len(oldheads)
351 351 for h in heads:
352 352 if h not in oldheads and repo[h].closesbranch():
353 353 dh -= 1
354 354 htext = ""
355 355 if dh:
356 356 htext = _(" (%+d heads)") % dh
357 357
358 358 repo.ui.status(_("added %d changesets"
359 359 " with %d changes to %d files%s\n")
360 360 % (changesets, revisions, files, htext))
361 361 repo.invalidatevolatilesets()
362 362
363 363 if changesets > 0:
364 364 if 'node' not in tr.hookargs:
365 365 tr.hookargs['node'] = hex(cl.node(clstart))
366 366 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
367 367 hookargs = dict(tr.hookargs)
368 368 else:
369 369 hookargs = dict(tr.hookargs)
370 370 hookargs['node'] = hex(cl.node(clstart))
371 371 hookargs['node_last'] = hex(cl.node(clend - 1))
372 372 repo.hook('pretxnchangegroup', throw=True, **hookargs)
373 373
374 374 added = [cl.node(r) for r in xrange(clstart, clend)]
375 375 publishing = repo.publishing()
376 376 if srctype in ('push', 'serve'):
377 377 # Old servers cannot push the boundary themselves.
378 378 # New servers won't push the boundary if the changeset already
379 379 # exists locally as secret
380 380 #
381 381 # We should not use added here but the list of all changes in
382 382 # the bundle
383 383 if publishing:
384 384 phases.advanceboundary(repo, tr, phases.public,
385 385 srccontent)
386 386 else:
387 387 # Those changesets have been pushed from the
388 388 # outside, and their phases are going to be pushed
389 389 # alongside. Therefore `targetphase` is
390 390 # ignored.
391 391 phases.advanceboundary(repo, tr, phases.draft,
392 392 srccontent)
393 393 phases.retractboundary(repo, tr, phases.draft, added)
394 394 elif srctype != 'strip':
395 395 # publishing only alters behavior during push
396 396 #
397 397 # strip should not touch boundary at all
398 398 phases.retractboundary(repo, tr, targetphase, added)
399 399
400 400 if changesets > 0:
401 401 if srctype != 'strip':
402 402 # During strip, the branchcache is invalid, but the
403 403 # upcoming call to `destroyed` will repair it.
404 404 # In other cases we can safely update the cache on
405 405 # disk.
406 406 branchmap.updatecache(repo.filtered('served'))
407 407
408 408 def runhooks():
409 409 # These hooks run when the lock releases, not when the
410 410 # transaction closes. So it's possible for the changelog
411 411 # to have changed since we last saw it.
412 412 if clstart >= len(repo):
413 413 return
414 414
415 415 # forcefully update the on-disk branch cache
416 416 repo.ui.debug("updating the branch cache\n")
417 417 repo.hook("changegroup", **hookargs)
418 418
419 419 for n in added:
420 420 args = hookargs.copy()
421 421 args['node'] = hex(n)
422 422 del args['node_last']
423 423 repo.hook("incoming", **args)
424 424
425 425 newheads = [h for h in repo.heads()
426 426 if h not in oldheads]
427 427 repo.ui.log("incoming",
428 428 "%s incoming changes - new heads: %s\n",
429 429 len(added),
430 430 ', '.join([hex(c[:6]) for c in newheads]))
431 431
432 432 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
433 433 lambda tr: repo._afterlock(runhooks))
434 434 finally:
435 435 repo.ui.flush()
436 436 # never return 0 here:
437 437 if dh < 0:
438 438 return dh - 1
439 439 else:
440 440 return dh + 1
441 441
442 442 class cg2unpacker(cg1unpacker):
443 443 """Unpacker for cg2 streams.
444 444
445 445 cg2 streams add support for generaldelta, so the delta header
446 446 format is slightly different. All other features about the data
447 447 remain the same.
448 448 """
449 449 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
450 450 deltaheadersize = struct.calcsize(deltaheader)
451 451 version = '02'
452 452
453 453 def _deltaheader(self, headertuple, prevnode):
454 454 node, p1, p2, deltabase, cs = headertuple
455 455 flags = 0
456 456 return node, p1, p2, deltabase, cs, flags
457 457
458 458 class cg3unpacker(cg2unpacker):
459 459 """Unpacker for cg3 streams.
460 460
461 461 cg3 streams add support for exchanging treemanifests and revlog
462 462 flags. It adds the revlog flags to the delta header and an empty chunk
463 463 separating manifests and files.
464 464 """
465 465 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
466 466 deltaheadersize = struct.calcsize(deltaheader)
467 467 version = '03'
468 468 _grouplistcount = 2 # One list of manifests and one list of files
469 469
470 470 def _deltaheader(self, headertuple, prevnode):
471 471 node, p1, p2, deltabase, cs, flags = headertuple
472 472 return node, p1, p2, deltabase, cs, flags
473 473
474 474 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
475 475 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
476 476 numchanges)
477 477 while True:
478 478 chunkdata = self.filelogheader()
479 479 if not chunkdata:
480 480 break
481 481 # If we get here, there are directory manifests in the changegroup
482 482 d = chunkdata["filename"]
483 483 repo.ui.debug("adding %s revisions\n" % d)
484 484 dirlog = repo.manifest.dirlog(d)
485 485 if not dirlog.addgroup(self, revmap, trp):
486 486 raise error.Abort(_("received dir revlog group is empty"))
487 487
488 488 class headerlessfixup(object):
489 489 def __init__(self, fh, h):
490 490 self._h = h
491 491 self._fh = fh
492 492 def read(self, n):
493 493 if self._h:
494 494 d, self._h = self._h[:n], self._h[n:]
495 495 if len(d) < n:
496 496 d += readexactly(self._fh, n - len(d))
497 497 return d
498 498 return readexactly(self._fh, n)
499 499
500 500 class cg1packer(object):
501 501 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
502 502 version = '01'
503 503 def __init__(self, repo, bundlecaps=None):
504 504 """Given a source repo, construct a bundler.
505 505
506 506 bundlecaps is optional and can be used to specify the set of
507 507 capabilities which can be used to build the bundle.
508 508 """
509 509 # Set of capabilities we can use to build the bundle.
510 510 if bundlecaps is None:
511 511 bundlecaps = set()
512 512 self._bundlecaps = bundlecaps
513 513 # experimental config: bundle.reorder
514 514 reorder = repo.ui.config('bundle', 'reorder', 'auto')
515 515 if reorder == 'auto':
516 516 reorder = None
517 517 else:
518 518 reorder = util.parsebool(reorder)
519 519 self._repo = repo
520 520 self._reorder = reorder
521 521 self._progress = repo.ui.progress
522 522 if self._repo.ui.verbose and not self._repo.ui.debugflag:
523 523 self._verbosenote = self._repo.ui.note
524 524 else:
525 525 self._verbosenote = lambda s: None
526 526
527 527 def close(self):
528 528 return closechunk()
529 529
530 530 def fileheader(self, fname):
531 531 return chunkheader(len(fname)) + fname
532 532
533 533 # Extracted both for clarity and for overriding in extensions.
534 534 def _sortgroup(self, revlog, nodelist, lookup):
535 535 """Sort nodes for change group and turn them into revnums."""
536 536 # for generaldelta revlogs, we linearize the revs; this will both be
537 537 # much quicker and generate a much smaller bundle
538 538 if (revlog._generaldelta and self._reorder is None) or self._reorder:
539 539 dag = dagutil.revlogdag(revlog)
540 540 return dag.linearize(set(revlog.rev(n) for n in nodelist))
541 541 else:
542 542 return sorted([revlog.rev(n) for n in nodelist])
543 543
544 544 def group(self, nodelist, revlog, lookup, units=None):
545 545 """Calculate a delta group, yielding a sequence of changegroup chunks
546 546 (strings).
547 547
548 548 Given a list of changeset revs, return a set of deltas and
549 549 metadata corresponding to nodes. The first delta is
550 550 first parent(nodelist[0]) -> nodelist[0]; the receiver is
551 551 guaranteed to have this parent as it has all history before
552 552 these changesets. In the case where the first parent is nullrev, the
553 553 changegroup starts with a full revision.
554 554
555 555 If units is not None, progress detail will be generated; units specifies
556 556 the type of revlog that is touched (changelog, manifest, etc.).
557 557 """
558 558 # if we don't have any revisions touched by these changesets, bail
559 559 if len(nodelist) == 0:
560 560 yield self.close()
561 561 return
562 562
563 563 revs = self._sortgroup(revlog, nodelist, lookup)
564 564
565 565 # add the parent of the first rev
566 566 p = revlog.parentrevs(revs[0])[0]
567 567 revs.insert(0, p)
568 568
569 569 # build deltas
570 570 total = len(revs) - 1
571 571 msgbundling = _('bundling')
572 572 for r in xrange(len(revs) - 1):
573 573 if units is not None:
574 574 self._progress(msgbundling, r + 1, unit=units, total=total)
575 575 prev, curr = revs[r], revs[r + 1]
576 576 linknode = lookup(revlog.node(curr))
577 577 for c in self.revchunk(revlog, curr, prev, linknode):
578 578 yield c
579 579
580 580 if units is not None:
581 581 self._progress(msgbundling, None)
582 582 yield self.close()
583 583
584 584 # filter any nodes that claim to be part of the known set
585 585 def prune(self, revlog, missing, commonrevs):
586 586 rr, rl = revlog.rev, revlog.linkrev
587 587 return [n for n in missing if rl(rr(n)) not in commonrevs]
588 588
589 589 def _packmanifests(self, dir, mfnodes, lookuplinknode):
590 590 """Pack flat manifests into a changegroup stream."""
591 591 assert not dir
592 592 for chunk in self.group(mfnodes, self._repo.manifest,
593 593 lookuplinknode, units=_('manifests')):
594 594 yield chunk
595 595
596 596 def _manifestsdone(self):
597 597 return ''
598 598
599 599 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
600 600 '''yield a sequence of changegroup chunks (strings)'''
601 601 repo = self._repo
602 602 cl = repo.changelog
603 603
604 604 clrevorder = {}
605 605 mfs = {} # needed manifests
606 606 fnodes = {} # needed file nodes
607 607 changedfiles = set()
608 608
609 609 # Callback for the changelog, used to collect changed files and manifest
610 610 # nodes.
611 611 # Returns the linkrev node (identity in the changelog case).
612 612 def lookupcl(x):
613 613 c = cl.read(x)
614 614 clrevorder[x] = len(clrevorder)
615 615 n = c[0]
616 616 # record the first changeset introducing this manifest version
617 617 mfs.setdefault(n, x)
618 618 # Record a complete list of potentially-changed files in
619 619 # this manifest.
620 620 changedfiles.update(c[3])
621 621 return x
622 622
623 623 self._verbosenote(_('uncompressed size of bundle content:\n'))
624 624 size = 0
625 625 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
626 626 size += len(chunk)
627 627 yield chunk
628 628 self._verbosenote(_('%8.i (changelog)\n') % size)
629 629
630 630 # We need to make sure that the linkrev in the changegroup refers to
631 631 # the first changeset that introduced the manifest or file revision.
632 632 # The fastpath is usually safer than the slowpath, because the filelogs
633 633 # are walked in revlog order.
634 634 #
635 635 # When taking the slowpath with reorder=None and the manifest revlog
636 636 # uses generaldelta, the manifest may be walked in the "wrong" order.
637 637 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
638 638 # cc0ff93d0c0c).
639 639 #
640 640 # When taking the fastpath, we are only vulnerable to reordering
641 641 # of the changelog itself. The changelog never uses generaldelta, so
642 642 # it is only reordered when reorder=True. To handle this case, we
643 643 # simply take the slowpath, which already has the 'clrevorder' logic.
644 644 # This was also fixed in cc0ff93d0c0c.
645 645 fastpathlinkrev = fastpathlinkrev and not self._reorder
646 646 # Treemanifests don't work correctly with fastpathlinkrev
647 647 # either, because we don't discover which directory nodes to
648 648 # send along with files. This could probably be fixed.
649 649 fastpathlinkrev = fastpathlinkrev and (
650 650 'treemanifest' not in repo.requirements)
651 651
652 652 for chunk in self.generatemanifests(commonrevs, clrevorder,
653 653 fastpathlinkrev, mfs, fnodes):
654 654 yield chunk
655 655 mfs.clear()
656 656 clrevs = set(cl.rev(x) for x in clnodes)
657 657
658 658 if not fastpathlinkrev:
659 659 def linknodes(unused, fname):
660 660 return fnodes.get(fname, {})
661 661 else:
662 662 cln = cl.node
663 663 def linknodes(filerevlog, fname):
664 664 llr = filerevlog.linkrev
665 665 fln = filerevlog.node
666 666 revs = ((r, llr(r)) for r in filerevlog)
667 667 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
668 668
669 669 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
670 670 source):
671 671 yield chunk
672 672
673 673 yield self.close()
674 674
675 675 if clnodes:
676 676 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
677 677
678 678 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
679 679 fnodes):
680 680 repo = self._repo
681 681 dirlog = repo.manifest.dirlog
682 682 tmfnodes = {'': mfs}
683 683
684 684 # Callback for the manifest, used to collect linkrevs for filelog
685 685 # revisions.
686 686 # Returns the linkrev node (collected in lookupcl).
687 687 def makelookupmflinknode(dir):
688 688 if fastpathlinkrev:
689 689 assert not dir
690 690 return mfs.__getitem__
691 691
692 692 def lookupmflinknode(x):
693 693 """Callback for looking up the linknode for manifests.
694 694
695 695 Returns the linkrev node for the specified manifest.
696 696
697 697 SIDE EFFECT:
698 698
699 699 1) fclnodes gets populated with the list of relevant
700 700 file nodes if we're not using fastpathlinkrev
701 701 2) When treemanifests are in use, collects treemanifest nodes
702 702 to send
703 703
704 704 Note that this means manifests must be completely sent to
705 705 the client before you can trust the list of files and
706 706 treemanifests to send.
707 707 """
708 708 clnode = tmfnodes[dir][x]
709 709 mdata = dirlog(dir).readshallowfast(x)
710 710 for p, n, fl in mdata.iterentries():
711 711 if fl == 't': # subdirectory manifest
712 712 subdir = dir + p + '/'
713 713 tmfclnodes = tmfnodes.setdefault(subdir, {})
714 714 tmfclnode = tmfclnodes.setdefault(n, clnode)
715 715 if clrevorder[clnode] < clrevorder[tmfclnode]:
716 716 tmfclnodes[n] = clnode
717 717 else:
718 718 f = dir + p
719 719 fclnodes = fnodes.setdefault(f, {})
720 720 fclnode = fclnodes.setdefault(n, clnode)
721 721 if clrevorder[clnode] < clrevorder[fclnode]:
722 722 fclnodes[n] = clnode
723 723 return clnode
724 724 return lookupmflinknode
725 725
726 726 size = 0
727 727 while tmfnodes:
728 728 dir = min(tmfnodes)
729 729 nodes = tmfnodes[dir]
730 730 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
731 if not dir or prunednodes:
731 732 for x in self._packmanifests(dir, prunednodes,
732 733 makelookupmflinknode(dir)):
733 734 size += len(x)
734 735 yield x
735 736 del tmfnodes[dir]
736 737 self._verbosenote(_('%8.i (manifests)\n') % size)
737 738 yield self._manifestsdone()
738 739
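
The `if not dir or prunednodes:` guard added above is the substance of this change: a subdirectory manifest group is now emitted only when pruning left something to send, while the root manifest group (dir == '') is still always emitted, since the unpacker unconditionally expects it. Without the guard, a fully pruned subdirectory would still go out as an empty group, which on the wire for cg3 is just the directory-name header followed immediately by the group terminator, and which the receiving side can reject (see the "received dir revlog group is empty" abort in cg3unpacker._unpackmanifests above). A rough sketch of that now-avoided empty group, built from the module-level helpers; the directory name is hypothetical:

    # an "empty" cg3 subdirectory group: name header + immediate group close
    emptygroup = chunkheader(len('dir1/')) + 'dir1/' + closechunk()
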
739 740 # The 'source' parameter is useful for extensions
740 741 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
741 742 repo = self._repo
742 743 progress = self._progress
743 744 msgbundling = _('bundling')
744 745
745 746 total = len(changedfiles)
746 747 # for progress output
747 748 msgfiles = _('files')
748 749 for i, fname in enumerate(sorted(changedfiles)):
749 750 filerevlog = repo.file(fname)
750 751 if not filerevlog:
751 752 raise error.Abort(_("empty or missing revlog for %s") % fname)
752 753
753 754 linkrevnodes = linknodes(filerevlog, fname)
755 756 # Lookup for filenodes; we collected the linkrev nodes above in the
755 756 # fastpath case and with lookupmf in the slowpath case.
756 757 def lookupfilelog(x):
757 758 return linkrevnodes[x]
758 759
759 760 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
760 761 if filenodes:
761 762 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
762 763 total=total)
763 764 h = self.fileheader(fname)
764 765 size = len(h)
765 766 yield h
766 767 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
767 768 size += len(chunk)
768 769 yield chunk
769 770 self._verbosenote(_('%8.i %s\n') % (size, fname))
770 771 progress(msgbundling, None)
771 772
772 773 def deltaparent(self, revlog, rev, p1, p2, prev):
773 774 return prev
774 775
775 776 def revchunk(self, revlog, rev, prev, linknode):
776 777 node = revlog.node(rev)
777 778 p1, p2 = revlog.parentrevs(rev)
778 779 base = self.deltaparent(revlog, rev, p1, p2, prev)
779 780
780 781 prefix = ''
781 782 if revlog.iscensored(base) or revlog.iscensored(rev):
782 783 try:
783 784 delta = revlog.revision(node)
784 785 except error.CensoredNodeError as e:
785 786 delta = e.tombstone
786 787 if base == nullrev:
787 788 prefix = mdiff.trivialdiffheader(len(delta))
788 789 else:
789 790 baselen = revlog.rawsize(base)
790 791 prefix = mdiff.replacediffheader(baselen, len(delta))
791 792 elif base == nullrev:
792 793 delta = revlog.revision(node)
793 794 prefix = mdiff.trivialdiffheader(len(delta))
794 795 else:
795 796 delta = revlog.revdiff(base, rev)
796 797 p1n, p2n = revlog.parents(node)
797 798 basenode = revlog.node(base)
798 799 flags = revlog.flags(rev)
799 800 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
800 801 meta += prefix
801 802 l = len(meta) + len(delta)
802 803 yield chunkheader(l)
803 804 yield meta
804 805 yield delta
805 806 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
806 807 # do nothing with basenode, it is implicitly the previous one in HG10
807 808 # do nothing with flags, it is implicitly 0 for cg1 and cg2
808 809 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
809 810
810 811 class cg2packer(cg1packer):
811 812 version = '02'
812 813 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
813 814
814 815 def __init__(self, repo, bundlecaps=None):
815 816 super(cg2packer, self).__init__(repo, bundlecaps)
816 817 if self._reorder is None:
817 818 # Since generaldelta is directly supported by cg2, reordering
818 819 # generally doesn't help, so we disable it by default (treating
819 820 # bundle.reorder=auto just like bundle.reorder=False).
820 821 self._reorder = False
821 822
822 823 def deltaparent(self, revlog, rev, p1, p2, prev):
823 824 dp = revlog.deltaparent(rev)
824 825 # avoid storing full revisions; pick prev in those cases
825 826 # also pick prev when we can't be sure remote has dp
826 827 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
827 828 return prev
828 829 return dp
829 830
830 831 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
831 832 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
832 833 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
833 834
834 835 class cg3packer(cg2packer):
835 836 version = '03'
836 837 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
837 838
838 839 def _packmanifests(self, dir, mfnodes, lookuplinknode):
839 840 if dir:
840 841 yield self.fileheader(dir)
841 842 for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
842 843 lookuplinknode, units=_('manifests')):
843 844 yield chunk
844 845
845 846 def _manifestsdone(self):
846 847 return self.close()
847 848
848 849 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
849 850 return struct.pack(
850 851 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
851 852
852 853 _packermap = {'01': (cg1packer, cg1unpacker),
853 854 # cg2 adds support for exchanging generaldelta
854 855 '02': (cg2packer, cg2unpacker),
855 856 # cg3 adds support for exchanging revlog flags and treemanifests
856 857 '03': (cg3packer, cg3unpacker),
857 858 }
858 859
859 860 def allsupportedversions(ui):
860 861 versions = set(_packermap.keys())
861 862 versions.discard('03')
862 863 if (ui.configbool('experimental', 'changegroup3') or
863 864 ui.configbool('experimental', 'treemanifest')):
864 865 versions.add('03')
865 866 return versions
866 867
867 868 # Changegroup versions that can be applied to the repo
868 869 def supportedincomingversions(repo):
869 870 versions = allsupportedversions(repo.ui)
870 871 if 'treemanifest' in repo.requirements:
871 872 versions.add('03')
872 873 return versions
873 874
874 875 # Changegroup versions that can be created from the repo
875 876 def supportedoutgoingversions(repo):
876 877 versions = allsupportedversions(repo.ui)
877 878 if 'treemanifest' in repo.requirements:
878 879 # Versions 01 and 02 support only flat manifests and it's just too
879 880 # expensive to convert between the flat manifest and tree manifest on
880 881 # the fly. Since tree manifests are hashed differently, all of history
881 882 # would have to be converted. Instead, we simply don't even pretend to
882 883 # support versions 01 and 02.
883 884 versions.discard('01')
884 885 versions.discard('02')
885 886 versions.add('03')
886 887 return versions
887 888
888 889 def safeversion(repo):
889 890 # Finds the smallest version that it's safe to assume clients of the repo
890 891 # will support. For example, all hg versions that support generaldelta also
891 892 # support changegroup 02.
892 893 versions = supportedoutgoingversions(repo)
893 894 if 'generaldelta' in repo.requirements:
894 895 versions.discard('01')
895 896 assert versions
896 897 return min(versions)
897 898
898 899 def getbundler(version, repo, bundlecaps=None):
899 900 assert version in supportedoutgoingversions(repo)
900 901 return _packermap[version][0](repo, bundlecaps)
901 902
902 903 def getunbundler(version, fh, alg):
903 904 return _packermap[version][1](fh, alg)
904 905
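
For illustration, a hedged sketch of how the version-selection helpers above fit together; repo and fh are assumed to be a local repository object and a file-like bundle stream, and this call sequence is not taken from the patch itself.

    # pick a version every expected client should handle, then build the
    # matching packer/unpacker pair registered in _packermap
    version = safeversion(repo)
    bundler = getbundler(version, repo)
    unbundler = getunbundler(version, fh, None)  # alg None (or 'UN') means uncompressed
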
905 906 def _changegroupinfo(repo, nodes, source):
906 907 if repo.ui.verbose or source == 'bundle':
907 908 repo.ui.status(_("%d changesets found\n") % len(nodes))
908 909 if repo.ui.debugflag:
909 910 repo.ui.debug("list of changesets:\n")
910 911 for node in nodes:
911 912 repo.ui.debug("%s\n" % hex(node))
912 913
913 914 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
914 915 repo = repo.unfiltered()
915 916 commonrevs = outgoing.common
916 917 csets = outgoing.missing
917 918 heads = outgoing.missingheads
918 919 # We go through the fast path if we get told to, or if all (unfiltered)
919 920 # heads have been requested (since we then know that all linkrevs will
920 921 # be pulled by the client).
921 922 heads.sort()
922 923 fastpathlinkrev = fastpath or (
923 924 repo.filtername is None and heads == sorted(repo.heads()))
924 925
925 926 repo.hook('preoutgoing', throw=True, source=source)
926 927 _changegroupinfo(repo, csets, source)
927 928 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
928 929
929 930 def getsubset(repo, outgoing, bundler, source, fastpath=False):
930 931 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
931 932 return getunbundler(bundler.version, util.chunkbuffer(gengroup), None)
932 933
933 934 def changegroupsubset(repo, roots, heads, source, version='01'):
934 935 """Compute a changegroup consisting of all the nodes that are
935 936 descendants of any of the roots and ancestors of any of the heads.
936 937 Return a chunkbuffer object whose read() method will return
937 938 successive changegroup chunks.
938 939
939 940 It is fairly complex as determining which filenodes and which
940 941 manifest nodes need to be included for the changeset to be complete
941 942 is non-trivial.
942 943
943 944 Another wrinkle is doing the reverse, figuring out which changeset in
944 945 the changegroup a particular filenode or manifestnode belongs to.
945 946 """
946 947 cl = repo.changelog
947 948 if not roots:
948 949 roots = [nullid]
949 950 discbases = []
950 951 for n in roots:
951 952 discbases.extend([p for p in cl.parents(n) if p != nullid])
952 953 # TODO: remove call to nodesbetween.
953 954 csets, roots, heads = cl.nodesbetween(roots, heads)
954 955 included = set(csets)
955 956 discbases = [n for n in discbases if n not in included]
956 957 outgoing = discovery.outgoing(cl, discbases, heads)
957 958 bundler = getbundler(version, repo)
958 959 return getsubset(repo, outgoing, bundler, source)
959 960
960 961 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
961 962 version='01'):
962 963 """Like getbundle, but taking a discovery.outgoing as an argument.
963 964
964 965 This is only implemented for local repos and reuses potentially
965 966 precomputed sets in outgoing. Returns a raw changegroup generator."""
966 967 if not outgoing.missing:
967 968 return None
968 969 bundler = getbundler(version, repo, bundlecaps)
969 970 return getsubsetraw(repo, outgoing, bundler, source)
970 971
971 972 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
972 973 version='01'):
973 974 """Like getbundle, but taking a discovery.outgoing as an argument.
974 975
975 976 This is only implemented for local repos and reuses potentially
976 977 precomputed sets in outgoing."""
977 978 if not outgoing.missing:
978 979 return None
979 980 bundler = getbundler(version, repo, bundlecaps)
980 981 return getsubset(repo, outgoing, bundler, source)
981 982
982 983 def computeoutgoing(repo, heads, common):
983 984 """Computes which revs are outgoing given a set of common
984 985 and a set of heads.
985 986
986 987 This is a separate function so extensions can have access to
987 988 the logic.
988 989
989 990 Returns a discovery.outgoing object.
990 991 """
991 992 cl = repo.changelog
992 993 if common:
993 994 hasnode = cl.hasnode
994 995 common = [n for n in common if hasnode(n)]
995 996 else:
996 997 common = [nullid]
997 998 if not heads:
998 999 heads = cl.heads()
999 1000 return discovery.outgoing(cl, common, heads)
1000 1001
1001 1002 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
1002 1003 version='01'):
1003 1004 """Like changegroupsubset, but returns the set difference between the
1004 1005 ancestors of heads and the ancestors of common.
1005 1006
1006 1007 If heads is None, use the local heads. If common is None, use [nullid].
1007 1008
1008 1009 The nodes in common might not all be known locally due to the way the
1009 1010 current discovery protocol works.
1010 1011 """
1011 1012 outgoing = computeoutgoing(repo, heads, common)
1012 1013 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
1013 1014 version=version)
1014 1015
1015 1016 def changegroup(repo, basenodes, source):
1016 1017 # to avoid a race we use changegroupsubset() (issue1320)
1017 1018 return changegroupsubset(repo, basenodes, repo.heads(), source)
1018 1019
1019 1020 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1020 1021 revisions = 0
1021 1022 files = 0
1022 1023 while True:
1023 1024 chunkdata = source.filelogheader()
1024 1025 if not chunkdata:
1025 1026 break
1026 1027 files += 1
1027 1028 f = chunkdata["filename"]
1028 1029 repo.ui.debug("adding %s revisions\n" % f)
1029 1030 repo.ui.progress(_('files'), files, unit=_('files'),
1030 1031 total=expectedfiles)
1031 1032 fl = repo.file(f)
1032 1033 o = len(fl)
1033 1034 try:
1034 1035 if not fl.addgroup(source, revmap, trp):
1035 1036 raise error.Abort(_("received file revlog group is empty"))
1036 1037 except error.CensoredBaseError as e:
1037 1038 raise error.Abort(_("received delta base is censored: %s") % e)
1038 1039 revisions += len(fl) - o
1039 1040 if f in needfiles:
1040 1041 needs = needfiles[f]
1041 1042 for new in xrange(o, len(fl)):
1042 1043 n = fl.node(new)
1043 1044 if n in needs:
1044 1045 needs.remove(n)
1045 1046 else:
1046 1047 raise error.Abort(
1047 1048 _("received spurious file revlog entry"))
1048 1049 if not needs:
1049 1050 del needfiles[f]
1050 1051 repo.ui.progress(_('files'), None)
1051 1052
1052 1053 for f, needs in needfiles.iteritems():
1053 1054 fl = repo.file(f)
1054 1055 for n in needs:
1055 1056 try:
1056 1057 fl.rev(n)
1057 1058 except error.LookupError:
1058 1059 raise error.Abort(
1059 1060 _('missing file data for %s:%s - run hg verify') %
1060 1061 (f, hex(n)))
1061 1062
1062 1063 return revisions, files
@@ -1,744 +1,786
1 1 #require killdaemons
2 2
3 3 $ cat << EOF >> $HGRCPATH
4 4 > [format]
5 5 > usegeneraldelta=yes
6 6 > [ui]
7 7 > ssh=python "$TESTDIR/dummyssh"
8 8 > EOF
9 9
10 10 Set up repo
11 11
12 12 $ hg --config experimental.treemanifest=True init repo
13 13 $ cd repo
14 14
15 15 Requirements get set on init
16 16
17 17 $ grep treemanifest .hg/requires
18 18 treemanifest
19 19
20 20 Without directories, looks like any other repo
21 21
22 22 $ echo 0 > a
23 23 $ echo 0 > b
24 24 $ hg ci -Aqm initial
25 25 $ hg debugdata -m 0
26 26 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
27 27 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
28 28
29 29 Submanifest is stored in separate revlog
30 30
31 31 $ mkdir dir1
32 32 $ echo 1 > dir1/a
33 33 $ echo 1 > dir1/b
34 34 $ echo 1 > e
35 35 $ hg ci -Aqm 'add dir1'
36 36 $ hg debugdata -m 1
37 37 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
38 38 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
39 39 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
40 40 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
41 41 $ hg debugdata --dir dir1 0
42 42 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
43 43 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
44 44
45 45 Can add nested directories
46 46
47 47 $ mkdir dir1/dir1
48 48 $ echo 2 > dir1/dir1/a
49 49 $ echo 2 > dir1/dir1/b
50 50 $ mkdir dir1/dir2
51 51 $ echo 2 > dir1/dir2/a
52 52 $ echo 2 > dir1/dir2/b
53 53 $ hg ci -Aqm 'add dir1/dir1'
54 54 $ hg files -r .
55 55 a
56 56 b
57 57 dir1/a (glob)
58 58 dir1/b (glob)
59 59 dir1/dir1/a (glob)
60 60 dir1/dir1/b (glob)
61 61 dir1/dir2/a (glob)
62 62 dir1/dir2/b (glob)
63 63 e
64 64
65 65 Revision is not created for unchanged directory
66 66
67 67 $ mkdir dir2
68 68 $ echo 3 > dir2/a
69 69 $ hg add dir2
70 70 adding dir2/a (glob)
71 71 $ hg debugindex --dir dir1 > before
72 72 $ hg ci -qm 'add dir2'
73 73 $ hg debugindex --dir dir1 > after
74 74 $ diff before after
75 75 $ rm before after
76 76
77 77 Removing a directory does not create a revlog entry
78 78
79 79 $ hg rm dir1/dir1
80 80 removing dir1/dir1/a (glob)
81 81 removing dir1/dir1/b (glob)
82 82 $ hg debugindex --dir dir1/dir1 > before
83 83 $ hg ci -qm 'remove dir1/dir1'
84 84 $ hg debugindex --dir dir1/dir1 > after
85 85 $ diff before after
86 86 $ rm before after
87 87
88 88 Check that hg files (calls treemanifest.walk()) works
89 89 without loading all directory revlogs
90 90
91 91 $ hg co 'desc("add dir2")'
92 92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 93 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
94 94 $ hg files -r . dir1
95 95 dir1/a (glob)
96 96 dir1/b (glob)
97 97 dir1/dir1/a (glob)
98 98 dir1/dir1/b (glob)
99 99 dir1/dir2/a (glob)
100 100 dir1/dir2/b (glob)
101 101
102 102 Check that status between revisions works (calls treemanifest.matches())
103 103 without loading all directory revlogs
104 104
105 105 $ hg status --rev 'desc("add dir1")' --rev . dir1
106 106 A dir1/dir1/a
107 107 A dir1/dir1/b
108 108 A dir1/dir2/a
109 109 A dir1/dir2/b
110 110 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
111 111
112 112 Merge creates 2-parent revision of directory revlog
113 113
114 114 $ echo 5 > dir1/a
115 115 $ hg ci -Aqm 'modify dir1/a'
116 116 $ hg co '.^'
117 117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
118 118 $ echo 6 > dir1/b
119 119 $ hg ci -Aqm 'modify dir1/b'
120 120 $ hg merge 'desc("modify dir1/a")'
121 121 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 122 (branch merge, don't forget to commit)
123 123 $ hg ci -m 'conflict-free merge involving dir1/'
124 124 $ cat dir1/a
125 125 5
126 126 $ cat dir1/b
127 127 6
128 128 $ hg debugindex --dir dir1
129 129 rev offset length delta linkrev nodeid p1 p2
130 130 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
131 131 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
132 132 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
133 133 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
134 134 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
135 135 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
136 136
137 137 Merge keeping directory from parent 1 does not create a revlog entry. (Note that
138 138 dir1's manifest does change, but only because dir1/a's filelog changes.)
139 139
140 140 $ hg co 'desc("add dir2")'
141 141 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 142 $ echo 8 > dir2/a
143 143 $ hg ci -m 'modify dir2/a'
144 144 created new head
145 145
146 146 $ hg debugindex --dir dir2 > before
147 147 $ hg merge 'desc("modify dir1/a")'
148 148 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
149 149 (branch merge, don't forget to commit)
150 150 $ hg revert -r 'desc("modify dir2/a")' .
151 151 reverting dir1/a (glob)
152 152 $ hg ci -m 'merge, keeping parent 1'
153 153 $ hg debugindex --dir dir2 > after
154 154 $ diff before after
155 155 $ rm before after
156 156
157 157 Merge keeping directory from parent 2 does not create a revlog entry. (Note that
158 158 dir2's manifest does change, but only because dir2/a's filelog changes.)
159 159
160 160 $ hg co 'desc("modify dir2/a")'
161 161 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 162 $ hg debugindex --dir dir1 > before
163 163 $ hg merge 'desc("modify dir1/a")'
164 164 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 165 (branch merge, don't forget to commit)
166 166 $ hg revert -r 'desc("modify dir1/a")' .
167 167 reverting dir2/a (glob)
168 168 $ hg ci -m 'merge, keeping parent 2'
169 169 created new head
170 170 $ hg debugindex --dir dir1 > after
171 171 $ diff before after
172 172 $ rm before after
173 173
174 174 Create flat source repo for tests with mixed flat/tree manifests
175 175
176 176 $ cd ..
177 177 $ hg init repo-flat
178 178 $ cd repo-flat
179 179
180 180 Create a few commits with flat manifest
181 181
182 182 $ echo 0 > a
183 183 $ echo 0 > b
184 184 $ echo 0 > e
185 185 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
186 186 > do
187 187 > mkdir $d
188 188 > echo 0 > $d/a
189 189 > echo 0 > $d/b
190 190 > done
191 191 $ hg ci -Aqm initial
192 192
193 193 $ echo 1 > a
194 194 $ echo 1 > dir1/a
195 195 $ echo 1 > dir1/dir1/a
196 196 $ hg ci -Aqm 'modify on branch 1'
197 197
198 198 $ hg co 0
199 199 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 200 $ echo 2 > b
201 201 $ echo 2 > dir1/b
202 202 $ echo 2 > dir1/dir1/b
203 203 $ hg ci -Aqm 'modify on branch 2'
204 204
205 205 $ hg merge 1
206 206 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
207 207 (branch merge, don't forget to commit)
208 208 $ hg ci -m 'merge of flat manifests to new flat manifest'
209 209
210 210 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
211 211 $ cat hg.pid >> $DAEMON_PIDS
212 212
213 213 Create clone with tree manifests enabled
214 214
215 215 $ cd ..
216 216 $ hg clone --config experimental.treemanifest=1 \
217 217 > http://localhost:$HGPORT repo-mixed -r 1
218 218 adding changesets
219 219 adding manifests
220 220 adding file changes
221 221 added 2 changesets with 14 changes to 11 files
222 222 updating to branch default
223 223 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
224 224 $ cd repo-mixed
225 225 $ test -d .hg/store/meta
226 226 [1]
227 227 $ grep treemanifest .hg/requires
228 228 treemanifest
229 229
230 230 Should be possible to push updates from flat to tree manifest repo
231 231
232 232 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
233 233 pushing to ssh://user@dummy/repo-mixed
234 234 searching for changes
235 235 remote: adding changesets
236 236 remote: adding manifests
237 237 remote: adding file changes
238 238 remote: added 2 changesets with 3 changes to 3 files
239 239
240 240 Commit should store revlog per directory
241 241
242 242 $ hg co 1
243 243 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
244 244 $ echo 3 > a
245 245 $ echo 3 > dir1/a
246 246 $ echo 3 > dir1/dir1/a
247 247 $ hg ci -m 'first tree'
248 248 created new head
249 249 $ find .hg/store/meta | sort
250 250 .hg/store/meta
251 251 .hg/store/meta/dir1
252 252 .hg/store/meta/dir1/00manifest.i
253 253 .hg/store/meta/dir1/dir1
254 254 .hg/store/meta/dir1/dir1/00manifest.i
255 255 .hg/store/meta/dir1/dir2
256 256 .hg/store/meta/dir1/dir2/00manifest.i
257 257 .hg/store/meta/dir2
258 258 .hg/store/meta/dir2/00manifest.i
259 259
260 260 Merge of two trees
261 261
262 262 $ hg co 2
263 263 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
264 264 $ hg merge 1
265 265 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 266 (branch merge, don't forget to commit)
267 267 $ hg ci -m 'merge of flat manifests to new tree manifest'
268 268 created new head
269 269 $ hg diff -r 3
270 270
271 271 Parent of tree root manifest should be the flat manifest, and the merge should have two parents
272 272
273 273 $ hg debugindex -m
274 274 rev offset length delta linkrev nodeid p1 p2
275 275 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
276 276 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
277 277 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
278 278 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
279 279 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
280 280 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
281 281
282 282
283 283 Status across flat/tree boundary should work
284 284
285 285 $ hg status --rev '.^' --rev .
286 286 M a
287 287 M dir1/a
288 288 M dir1/dir1/a
289 289
290 290
291 291 Turning off treemanifest config has no effect
292 292
293 293 $ hg debugindex --dir dir1
294 294 rev offset length delta linkrev nodeid p1 p2
295 295 0 0 127 -1 4 064927a0648a 000000000000 000000000000
296 296 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
297 297 $ echo 2 > dir1/a
298 298 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
299 299 $ hg debugindex --dir dir1
300 300 rev offset length delta linkrev nodeid p1 p2
301 301 0 0 127 -1 4 064927a0648a 000000000000 000000000000
302 302 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
303 303 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
304 304
305 305 Stripping and recovering changes should work
306 306
307 307 $ hg st --change tip
308 308 M dir1/a
309 309 $ hg --config extensions.strip= strip tip
310 310 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
311 311 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
312 312 $ hg unbundle -q .hg/strip-backup/*
313 313 $ hg st --change tip
314 314 M dir1/a
315 315
316 316 Shelving and unshelving should work
317 317
318 318 $ echo foo >> dir1/a
319 319 $ hg --config extensions.shelve= shelve
320 320 shelved as default
321 321 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
322 322 $ hg --config extensions.shelve= unshelve
323 323 unshelving change 'default'
324 324 $ hg diff --nodates
325 325 diff -r 708a273da119 dir1/a
326 326 --- a/dir1/a
327 327 +++ b/dir1/a
328 328 @@ -1,1 +1,2 @@
329 329 1
330 330 +foo
331 331
332 332 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
333 333
334 334 $ cd ..
335 335 $ hg init empty-repo
336 336 $ cat << EOF >> empty-repo/.hg/hgrc
337 337 > [experimental]
338 338 > changegroup3=yes
339 339 > EOF
340 340 $ grep treemanifest empty-repo/.hg/requires
341 341 [1]
342 342 $ hg push -R repo -r 0 empty-repo
343 343 pushing to empty-repo
344 344 searching for changes
345 345 adding changesets
346 346 adding manifests
347 347 adding file changes
348 348 added 1 changesets with 2 changes to 2 files
349 349 $ grep treemanifest empty-repo/.hg/requires
350 350 treemanifest
351 351
352 352 Pushing to an empty repo works
353 353
354 354 $ hg --config experimental.treemanifest=1 init clone
355 355 $ grep treemanifest clone/.hg/requires
356 356 treemanifest
357 357 $ hg push -R repo clone
358 358 pushing to clone
359 359 searching for changes
360 360 adding changesets
361 361 adding manifests
362 362 adding file changes
363 363 added 11 changesets with 15 changes to 10 files (+3 heads)
364 364 $ grep treemanifest clone/.hg/requires
365 365 treemanifest
366 366 $ hg -R clone verify
367 367 checking changesets
368 368 checking manifests
369 369 checking directory manifests
370 370 crosschecking files in changesets and manifests
371 371 checking files
372 372 10 files, 11 changesets, 15 total revisions
373 373
374 374 Create deeper repo with tree manifests.
375 375
376 376 $ hg --config experimental.treemanifest=True init deeprepo
377 377 $ cd deeprepo
378 378
379 379 $ mkdir .A
380 380 $ mkdir b
381 381 $ mkdir b/bar
382 382 $ mkdir b/bar/orange
383 383 $ mkdir b/bar/orange/fly
384 384 $ mkdir b/foo
385 385 $ mkdir b/foo/apple
386 386 $ mkdir b/foo/apple/bees
387 387
388 388 $ touch .A/one.txt
389 389 $ touch .A/two.txt
390 390 $ touch b/bar/fruits.txt
391 391 $ touch b/bar/orange/fly/gnat.py
392 392 $ touch b/bar/orange/fly/housefly.txt
393 393 $ touch b/foo/apple/bees/flower.py
394 394 $ touch c.txt
395 395 $ touch d.py
396 396
397 397 $ hg ci -Aqm 'initial'
398 398
399 399 We'll see that visitdir works by removing some treemanifest revlogs and running
400 400 the files command with various parameters.
401 401
402 402 Test files from the root.
403 403
404 404 $ hg files -r .
405 405 .A/one.txt (glob)
406 406 .A/two.txt (glob)
407 407 b/bar/fruits.txt (glob)
408 408 b/bar/orange/fly/gnat.py (glob)
409 409 b/bar/orange/fly/housefly.txt (glob)
410 410 b/foo/apple/bees/flower.py (glob)
411 411 c.txt
412 412 d.py
413 413
414 414 Excludes with a glob should not exclude everything from the glob's root
415 415
416 416 $ hg files -r . -X 'b/fo?' b
417 417 b/bar/fruits.txt (glob)
418 418 b/bar/orange/fly/gnat.py (glob)
419 419 b/bar/orange/fly/housefly.txt (glob)
420 420 $ cp -r .hg/store .hg/store-copy
421 421
422 422 Test files for a subdirectory.
423 423
424 424 $ rm -r .hg/store/meta/~2e_a
425 425 $ hg files -r . b
426 426 b/bar/fruits.txt (glob)
427 427 b/bar/orange/fly/gnat.py (glob)
428 428 b/bar/orange/fly/housefly.txt (glob)
429 429 b/foo/apple/bees/flower.py (glob)
430 430 $ cp -r .hg/store-copy/. .hg/store
431 431
432 432 Test files with just includes and excludes.
433 433
434 434 $ rm -r .hg/store/meta/~2e_a
435 435 $ rm -r .hg/store/meta/b/bar/orange/fly
436 436 $ rm -r .hg/store/meta/b/foo/apple/bees
437 437 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
438 438 b/bar/fruits.txt (glob)
439 439 $ cp -r .hg/store-copy/. .hg/store
440 440
441 441 Test files for a subdirectory, excluding a directory within it.
442 442
443 443 $ rm -r .hg/store/meta/~2e_a
444 444 $ rm -r .hg/store/meta/b/foo
445 445 $ hg files -r . -X path:b/foo b
446 446 b/bar/fruits.txt (glob)
447 447 b/bar/orange/fly/gnat.py (glob)
448 448 b/bar/orange/fly/housefly.txt (glob)
449 449 $ cp -r .hg/store-copy/. .hg/store
450 450
451 451 Test files for a subdirectory, including only a directory within it, and
452 452 including an unrelated directory.
453 453
454 454 $ rm -r .hg/store/meta/~2e_a
455 455 $ rm -r .hg/store/meta/b/foo
456 456 $ hg files -r . -I path:b/bar/orange -I path:a b
457 457 b/bar/orange/fly/gnat.py (glob)
458 458 b/bar/orange/fly/housefly.txt (glob)
459 459 $ cp -r .hg/store-copy/. .hg/store
460 460
461 461 Test files for a pattern, including a directory, and excluding a directory
462 462 within that.
463 463
464 464 $ rm -r .hg/store/meta/~2e_a
465 465 $ rm -r .hg/store/meta/b/foo
466 466 $ rm -r .hg/store/meta/b/bar/orange
467 467 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
468 468 b/bar/fruits.txt (glob)
469 469 $ cp -r .hg/store-copy/. .hg/store
470 470
471 471 Add some more changes to the deep repo
472 472 $ echo narf >> b/bar/fruits.txt
473 473 $ hg ci -m narf
474 474 $ echo troz >> b/bar/orange/fly/gnat.py
475 475 $ hg ci -m troz
476 476
477 477 Verify works
478 478 $ hg verify
479 479 checking changesets
480 480 checking manifests
481 481 checking directory manifests
482 482 crosschecking files in changesets and manifests
483 483 checking files
484 484 8 files, 3 changesets, 10 total revisions
485 485
486 486 Dirlogs are included in fncache
487 487 $ grep meta/.A/00manifest.i .hg/store/fncache
488 488 meta/.A/00manifest.i
489 489
490 490 Rebuilt fncache includes dirlogs
491 491 $ rm .hg/store/fncache
492 492 $ hg debugrebuildfncache
493 493 adding data/.A/one.txt.i
494 494 adding data/.A/two.txt.i
495 495 adding data/b/bar/fruits.txt.i
496 496 adding data/b/bar/orange/fly/gnat.py.i
497 497 adding data/b/bar/orange/fly/housefly.txt.i
498 498 adding data/b/foo/apple/bees/flower.py.i
499 499 adding data/c.txt.i
500 500 adding data/d.py.i
501 501 adding meta/.A/00manifest.i
502 502 adding meta/b/00manifest.i
503 503 adding meta/b/bar/00manifest.i
504 504 adding meta/b/bar/orange/00manifest.i
505 505 adding meta/b/bar/orange/fly/00manifest.i
506 506 adding meta/b/foo/00manifest.i
507 507 adding meta/b/foo/apple/00manifest.i
508 508 adding meta/b/foo/apple/bees/00manifest.i
509 509 16 items added, 0 removed from fncache
510 510
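For context, the fncache is a plain newline-separated list of store paths, so with treemanifest the dirlogs appear as meta/<dir>/00manifest.i entries next to the data/ filelogs. A minimal sketch of listing them (a hypothetical snippet, not part of the test):

    # Read the dirlog entries recorded in the fncache.
    with open('.hg/store/fncache') as f:
        dirlogs = sorted(line.strip() for line in f if line.startswith('meta/'))
    # dirlogs now holds entries such as 'meta/.A/00manifest.i' and
    # 'meta/b/bar/00manifest.i', matching the debugrebuildfncache output above.
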
511 511 Finish first server
512 512 $ killdaemons.py
513 513
514 514 Back up the recently added revlogs
515 515 $ cp -r .hg/store .hg/store-newcopy
516 516
517 517 Verify reports missing dirlog
518 518 $ rm .hg/store/meta/b/00manifest.*
519 519 $ hg verify
520 520 checking changesets
521 521 checking manifests
522 522 checking directory manifests
523 523 0: empty or missing b/
524 524 b/@0: parent-directory manifest refers to unknown revision 67688a370455
525 525 b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
526 526 b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
527 527 warning: orphan revlog 'meta/b/bar/00manifest.i'
528 528 warning: orphan revlog 'meta/b/bar/orange/00manifest.i'
529 529 warning: orphan revlog 'meta/b/bar/orange/fly/00manifest.i'
530 530 warning: orphan revlog 'meta/b/foo/00manifest.i'
531 531 warning: orphan revlog 'meta/b/foo/apple/00manifest.i'
532 532 warning: orphan revlog 'meta/b/foo/apple/bees/00manifest.i'
533 533 crosschecking files in changesets and manifests
534 534 b/bar/fruits.txt@0: in changeset but not in manifest
535 535 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
536 536 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
537 537 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
538 538 checking files
539 539 8 files, 3 changesets, 10 total revisions
540 540 6 warnings encountered!
541 541 8 integrity errors encountered!
542 542 (first damaged changeset appears to be 0)
543 543 [1]
544 544 $ cp -r .hg/store-newcopy/. .hg/store
545 545
546 546 Verify reports missing dirlog entry
547 547 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
548 548 $ hg verify
549 549 checking changesets
550 550 checking manifests
551 551 checking directory manifests
552 552 b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
553 553 b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
554 554 b/bar/@?: rev 1 points to unexpected changeset 1
555 555 b/bar/@?: 5e03c4ee5e4a not in parent-directory manifest
556 556 b/bar/@?: rev 2 points to unexpected changeset 2
557 557 b/bar/@?: 1b16940d66d6 not in parent-directory manifest
558 558 b/bar/orange/@?: rev 1 points to unexpected changeset 2
559 559 (expected None)
560 560 b/bar/orange/fly/@?: rev 1 points to unexpected changeset 2
561 561 (expected None)
562 562 crosschecking files in changesets and manifests
563 563 checking files
564 564 8 files, 3 changesets, 10 total revisions
565 565 2 warnings encountered!
566 566 8 integrity errors encountered!
567 567 (first damaged changeset appears to be 1)
568 568 [1]
569 569 $ cp -r .hg/store-newcopy/. .hg/store
570 570
571 571 Test cloning a treemanifest repo over http.
572 572 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
573 573 $ cat hg.pid >> $DAEMON_PIDS
574 574 $ cd ..
575 575 Even with the client-side knob turned off, the clone we get is a treemanifest repo.
576 576 $ hg clone --config experimental.treemanifest=False \
577 577 > --config experimental.changegroup3=True \
578 578 > http://localhost:$HGPORT deepclone
579 579 requesting all changes
580 580 adding changesets
581 581 adding manifests
582 582 adding file changes
583 583 added 3 changesets with 10 changes to 8 files
584 584 updating to branch default
585 585 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
586 586 No server errors.
587 587 $ cat deeprepo/errors.log
588 588 The requires file got updated to include treemanifest
589 589 $ cat deepclone/.hg/requires | grep treemanifest
590 590 treemanifest
591 591 Tree manifest revlogs exist.
592 592 $ find deepclone/.hg/store/meta | sort
593 593 deepclone/.hg/store/meta
594 594 deepclone/.hg/store/meta/b
595 595 deepclone/.hg/store/meta/b/00manifest.i
596 596 deepclone/.hg/store/meta/b/bar
597 597 deepclone/.hg/store/meta/b/bar/00manifest.i
598 598 deepclone/.hg/store/meta/b/bar/orange
599 599 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
600 600 deepclone/.hg/store/meta/b/bar/orange/fly
601 601 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
602 602 deepclone/.hg/store/meta/b/foo
603 603 deepclone/.hg/store/meta/b/foo/00manifest.i
604 604 deepclone/.hg/store/meta/b/foo/apple
605 605 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
606 606 deepclone/.hg/store/meta/b/foo/apple/bees
607 607 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
608 608 deepclone/.hg/store/meta/~2e_a
609 609 deepclone/.hg/store/meta/~2e_a/00manifest.i
610 610 Verify passes.
611 611 $ cd deepclone
612 612 $ hg verify
613 613 checking changesets
614 614 checking manifests
615 615 checking directory manifests
616 616 crosschecking files in changesets and manifests
617 617 checking files
618 618 8 files, 3 changesets, 10 total revisions
619 619 $ cd ..
620 620
621 621 Create clones using old repo formats to use in later tests
622 622 $ hg clone --config format.usestore=False \
623 623 > --config experimental.changegroup3=True \
624 624 > http://localhost:$HGPORT deeprepo-basicstore
625 625 requesting all changes
626 626 adding changesets
627 627 adding manifests
628 628 adding file changes
629 629 added 3 changesets with 10 changes to 8 files
630 630 updating to branch default
631 631 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
632 632 $ cd deeprepo-basicstore
633 633 $ grep store .hg/requires
634 634 [1]
635 635 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
636 636 $ cat hg.pid >> $DAEMON_PIDS
637 637 $ cd ..
638 638 $ hg clone --config format.usefncache=False \
639 639 > --config experimental.changegroup3=True \
640 640 > http://localhost:$HGPORT deeprepo-encodedstore
641 641 requesting all changes
642 642 adding changesets
643 643 adding manifests
644 644 adding file changes
645 645 added 3 changesets with 10 changes to 8 files
646 646 updating to branch default
647 647 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
648 648 $ cd deeprepo-encodedstore
649 649 $ grep fncache .hg/requires
650 650 [1]
651 651 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
652 652 $ cat hg.pid >> $DAEMON_PIDS
653 653 $ cd ..
654 654
655 655 Local clone with basicstore
656 656 $ hg clone -U deeprepo-basicstore local-clone-basicstore
657 657 $ hg -R local-clone-basicstore verify
658 658 checking changesets
659 659 checking manifests
660 660 checking directory manifests
661 661 crosschecking files in changesets and manifests
662 662 checking files
663 663 8 files, 3 changesets, 10 total revisions
664 664
665 665 Local clone with encodedstore
666 666 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
667 667 $ hg -R local-clone-encodedstore verify
668 668 checking changesets
669 669 checking manifests
670 670 checking directory manifests
671 671 crosschecking files in changesets and manifests
672 672 checking files
673 673 8 files, 3 changesets, 10 total revisions
674 674
675 675 Local clone with fncachestore
676 676 $ hg clone -U deeprepo local-clone-fncachestore
677 677 $ hg -R local-clone-fncachestore verify
678 678 checking changesets
679 679 checking manifests
680 680 checking directory manifests
681 681 crosschecking files in changesets and manifests
682 682 checking files
683 683 8 files, 3 changesets, 10 total revisions
684 684
685 685 Stream clone with basicstore
686 686 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
687 687 > http://localhost:$HGPORT1 stream-clone-basicstore
688 688 streaming all changes
689 689 18 files to transfer, * of data (glob)
690 690 transferred * in * seconds (*) (glob)
691 691 searching for changes
692 692 no changes found
693 693 $ hg -R stream-clone-basicstore verify
694 694 checking changesets
695 695 checking manifests
696 696 checking directory manifests
697 697 crosschecking files in changesets and manifests
698 698 checking files
699 699 8 files, 3 changesets, 10 total revisions
700 700
701 701 Stream clone with encodedstore
702 702 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
703 703 > http://localhost:$HGPORT2 stream-clone-encodedstore
704 704 streaming all changes
705 705 18 files to transfer, * of data (glob)
706 706 transferred * in * seconds (*) (glob)
707 707 searching for changes
708 708 no changes found
709 709 $ hg -R stream-clone-encodedstore verify
710 710 checking changesets
711 711 checking manifests
712 712 checking directory manifests
713 713 crosschecking files in changesets and manifests
714 714 checking files
715 715 8 files, 3 changesets, 10 total revisions
716 716
717 717 Stream clone with fncachestore
718 718 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
719 719 > http://localhost:$HGPORT stream-clone-fncachestore
720 720 streaming all changes
721 721 18 files to transfer, * of data (glob)
722 722 transferred * in * seconds (*) (glob)
723 723 searching for changes
724 724 no changes found
725 725 $ hg -R stream-clone-fncachestore verify
726 726 checking changesets
727 727 checking manifests
728 728 checking directory manifests
729 729 crosschecking files in changesets and manifests
730 730 checking files
731 731 8 files, 3 changesets, 10 total revisions
732 732
733 733 Packed bundle
734 734 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
735 735 writing 3349 bytes for 18 files
736 736 bundle requirements: generaldelta, revlogv1, treemanifest
737 737 $ hg debugbundle --spec repo-packed.hg
738 738 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
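
The spec printed by debugbundle is the bundle type followed by a URL-quoted parameter list; a small decoding sketch (Python 2, matching the era of this code; not part of the test):

    import urllib
    spec = 'none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest'
    kind, params = spec.split(';', 1)
    decoded = urllib.unquote(params)
    # kind    == 'none-packed1' (uncompressed packed/stream-clone bundle)
    # decoded == 'requirements=generaldelta,revlogv1,treemanifest'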
739 739
740 740 Bundle with changegroup2 is not supported
741 741
742 742 $ hg -R deeprepo bundle --all -t v2 deeprepo.bundle
743 743 abort: repository does not support bundle version 02
744 744 [255]
745
746 Pull does not include a changegroup for a manifest the client already has from
747 another branch
748
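The idea the test below exercises can be pictured with a small hypothetical sketch (names invented for illustration; the real logic lives in the changegroup code): a subdirectory manifest group that would carry no deltas, because the client already has every needed revision, is simply not emitted.

    def dir_groups_to_send(dir_nodes, client_has):
        """dir_nodes: {dirname: manifest nodes reachable from the outgoing csets}
        client_has: set of (dirname, node) pairs the client already stores."""
        for dirname, nodes in sorted(dir_nodes.items()):
            missing = [n for n in nodes if (dirname, n) not in client_has]
            if missing:      # an empty group would add nothing, so skip it
                yield dirname, missing

    # In the pull of rev 2 below, the client already has dir/'s manifest
    # revision from the other branch, so only the root manifest changes.
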
749 $ mkdir grafted-dir-repo
750 $ cd grafted-dir-repo
751 $ hg --config experimental.treemanifest=1 init
752 $ mkdir dir
753 $ echo a > dir/file
754 $ echo a > file
755 $ hg ci -Am initial
756 adding dir/file
757 adding file
758 $ echo b > dir/file
759 $ hg ci -m updated
760 $ hg co '.^'
761 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
762 $ hg revert -r tip dir/
763 reverting dir/file (glob)
764 $ echo b > file # to make sure root manifest is sent
765 $ hg ci -m grafted
766 created new head
767 $ cd ..
768
769 $ hg --config experimental.treemanifest=1 clone --pull -r 1 \
770 > grafted-dir-repo grafted-dir-repo-clone
771 adding changesets
772 adding manifests
773 adding file changes
774 added 2 changesets with 3 changes to 2 files
775 updating to branch default
776 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
777 $ cd grafted-dir-repo-clone
778 $ hg pull -r 2
779 pulling from $TESTTMP/grafted-dir-repo (glob)
780 searching for changes
781 adding changesets
782 adding manifests
783 adding file changes
784 added 1 changesets with 1 changes to 1 files (+1 heads)
785 (run 'hg heads' to see heads, 'hg merge' to merge)
786