changegroup: use the 'postclose' API on transaction...
Pierre-Yves David
r23221:cadc9a72 default
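The patch itself is small: instead of handing runhooks straight to repo._afterlock after the changegroup is applied, addchangegroup now registers that call on the transaction through its 'postclose' API and only then closes the transaction. In outline (condensed from the diff below, not a complete excerpt):

    # before: hooks queued unconditionally, after tr.close() had already
    # happened earlier in the function
    repo._afterlock(runhooks)

    # after: queuing deferred until the transaction actually closes
    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                    lambda: repo._afterlock(runhooks))
    tr.close()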
@@ -1,830 +1,831 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import weakref
9 9 from i18n import _
10 10 from node import nullrev, nullid, hex, short
11 11 import mdiff, util, dagutil
12 12 import struct, os, bz2, zlib, tempfile
13 13 import discovery, error, phases, branchmap
14 14
15 15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
16 16 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
17 17
18 18 def readexactly(stream, n):
19 19 '''read n bytes from stream.read and abort if fewer were available'''
20 20 s = stream.read(n)
21 21 if len(s) < n:
22 22 raise util.Abort(_("stream ended unexpectedly"
23 23 " (got %d bytes, expected %d)")
24 24 % (len(s), n))
25 25 return s
26 26
27 27 def getchunk(stream):
28 28 """return the next chunk from stream as a string"""
29 29 d = readexactly(stream, 4)
30 30 l = struct.unpack(">l", d)[0]
31 31 if l <= 4:
32 32 if l:
33 33 raise util.Abort(_("invalid chunk length %d") % l)
34 34 return ""
35 35 return readexactly(stream, l - 4)
36 36
37 37 def chunkheader(length):
38 38 """return a changegroup chunk header (string)"""
39 39 return struct.pack(">l", length + 4)
40 40
41 41 def closechunk():
42 42 """return a changegroup chunk header (string) for a zero-length chunk"""
43 43 return struct.pack(">l", 0)
44 44
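A worked example of the framing above: the 4-byte big-endian length includes the header itself, and a zero length closes a chunkgroup. A standalone round-trip sketch, assuming io.BytesIO stands in for the real stream:

    import io, struct

    payload = b'hello'
    framed = struct.pack(">l", len(payload) + 4) + payload   # chunkheader()
    framed += struct.pack(">l", 0)                           # closechunk()

    stream = io.BytesIO(framed)
    l = struct.unpack(">l", stream.read(4))[0]   # same logic as getchunk()
    assert stream.read(l - 4) == payload
    assert struct.unpack(">l", stream.read(4))[0] == 0       # end of group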
45 45 class nocompress(object):
46 46 def compress(self, x):
47 47 return x
48 48 def flush(self):
49 49 return ""
50 50
51 51 bundletypes = {
52 52 "": ("", nocompress), # only when using unbundle on ssh and old http servers
53 53 # since the unification, ssh accepts a header but there
54 54 # is no capability signaling it.
55 55 "HG10UN": ("HG10UN", nocompress),
56 56 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
57 57 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
58 58 }
59 59
60 60 # hgweb uses this list to communicate its preferred type
61 61 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
62 62
63 63 def writebundle(cg, filename, bundletype, vfs=None):
64 64 """Write a bundle file and return its filename.
65 65
66 66 Existing files will not be overwritten.
67 67 If no filename is specified, a temporary file is created.
68 68 bz2 compression can be turned off.
69 69 The bundle file will be deleted in case of errors.
70 70 """
71 71
72 72 fh = None
73 73 cleanup = None
74 74 try:
75 75 if filename:
76 76 if vfs:
77 77 fh = vfs.open(filename, "wb")
78 78 else:
79 79 fh = open(filename, "wb")
80 80 else:
81 81 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
82 82 fh = os.fdopen(fd, "wb")
83 83 cleanup = filename
84 84
85 85 header, compressor = bundletypes[bundletype]
86 86 fh.write(header)
87 87 z = compressor()
88 88
89 89 # parse the changegroup data, otherwise we will block
90 90 # in case of sshrepo because we don't know the end of the stream
91 91
92 92 # a changegroup has at least 2 chunkgroups (changelog and
93 93 # manifest); after those, an empty chunkgroup marks the end
94 94 # of the changegroup
95 95 for chunk in cg.getchunks():
96 96 fh.write(z.compress(chunk))
97 97 fh.write(z.flush())
98 98 cleanup = None
99 99 return filename
100 100 finally:
101 101 if fh is not None:
102 102 fh.close()
103 103 if cleanup is not None:
104 104 if filename and vfs:
105 105 vfs.unlink(cleanup)
106 106 else:
107 107 os.unlink(cleanup)
108 108
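A hedged usage sketch of writebundle; fakecg is a hypothetical stand-in exposing only the getchunks() interface the function consumes, and this assumes the module's names are in scope:

    class fakecg(object):
        def getchunks(self):
            # a real changegroup yields framed chunks; opaque blobs are
            # enough to exercise the write path
            yield 'chunk-one'
            yield 'chunk-two'

    # with no filename, a temporary hg-bundle-*.hg file is created,
    # prefixed with the 'HG10' header and bz2-compressed
    bundlepath = writebundle(fakecg(), None, 'HG10BZ')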
109 109 def decompressor(fh, alg):
110 110 if alg == 'UN':
111 111 return fh
112 112 elif alg == 'GZ':
113 113 def generator(f):
114 114 zd = zlib.decompressobj()
115 115 for chunk in util.filechunkiter(f):
116 116 yield zd.decompress(chunk)
117 117 elif alg == 'BZ':
118 118 def generator(f):
119 119 zd = bz2.BZ2Decompressor()
120 120 zd.decompress("BZ")
121 121 for chunk in util.filechunkiter(f, 4096):
122 122 yield zd.decompress(chunk)
123 123 else:
124 124 raise util.Abort("unknown bundle compression '%s'" % alg)
125 125 return util.chunkbuffer(generator(fh))
126 126
127 127 class cg1unpacker(object):
128 128 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
129 129 deltaheadersize = struct.calcsize(deltaheader)
130 130 def __init__(self, fh, alg):
131 131 self._stream = decompressor(fh, alg)
132 132 self._type = alg
133 133 self.callback = None
134 134 def compressed(self):
135 135 return self._type != 'UN'
136 136 def read(self, l):
137 137 return self._stream.read(l)
138 138 def seek(self, pos):
139 139 return self._stream.seek(pos)
140 140 def tell(self):
141 141 return self._stream.tell()
142 142 def close(self):
143 143 return self._stream.close()
144 144
145 145 def chunklength(self):
146 146 d = readexactly(self._stream, 4)
147 147 l = struct.unpack(">l", d)[0]
148 148 if l <= 4:
149 149 if l:
150 150 raise util.Abort(_("invalid chunk length %d") % l)
151 151 return 0
152 152 if self.callback:
153 153 self.callback()
154 154 return l - 4
155 155
156 156 def changelogheader(self):
157 157 """v10 does not have a changelog header chunk"""
158 158 return {}
159 159
160 160 def manifestheader(self):
161 161 """v10 does not have a manifest header chunk"""
162 162 return {}
163 163
164 164 def filelogheader(self):
165 165 """return the header of the filelogs chunk, v10 only has the filename"""
166 166 l = self.chunklength()
167 167 if not l:
168 168 return {}
169 169 fname = readexactly(self._stream, l)
170 170 return {'filename': fname}
171 171
172 172 def _deltaheader(self, headertuple, prevnode):
173 173 node, p1, p2, cs = headertuple
174 174 if prevnode is None:
175 175 deltabase = p1
176 176 else:
177 177 deltabase = prevnode
178 178 return node, p1, p2, deltabase, cs
179 179
180 180 def deltachunk(self, prevnode):
181 181 l = self.chunklength()
182 182 if not l:
183 183 return {}
184 184 headerdata = readexactly(self._stream, self.deltaheadersize)
185 185 header = struct.unpack(self.deltaheader, headerdata)
186 186 delta = readexactly(self._stream, l - self.deltaheadersize)
187 187 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
188 188 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
189 189 'deltabase': deltabase, 'delta': delta}
190 190
191 191 def getchunks(self):
192 192 """returns all the chunks contains in the bundle
193 193
194 194 Used when you need to forward the binary stream to a file or another
195 195 network API. To do so, it parse the changegroup data, otherwise it will
196 196 block in case of sshrepo because it don't know the end of the stream.
197 197 """
198 198 # a changegroup has at least 2 chunkgroups (changelog and
199 199 # manifest); after those, an empty chunkgroup marks the end
200 200 # of the changegroup
201 201 empty = False
202 202 count = 0
203 203 while not empty or count <= 2:
204 204 empty = True
205 205 count += 1
206 206 while True:
207 207 chunk = getchunk(self)
208 208 if not chunk:
209 209 break
210 210 empty = False
211 211 yield chunkheader(len(chunk))
212 212 pos = 0
213 213 while pos < len(chunk):
214 214 next = pos + 2**20
215 215 yield chunk[pos:next]
216 216 pos = next
217 217 yield closechunk()
218 218
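The loop condition 'not empty or count <= 2' encodes the layout: the changelog and manifest groups are always consumed, then file groups follow until an empty group ends the changegroup. A small simulation of that termination rule, modelling each chunkgroup as a list of chunk payloads (an empty list is the empty group):

    def countgroups(groups):
        it = iter(groups)
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            for chunk in next(it):
                empty = False
        return count

    # changelog + manifest + one filelog group + terminating empty group
    assert countgroups([['cl'], ['mf'], ['fname', 'delta'], []]) == 4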
219 219 class cg2unpacker(cg1unpacker):
220 220 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
221 221 deltaheadersize = struct.calcsize(deltaheader)
222 222
223 223 def _deltaheader(self, headertuple, prevnode):
224 224 node, p1, p2, deltabase, cs = headertuple
225 225 return node, p1, p2, deltabase, cs
226 226
227 227 class headerlessfixup(object):
228 228 def __init__(self, fh, h):
229 229 self._h = h
230 230 self._fh = fh
231 231 def read(self, n):
232 232 if self._h:
233 233 d, self._h = self._h[:n], self._h[n:]
234 234 if len(d) < n:
235 235 d += readexactly(self._fh, n - len(d))
236 236 return d
237 237 return readexactly(self._fh, n)
238 238
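headerlessfixup splices already-consumed header bytes back in front of the remaining stream, so the unpacker can be handed a stream whose magic was read off during type detection. A short sketch, assuming the class above is in scope:

    import io

    fh = headerlessfixup(io.BytesIO(b'rest-of-stream'), b'HG10UN')
    assert fh.read(6) == b'HG10UN'   # served from the saved header
    assert fh.read(4) == b'rest'     # then from the underlying stream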
239 239 class cg1packer(object):
240 240 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
241 241 def __init__(self, repo, bundlecaps=None):
242 242 """Given a source repo, construct a bundler.
243 243
244 244 bundlecaps is optional and can be used to specify the set of
245 245 capabilities which can be used to build the bundle.
246 246 """
247 247 # Set of capabilities we can use to build the bundle.
248 248 if bundlecaps is None:
249 249 bundlecaps = set()
250 250 self._bundlecaps = bundlecaps
251 251 self._changelog = repo.changelog
252 252 self._manifest = repo.manifest
253 253 reorder = repo.ui.config('bundle', 'reorder', 'auto')
254 254 if reorder == 'auto':
255 255 reorder = None
256 256 else:
257 257 reorder = util.parsebool(reorder)
258 258 self._repo = repo
259 259 self._reorder = reorder
260 260 self._progress = repo.ui.progress
261 261 def close(self):
262 262 return closechunk()
263 263
264 264 def fileheader(self, fname):
265 265 return chunkheader(len(fname)) + fname
266 266
267 267 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
268 268 """Calculate a delta group, yielding a sequence of changegroup chunks
269 269 (strings).
270 270
271 271 Given a list of changeset revs, return a set of deltas and
272 272 metadata corresponding to nodes. The first delta is
273 273 first parent(nodelist[0]) -> nodelist[0], the receiver is
274 274 guaranteed to have this parent as it has all history before
275 275 these changesets. If firstparent is nullrev, the
276 276 changegroup starts with a full revision.
277 277
278 278 If units is not None, progress detail will be generated; units specifies
279 279 the type of revlog that is touched (changelog, manifest, etc.).
280 280 """
281 281 # if we don't have any revisions touched by these changesets, bail
282 282 if len(nodelist) == 0:
283 283 yield self.close()
284 284 return
285 285
286 286 # for generaldelta revlogs, we linearize the revs; this will both be
287 287 # much quicker and generate a much smaller bundle
288 288 if (revlog._generaldelta and reorder is not False) or reorder:
289 289 dag = dagutil.revlogdag(revlog)
290 290 revs = set(revlog.rev(n) for n in nodelist)
291 291 revs = dag.linearize(revs)
292 292 else:
293 293 revs = sorted([revlog.rev(n) for n in nodelist])
294 294
295 295 # add the parent of the first rev
296 296 p = revlog.parentrevs(revs[0])[0]
297 297 revs.insert(0, p)
298 298
299 299 # build deltas
300 300 total = len(revs) - 1
301 301 msgbundling = _('bundling')
302 302 for r in xrange(len(revs) - 1):
303 303 if units is not None:
304 304 self._progress(msgbundling, r + 1, unit=units, total=total)
305 305 prev, curr = revs[r], revs[r + 1]
306 306 linknode = lookup(revlog.node(curr))
307 307 for c in self.revchunk(revlog, curr, prev, linknode):
308 308 yield c
309 309
310 310 yield self.close()
311 311
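Concretely, after linearizing, group() prepends the parent of the first rev and deltas each rev against its predecessor in the list, so every delta base is either already on the receiver or carried earlier in the same group. A toy illustration with hypothetical rev numbers:

    revs = [2, 5, 7]      # linearized revs to bundle
    revs.insert(0, 1)     # parent of the first rev; the receiver has it
    pairs = [(revs[r], revs[r + 1]) for r in range(len(revs) - 1)]
    assert pairs == [(1, 2), (2, 5), (5, 7)]   # (delta base, target) pairs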
312 312 # filter any nodes that claim to be part of the known set
313 313 def prune(self, revlog, missing, commonrevs, source):
314 314 rr, rl = revlog.rev, revlog.linkrev
315 315 return [n for n in missing if rl(rr(n)) not in commonrevs]
316 316
317 317 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
318 318 '''yield a sequence of changegroup chunks (strings)'''
319 319 repo = self._repo
320 320 cl = self._changelog
321 321 mf = self._manifest
322 322 reorder = self._reorder
323 323 progress = self._progress
324 324
325 325 # for progress output
326 326 msgbundling = _('bundling')
327 327
328 328 mfs = {} # needed manifests
329 329 fnodes = {} # needed file nodes
330 330 changedfiles = set()
331 331
332 332 # Callback for the changelog, used to collect changed files and manifest
333 333 # nodes.
334 334 # Returns the linkrev node (identity in the changelog case).
335 335 def lookupcl(x):
336 336 c = cl.read(x)
337 337 changedfiles.update(c[3])
338 338 # record the first changeset introducing this manifest version
339 339 mfs.setdefault(c[0], x)
340 340 return x
341 341
342 342 # Callback for the manifest, used to collect linkrevs for filelog
343 343 # revisions.
344 344 # Returns the linkrev node (collected in lookupcl).
345 345 def lookupmf(x):
346 346 clnode = mfs[x]
347 347 if not fastpathlinkrev:
348 348 mdata = mf.readfast(x)
349 349 for f, n in mdata.iteritems():
350 350 if f in changedfiles:
351 351 # record the first changeset introducing this filelog
352 352 # version
353 353 fnodes[f].setdefault(n, clnode)
354 354 return clnode
355 355
356 356 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
357 357 reorder=reorder):
358 358 yield chunk
359 359 progress(msgbundling, None)
360 360
361 361 for f in changedfiles:
362 362 fnodes[f] = {}
363 363 mfnodes = self.prune(mf, mfs, commonrevs, source)
364 364 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
365 365 reorder=reorder):
366 366 yield chunk
367 367 progress(msgbundling, None)
368 368
369 369 mfs.clear()
370 370 needed = set(cl.rev(x) for x in clnodes)
371 371
372 372 def linknodes(filerevlog, fname):
373 373 if fastpathlinkrev:
374 374 llr = filerevlog.linkrev
375 375 def genfilenodes():
376 376 for r in filerevlog:
377 377 linkrev = llr(r)
378 378 if linkrev in needed:
379 379 yield filerevlog.node(r), cl.node(linkrev)
380 380 fnodes[fname] = dict(genfilenodes())
381 381 return fnodes.get(fname, {})
382 382
383 383 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
384 384 source):
385 385 yield chunk
386 386
387 387 yield self.close()
388 388 progress(msgbundling, None)
389 389
390 390 if clnodes:
391 391 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
392 392
393 393 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
394 394 repo = self._repo
395 395 progress = self._progress
396 396 reorder = self._reorder
397 397 msgbundling = _('bundling')
398 398
399 399 total = len(changedfiles)
400 400 # for progress output
401 401 msgfiles = _('files')
402 402 for i, fname in enumerate(sorted(changedfiles)):
403 403 filerevlog = repo.file(fname)
404 404 if not filerevlog:
405 405 raise util.Abort(_("empty or missing revlog for %s") % fname)
406 406
407 407 linkrevnodes = linknodes(filerevlog, fname)
408 408 # Lookup table for filenodes; we collected the linkrev nodes above in
409 409 # the fastpath case and with lookupmf in the slowpath case.
410 410 def lookupfilelog(x):
411 411 return linkrevnodes[x]
412 412
413 413 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
414 414 if filenodes:
415 415 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
416 416 total=total)
417 417 yield self.fileheader(fname)
418 418 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
419 419 reorder=reorder):
420 420 yield chunk
421 421
422 422 def deltaparent(self, revlog, rev, p1, p2, prev):
423 423 return prev
424 424
425 425 def revchunk(self, revlog, rev, prev, linknode):
426 426 node = revlog.node(rev)
427 427 p1, p2 = revlog.parentrevs(rev)
428 428 base = self.deltaparent(revlog, rev, p1, p2, prev)
429 429
430 430 prefix = ''
431 431 if base == nullrev:
432 432 delta = revlog.revision(node)
433 433 prefix = mdiff.trivialdiffheader(len(delta))
434 434 else:
435 435 delta = revlog.revdiff(base, rev)
436 436 p1n, p2n = revlog.parents(node)
437 437 basenode = revlog.node(base)
438 438 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
439 439 meta += prefix
440 440 l = len(meta) + len(delta)
441 441 yield chunkheader(l)
442 442 yield meta
443 443 yield delta
444 444 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
445 445 # do nothing with basenode, it is implicitly the previous one in HG10
446 446 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
447 447
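The wire-format difference between the two packers is the delta header: cg1 sends four 20-byte nodes and the receiver infers the delta base (prev, or p1 for the first chunk), while cg2 appends an explicit fifth node carrying the base. The struct formats at the top of the file size out accordingly:

    import struct

    assert struct.calcsize("20s20s20s20s") == 80      # cg1: node p1 p2 linknode
    assert struct.calcsize("20s20s20s20s20s") == 100  # cg2: ... + deltabase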
448 448 class cg2packer(cg1packer):
449 449
450 450 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
451 451
452 452 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
453 453 if (revlog._generaldelta and reorder is not True):
454 454 reorder = False
455 455 return cg1packer.group(self, nodelist, revlog, lookup,
456 456 units=units, reorder=reorder)
457 457
458 458 def deltaparent(self, revlog, rev, p1, p2, prev):
459 459 dp = revlog.deltaparent(rev)
460 460 # avoid storing full revisions; pick prev in those cases
461 461 # also pick prev when we can't be sure remote has dp
462 462 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
463 463 return prev
464 464 return dp
465 465
466 466 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
467 467 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
468 468
469 469 packermap = {'01': (cg1packer, cg1unpacker),
470 470 '02': (cg2packer, cg2unpacker)}
471 471
472 472 def _changegroupinfo(repo, nodes, source):
473 473 if repo.ui.verbose or source == 'bundle':
474 474 repo.ui.status(_("%d changesets found\n") % len(nodes))
475 475 if repo.ui.debugflag:
476 476 repo.ui.debug("list of changesets:\n")
477 477 for node in nodes:
478 478 repo.ui.debug("%s\n" % hex(node))
479 479
480 480 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
481 481 repo = repo.unfiltered()
482 482 commonrevs = outgoing.common
483 483 csets = outgoing.missing
484 484 heads = outgoing.missingheads
485 485 # We go through the fast path if we get told to, or if all (unfiltered)
486 486 # heads have been requested (since we then know that all linkrevs will
487 487 # be pulled by the client).
488 488 heads.sort()
489 489 fastpathlinkrev = fastpath or (
490 490 repo.filtername is None and heads == sorted(repo.heads()))
491 491
492 492 repo.hook('preoutgoing', throw=True, source=source)
493 493 _changegroupinfo(repo, csets, source)
494 494 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
495 495
496 496 def getsubset(repo, outgoing, bundler, source, fastpath=False):
497 497 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
498 498 return cg1unpacker(util.chunkbuffer(gengroup), 'UN')
499 499
500 500 def changegroupsubset(repo, roots, heads, source):
501 501 """Compute a changegroup consisting of all the nodes that are
502 502 descendants of any of the roots and ancestors of any of the heads.
503 503 Return a chunkbuffer object whose read() method will return
504 504 successive changegroup chunks.
505 505
506 506 It is fairly complex as determining which filenodes and which
507 507 manifest nodes need to be included for the changeset to be complete
508 508 is non-trivial.
509 509
510 510 Another wrinkle is doing the reverse, figuring out which changeset in
511 511 the changegroup a particular filenode or manifestnode belongs to.
512 512 """
513 513 cl = repo.changelog
514 514 if not roots:
515 515 roots = [nullid]
516 516 # TODO: remove call to nodesbetween.
517 517 csets, roots, heads = cl.nodesbetween(roots, heads)
518 518 discbases = []
519 519 for n in roots:
520 520 discbases.extend([p for p in cl.parents(n) if p != nullid])
521 521 outgoing = discovery.outgoing(cl, discbases, heads)
522 522 bundler = cg1packer(repo)
523 523 return getsubset(repo, outgoing, bundler, source)
524 524
525 525 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
526 526 version='01'):
527 527 """Like getbundle, but taking a discovery.outgoing as an argument.
528 528
529 529 This is only implemented for local repos and reuses potentially
530 530 precomputed sets in outgoing. Returns a raw changegroup generator."""
531 531 if not outgoing.missing:
532 532 return None
533 533 bundler = packermap[version][0](repo, bundlecaps)
534 534 return getsubsetraw(repo, outgoing, bundler, source)
535 535
536 536 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
537 537 """Like getbundle, but taking a discovery.outgoing as an argument.
538 538
539 539 This is only implemented for local repos and reuses potentially
540 540 precomputed sets in outgoing."""
541 541 if not outgoing.missing:
542 542 return None
543 543 bundler = cg1packer(repo, bundlecaps)
544 544 return getsubset(repo, outgoing, bundler, source)
545 545
546 546 def _computeoutgoing(repo, heads, common):
547 547 """Computes which revs are outgoing given a set of common
548 548 and a set of heads.
549 549
550 550 This is a separate function so extensions can have access to
551 551 the logic.
552 552
553 553 Returns a discovery.outgoing object.
554 554 """
555 555 cl = repo.changelog
556 556 if common:
557 557 hasnode = cl.hasnode
558 558 common = [n for n in common if hasnode(n)]
559 559 else:
560 560 common = [nullid]
561 561 if not heads:
562 562 heads = cl.heads()
563 563 return discovery.outgoing(cl, common, heads)
564 564
565 565 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
566 566 version='01'):
567 567 """Like changegroupsubset, but returns the set difference between the
568 568 ancestors of heads and the ancestors of common.
569 569
570 570 If heads is None, use the local heads. If common is None, use [nullid].
571 571
572 572 If version is None, use a version '1' changegroup.
573 573
574 574 The nodes in common might not all be known locally due to the way the
575 575 current discovery protocol works. Returns a raw changegroup generator.
576 576 """
577 577 outgoing = _computeoutgoing(repo, heads, common)
578 578 return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
579 579 version=version)
580 580
581 581 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
582 582 """Like changegroupsubset, but returns the set difference between the
583 583 ancestors of heads and the ancestors of common.
584 584
585 585 If heads is None, use the local heads. If common is None, use [nullid].
586 586
587 587 The nodes in common might not all be known locally due to the way the
588 588 current discovery protocol works.
589 589 """
590 590 outgoing = _computeoutgoing(repo, heads, common)
591 591 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
592 592
593 593 def changegroup(repo, basenodes, source):
594 594 # to avoid a race we use changegroupsubset() (issue1320)
595 595 return changegroupsubset(repo, basenodes, repo.heads(), source)
596 596
597 597 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
598 598 revisions = 0
599 599 files = 0
600 600 while True:
601 601 chunkdata = source.filelogheader()
602 602 if not chunkdata:
603 603 break
604 604 f = chunkdata["filename"]
605 605 repo.ui.debug("adding %s revisions\n" % f)
606 606 pr()
607 607 fl = repo.file(f)
608 608 o = len(fl)
609 609 if not fl.addgroup(source, revmap, trp):
610 610 raise util.Abort(_("received file revlog group is empty"))
611 611 revisions += len(fl) - o
612 612 files += 1
613 613 if f in needfiles:
614 614 needs = needfiles[f]
615 615 for new in xrange(o, len(fl)):
616 616 n = fl.node(new)
617 617 if n in needs:
618 618 needs.remove(n)
619 619 else:
620 620 raise util.Abort(
621 621 _("received spurious file revlog entry"))
622 622 if not needs:
623 623 del needfiles[f]
624 624 repo.ui.progress(_('files'), None)
625 625
626 626 for f, needs in needfiles.iteritems():
627 627 fl = repo.file(f)
628 628 for n in needs:
629 629 try:
630 630 fl.rev(n)
631 631 except error.LookupError:
632 632 raise util.Abort(
633 633 _('missing file data for %s:%s - run hg verify') %
634 634 (f, hex(n)))
635 635
636 636 return revisions, files
637 637
638 638 def addchangegroup(repo, source, srctype, url, emptyok=False,
639 639 targetphase=phases.draft):
640 640 """Add the changegroup returned by source.read() to this repo.
641 641 srctype is a string like 'push', 'pull', or 'unbundle'. url is
642 642 the URL of the repo where this changegroup is coming from.
643 643
644 644 Return an integer summarizing the change to this repo:
645 645 - nothing changed or no source: 0
646 646 - more heads than before: 1+added heads (2..n)
647 647 - fewer heads than before: -1-removed heads (-2..-n)
648 648 - number of heads stays the same: 1
649 649 """
650 650 repo = repo.unfiltered()
651 651 def csmap(x):
652 652 repo.ui.debug("add changeset %s\n" % short(x))
653 653 return len(cl)
654 654
655 655 def revmap(x):
656 656 return cl.rev(x)
657 657
658 658 if not source:
659 659 return 0
660 660
661 661 changesets = files = revisions = 0
662 662 efiles = set()
663 663
664 664 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
665 665 # The transaction could have been created before and already carries source
666 666 # information. In this case we use the top level data. We overwrite the
667 667 # argument because we need to use the top level value (if they exist) in
668 668 # this function.
669 669 srctype = tr.hookargs.setdefault('source', srctype)
670 670 url = tr.hookargs.setdefault('url', url)
671 671
672 672 # write changelog data to temp files so concurrent readers will not see
673 673 # inconsistent view
674 674 cl = repo.changelog
675 675 cl.delayupdate(tr)
676 676 oldheads = cl.heads()
677 677 try:
678 678 repo.hook('prechangegroup', throw=True, **tr.hookargs)
679 679
680 680 trp = weakref.proxy(tr)
681 681 # pull off the changeset group
682 682 repo.ui.status(_("adding changesets\n"))
683 683 clstart = len(cl)
684 684 class prog(object):
685 685 step = _('changesets')
686 686 count = 1
687 687 ui = repo.ui
688 688 total = None
689 689 def __call__(repo):
690 690 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
691 691 total=repo.total)
692 692 repo.count += 1
693 693 pr = prog()
694 694 source.callback = pr
695 695
696 696 source.changelogheader()
697 697 srccontent = cl.addgroup(source, csmap, trp)
698 698 if not (srccontent or emptyok):
699 699 raise util.Abort(_("received changelog group is empty"))
700 700 clend = len(cl)
701 701 changesets = clend - clstart
702 702 for c in xrange(clstart, clend):
703 703 efiles.update(repo[c].files())
704 704 efiles = len(efiles)
705 705 repo.ui.progress(_('changesets'), None)
706 706
707 707 # pull off the manifest group
708 708 repo.ui.status(_("adding manifests\n"))
709 709 pr.step = _('manifests')
710 710 pr.count = 1
711 711 pr.total = changesets # manifests <= changesets
712 712 # no need to check for empty manifest group here:
713 713 # if the result of the merge of 1 and 2 is the same in 3 and 4,
714 714 # no new manifest will be created and the manifest group will
715 715 # be empty during the pull
716 716 source.manifestheader()
717 717 repo.manifest.addgroup(source, revmap, trp)
718 718 repo.ui.progress(_('manifests'), None)
719 719
720 720 needfiles = {}
721 721 if repo.ui.configbool('server', 'validate', default=False):
722 722 # validate incoming csets have their manifests
723 723 for cset in xrange(clstart, clend):
724 724 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
725 725 mfest = repo.manifest.readdelta(mfest)
726 726 # store file nodes we must see
727 727 for f, n in mfest.iteritems():
728 728 needfiles.setdefault(f, set()).add(n)
729 729
730 730 # process the files
731 731 repo.ui.status(_("adding file changes\n"))
732 732 pr.step = _('files')
733 733 pr.count = 1
734 734 pr.total = efiles
735 735 source.callback = None
736 736
737 737 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
738 738 needfiles)
739 739 revisions += newrevs
740 740 files += newfiles
741 741
742 742 dh = 0
743 743 if oldheads:
744 744 heads = cl.heads()
745 745 dh = len(heads) - len(oldheads)
746 746 for h in heads:
747 747 if h not in oldheads and repo[h].closesbranch():
748 748 dh -= 1
749 749 htext = ""
750 750 if dh:
751 751 htext = _(" (%+d heads)") % dh
752 752
753 753 repo.ui.status(_("added %d changesets"
754 754 " with %d changes to %d files%s\n")
755 755 % (changesets, revisions, files, htext))
756 756 repo.invalidatevolatilesets()
757 757
758 758 if changesets > 0:
759 759 p = lambda: tr.writepending() and repo.root or ""
760 760 if 'node' not in tr.hookargs:
761 761 tr.hookargs['node'] = hex(cl.node(clstart))
762 762 hookargs = dict(tr.hookargs)
763 763 else:
764 764 hookargs = dict(tr.hookargs)
765 765 hookargs['node'] = hex(cl.node(clstart))
766 766 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
767 767
768 768 added = [cl.node(r) for r in xrange(clstart, clend)]
769 769 publishing = repo.ui.configbool('phases', 'publish', True)
770 770 if srctype in ('push', 'serve'):
771 771 # Old servers can not push the boundary themselves.
772 772 # New servers won't push the boundary if changeset already
773 773 # exists locally as secret
774 774 #
775 775 # We should not use added here but the list of all changes in
776 776 # the bundle
777 777 if publishing:
778 778 phases.advanceboundary(repo, tr, phases.public, srccontent)
779 779 else:
780 780 # Those changesets have been pushed from the outside, their
781 781 # phases are going to be pushed alongside. Therefore
782 782 # `targetphase` is ignored.
783 783 phases.advanceboundary(repo, tr, phases.draft, srccontent)
784 784 phases.retractboundary(repo, tr, phases.draft, added)
785 785 elif srctype != 'strip':
786 786 # publishing only alters behavior during push
787 787 #
788 788 # strip should not touch boundary at all
789 789 phases.retractboundary(repo, tr, targetphase, added)
790 790
791
792 tr.close()
793
794 791 if changesets > 0:
795 792 if srctype != 'strip':
796 793 # During strip, branchcache is invalid but the coming call to
797 794 # `destroyed` will repair it.
798 795 # In other cases we can safely update the cache on disk.
799 796 branchmap.updatecache(repo.filtered('served'))
800 797
801 798 def runhooks():
802 799 # These hooks run when the lock releases, not when the
803 800 # transaction closes. So it's possible for the changelog
804 801 # to have changed since we last saw it.
805 802 if clstart >= len(repo):
806 803 return
807 804
808 805 # forcefully update the on-disk branch cache
809 806 repo.ui.debug("updating the branch cache\n")
810 807 repo.hook("changegroup", **hookargs)
811 808
812 809 for n in added:
813 810 args = hookargs.copy()
814 811 args['node'] = hex(n)
815 812 repo.hook("incoming", **args)
816 813
817 814 newheads = [h for h in repo.heads() if h not in oldheads]
818 815 repo.ui.log("incoming",
819 816 "%s incoming changes - new heads: %s\n",
820 817 len(added),
821 818 ', '.join([hex(c[:6]) for c in newheads]))
822 repo._afterlock(runhooks)
819
820 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
821 lambda: repo._afterlock(runhooks))
822
823 tr.close()
823 824
824 825 finally:
825 826 tr.release()
826 827 # never return 0 here:
827 828 if dh < 0:
828 829 return dh - 1
829 830 else:
830 831 return dh + 1
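The hunk above is the substance of the patch: repo._afterlock(runhooks) used to run after tr.close() unconditionally; the queuing is now registered via tr.addpostclose(...) before the close, so a transaction that aborts never schedules the hooks. A runnable toy demonstrating that property (the class is a simplified stand-in, not Mercurial's transaction):

    class toytransaction(object):
        def __init__(self):
            self._postclose = {}
            self.ran = []

        def addpostclose(self, category, callback):
            self._postclose[category] = callback

        def close(self):
            # postclose callbacks fire only on a successful close
            for _cat, cb in sorted(self._postclose.items()):
                cb()

        def release(self):
            # abort path: pending postclose callbacks are simply dropped
            self._postclose.clear()

    tr = toytransaction()
    tr.addpostclose('changegroup-runhooks-%020i' % 0,
                    lambda: tr.ran.append('changegroup hooks'))
    tr.release()          # simulates 'transaction abort! rollback completed'
    assert tr.ran == []   # hooks never ran, matching the test updates below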
@@ -1,484 +1,482 b''
1 1 Test exchange of common information using bundle2
2 2
3 3
4 4 $ getmainid() {
5 5 > hg -R main log --template '{node}\n' --rev "$1"
6 6 > }
7 7
8 8 enable obsolescence
9 9
10 10 $ cat >> $HGRCPATH << EOF
11 11 > [experimental]
12 12 > evolution=createmarkers,exchange
13 13 > bundle2-exp=True
14 14 > [ui]
15 15 > ssh=python "$TESTDIR/dummyssh"
16 16 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 17 > [web]
18 18 > push_ssl = false
19 19 > allow_push = *
20 20 > [phases]
21 21 > publish=False
22 22 > [hooks]
23 23 > changegroup = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" changegroup"
24 24 > b2x-transactionclose = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" b2x-transactionclose"
25 25 > EOF
26 26
27 27 The extension requires a repo (currently unused)
28 28
29 29 $ hg init main
30 30 $ cd main
31 31 $ touch a
32 32 $ hg add a
33 33 $ hg commit -m 'a'
34 34
35 35 $ hg unbundle $TESTDIR/bundles/rebase.hg
36 36 adding changesets
37 37 adding manifests
38 38 adding file changes
39 39 added 8 changesets with 7 changes to 7 files (+3 heads)
40 40 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=unbundle HG_URL=bundle:*/rebase.hg (glob)
41 41 (run 'hg heads' to see heads, 'hg merge' to merge)
42 42
43 43 $ cd ..
44 44
45 45 Real world exchange
46 46 =====================
47 47
48 48 Add more obsolescence information
49 49
50 50 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
51 51 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
52 52
53 53 clone --pull
54 54
55 55 $ hg -R main phase --public cd010b8cd998
56 56 $ hg clone main other --pull --rev 9520eea781bc
57 57 adding changesets
58 58 adding manifests
59 59 adding file changes
60 60 added 2 changesets with 2 changes to 2 files
61 61 1 new obsolescence markers
62 62 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
63 63 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
64 64 updating to branch default
65 65 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 66 $ hg -R other log -G
67 67 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
68 68 |
69 69 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
70 70
71 71 $ hg -R other debugobsolete
72 72 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
73 73
74 74 pull
75 75
76 76 $ hg -R main phase --public 9520eea781bc
77 77 $ hg -R other pull -r 24b6387c8c8c
78 78 pulling from $TESTTMP/main (glob)
79 79 searching for changes
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 1 changesets with 1 changes to 1 files (+1 heads)
84 84 1 new obsolescence markers
85 85 changegroup hook: HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
86 86 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
87 87 (run 'hg heads' to see heads, 'hg merge' to merge)
88 88 $ hg -R other log -G
89 89 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
90 90 |
91 91 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
92 92 |/
93 93 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
94 94
95 95 $ hg -R other debugobsolete
96 96 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
97 97 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
98 98
99 99 pull empty (with phase movement)
100 100
101 101 $ hg -R main phase --public 24b6387c8c8c
102 102 $ hg -R other pull -r 24b6387c8c8c
103 103 pulling from $TESTTMP/main (glob)
104 104 no changes found
105 105 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
106 106 $ hg -R other log -G
107 107 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
108 108 |
109 109 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
110 110 |/
111 111 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
112 112
113 113 $ hg -R other debugobsolete
114 114 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
115 115 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
116 116
117 117 pull empty
118 118
119 119 $ hg -R other pull -r 24b6387c8c8c
120 120 pulling from $TESTTMP/main (glob)
121 121 no changes found
122 122 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
123 123 $ hg -R other log -G
124 124 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
125 125 |
126 126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
127 127 |/
128 128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
129 129
130 130 $ hg -R other debugobsolete
131 131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
133 133
134 134 add extra data to test their exchange during push
135 135
136 136 $ hg -R main bookmark --rev eea13746799a book_eea1
137 137 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
138 138 $ hg -R main bookmark --rev 02de42196ebe book_02de
139 139 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
140 140 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
141 141 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
142 142 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
143 143 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
144 144 $ hg -R main bookmark --rev 32af7686d403 book_32af
145 145 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
146 146
147 147 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
148 148 $ hg -R other bookmark --rev cd010b8cd998 book_02de
149 149 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
150 150 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
151 151 $ hg -R other bookmark --rev cd010b8cd998 book_32af
152 152
153 153 $ hg -R main phase --public eea13746799a
154 154
155 155 push
156 156 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
157 157 pushing to other
158 158 searching for changes
159 159 changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_SOURCE=push HG_URL=push
160 160 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_URL=push
161 161 remote: adding changesets
162 162 remote: adding manifests
163 163 remote: adding file changes
164 164 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
165 165 remote: 1 new obsolescence markers
166 166 updating bookmark book_eea1
167 167 $ hg -R other log -G
168 168 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
169 169 |\
170 170 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
171 171 | |
172 172 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
173 173 |/
174 174 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
175 175
176 176 $ hg -R other debugobsolete
177 177 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
178 178 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
179 179 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
180 180
181 181 pull over ssh
182 182
183 183 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
184 184 pulling from ssh://user@dummy/main
185 185 searching for changes
186 186 adding changesets
187 187 adding manifests
188 188 adding file changes
189 189 added 1 changesets with 1 changes to 1 files (+1 heads)
190 190 1 new obsolescence markers
191 191 updating bookmark book_02de
192 192 changegroup hook: HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
193 193 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
194 194 (run 'hg heads' to see heads, 'hg merge' to merge)
195 195 $ hg -R other debugobsolete
196 196 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
197 197 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
198 198 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
199 199 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
200 200
201 201 pull over http
202 202
203 203 $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
204 204 $ cat main.pid >> $DAEMON_PIDS
205 205
206 206 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
207 207 pulling from http://localhost:$HGPORT/
208 208 searching for changes
209 209 adding changesets
210 210 adding manifests
211 211 adding file changes
212 212 added 1 changesets with 1 changes to 1 files (+1 heads)
213 213 1 new obsolescence markers
214 214 updating bookmark book_42cc
215 215 changegroup hook: HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
216 216 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
217 217 (run 'hg heads .' to see heads, 'hg merge' to merge)
218 218 $ cat main-error.log
219 219 $ hg -R other debugobsolete
220 220 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
221 221 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
222 222 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
223 223 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
224 224 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
225 225
226 226 push over ssh
227 227
228 228 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
229 229 pushing to ssh://user@dummy/other
230 230 searching for changes
231 231 remote: adding changesets
232 232 remote: adding manifests
233 233 remote: adding file changes
234 234 remote: added 1 changesets with 1 changes to 1 files
235 235 remote: 1 new obsolescence markers
236 236 updating bookmark book_5fdd
237 237 remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
238 238 remote: b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
239 239 $ hg -R other log -G
240 240 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
241 241 |
242 242 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
243 243 |
244 244 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
245 245 | |
246 246 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
247 247 | |/|
248 248 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
249 249 |/ /
250 250 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
251 251 |/
252 252 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
253 253
254 254 $ hg -R other debugobsolete
255 255 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
256 256 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
257 257 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
258 258 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
259 259 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
260 260 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
261 261
262 262 push over http
263 263
264 264 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
265 265 $ cat other.pid >> $DAEMON_PIDS
266 266
267 267 $ hg -R main phase --public 32af7686d403
268 268 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
269 269 pushing to http://localhost:$HGPORT2/
270 270 searching for changes
271 271 remote: adding changesets
272 272 remote: adding manifests
273 273 remote: adding file changes
274 274 remote: added 1 changesets with 1 changes to 1 files
275 275 remote: 1 new obsolescence markers
276 276 updating bookmark book_32af
277 277 $ cat other-error.log
278 278
279 279 Check final content.
280 280
281 281 $ hg -R other log -G
282 282 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
283 283 |
284 284 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
285 285 |
286 286 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
287 287 |
288 288 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
289 289 | |
290 290 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
291 291 | |/|
292 292 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
293 293 |/ /
294 294 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
295 295 |/
296 296 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
297 297
298 298 $ hg -R other debugobsolete
299 299 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
300 300 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
301 301 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
302 302 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
303 303 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
304 304 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
305 305 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
306 306
307 307 Error Handling
308 308 ==============
309 309
310 310 Check that errors are properly returned to the client during push.
311 311
312 312 Setting up
313 313
314 314 $ cat > failpush.py << EOF
315 315 > """A small extension that makes push fails when using bundle2
316 316 >
317 317 > used to test error handling in bundle2
318 318 > """
319 319 >
320 320 > from mercurial import util
321 321 > from mercurial import bundle2
322 322 > from mercurial import exchange
323 323 > from mercurial import extensions
324 324 >
325 325 > def _pushbundle2failpart(pushop, bundler):
326 326 > reason = pushop.ui.config('failpush', 'reason', None)
327 327 > part = None
328 328 > if reason == 'abort':
329 329 > bundler.newpart('test:abort')
330 330 > if reason == 'unknown':
331 331 > bundler.newpart('TEST:UNKNOWN')
332 332 > if reason == 'race':
333 333 > # 20 Bytes of crap
334 334 > bundler.newpart('b2x:check:heads', data='01234567890123456789')
335 335 >
336 336 > @bundle2.parthandler("test:abort")
337 337 > def handleabort(op, part):
338 338 > raise util.Abort('Abandon ship!', hint="don't panic")
339 339 >
340 340 > def uisetup(ui):
341 341 > exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
342 342 > exchange.b2partsgenorder.insert(0, 'failpart')
343 343 >
344 344 > EOF
345 345
346 346 $ cd main
347 347 $ hg up tip
348 348 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
349 349 $ echo 'I' > I
350 350 $ hg add I
351 351 $ hg ci -m 'I'
352 352 $ hg id
353 353 e7ec4e813ba6 tip
354 354 $ cd ..
355 355
356 356 $ cat << EOF >> $HGRCPATH
357 357 > [extensions]
358 358 > failpush=$TESTTMP/failpush.py
359 359 > EOF
360 360
361 361 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
362 362 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
363 363 $ cat other.pid >> $DAEMON_PIDS
364 364
365 365 Doing the actual push: Abort error
366 366
367 367 $ cat << EOF >> $HGRCPATH
368 368 > [failpush]
369 369 > reason = abort
370 370 > EOF
371 371
372 372 $ hg -R main push other -r e7ec4e813ba6
373 373 pushing to other
374 374 searching for changes
375 375 abort: Abandon ship!
376 376 (don't panic)
377 377 [255]
378 378
379 379 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
380 380 pushing to ssh://user@dummy/other
381 381 searching for changes
382 382 abort: Abandon ship!
383 383 (don't panic)
384 384 [255]
385 385
386 386 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
387 387 pushing to http://localhost:$HGPORT2/
388 388 searching for changes
389 389 abort: Abandon ship!
390 390 (don't panic)
391 391 [255]
392 392
393 393
394 394 Doing the actual push: unknown mandatory parts
395 395
396 396 $ cat << EOF >> $HGRCPATH
397 397 > [failpush]
398 398 > reason = unknown
399 399 > EOF
400 400
401 401 $ hg -R main push other -r e7ec4e813ba6
402 402 pushing to other
403 403 searching for changes
404 404 abort: missing support for test:unknown
405 405 [255]
406 406
407 407 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
408 408 pushing to ssh://user@dummy/other
409 409 searching for changes
410 410 abort: missing support for test:unknown
411 411 [255]
412 412
413 413 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
414 414 pushing to http://localhost:$HGPORT2/
415 415 searching for changes
416 416 abort: missing support for test:unknown
417 417 [255]
418 418
419 419 Doing the actual push: race
420 420
421 421 $ cat << EOF >> $HGRCPATH
422 422 > [failpush]
423 423 > reason = race
424 424 > EOF
425 425
426 426 $ hg -R main push other -r e7ec4e813ba6
427 427 pushing to other
428 428 searching for changes
429 429 abort: push failed:
430 430 'repository changed while pushing - please try again'
431 431 [255]
432 432
433 433 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
434 434 pushing to ssh://user@dummy/other
435 435 searching for changes
436 436 abort: push failed:
437 437 'repository changed while pushing - please try again'
438 438 [255]
439 439
440 440 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
441 441 pushing to http://localhost:$HGPORT2/
442 442 searching for changes
443 443 abort: push failed:
444 444 'repository changed while pushing - please try again'
445 445 [255]
446 446
447 447 Doing the actual push: hook abort
448 448
449 449 $ cat << EOF >> $HGRCPATH
450 450 > [failpush]
451 451 > reason =
452 452 > [hooks]
453 453 > b2x-pretransactionclose.failpush = false
454 454 > EOF
455 455
456 456 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
457 457 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
458 458 $ cat other.pid >> $DAEMON_PIDS
459 459
460 460 $ hg -R main push other -r e7ec4e813ba6
461 461 pushing to other
462 462 searching for changes
463 463 transaction abort!
464 464 rollback completed
465 changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=e7ec4e813ba6b07be2a0516ce1a74bb4e503f91a HG_SOURCE=push HG_URL=push
466 465 abort: b2x-pretransactionclose.failpush hook exited with status 1
467 466 [255]
468 467
469 468 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
470 469 pushing to ssh://user@dummy/other
471 470 searching for changes
472 471 abort: b2x-pretransactionclose.failpush hook exited with status 1
473 472 remote: transaction abort!
474 473 remote: rollback completed
475 remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=e7ec4e813ba6b07be2a0516ce1a74bb4e503f91a HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
476 474 [255]
477 475
478 476 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
479 477 pushing to http://localhost:$HGPORT2/
480 478 searching for changes
481 479 abort: b2x-pretransactionclose.failpush hook exited with status 1
482 480 [255]
483 481
484 482
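The two removed 'changegroup hook:' lines in the hook-abort scenarios above are the observable effect of the patch: with hook scheduling routed through the transaction's postclose API, a push whose b2x-pretransactionclose hook fails rolls back without ever queuing the changegroup hook.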