changegroup: add "vfs" argument to "writebundle()" for relative access via vfs...
FUJIWARA Katsunori
r20976:c20f4898 default
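The commit below threads an optional vfs argument through writebundle(), so bundle files named by relative paths are opened and unlinked via the vfs rather than the process working directory. As a minimal sketch of the calling convention this enables (the repo.vfs attribute is assumed from the surrounding codebase, not shown in this diff):

    # Hypothetical call site: resolve 'pending.hg' against the vfs base
    # instead of the current working directory. Omitting vfs keeps the
    # old open()/os.unlink() behaviour.
    writebundle(cg, 'pending.hg', 'HG10BZ', vfs=repo.vfs)
    writebundle(cg, '/tmp/out.hg', 'HG10BZ')  # old-style call, unchanged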
@@ -1,738 +1,744 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import weakref
9 9 from i18n import _
10 10 from node import nullrev, nullid, hex, short
11 11 import mdiff, util, dagutil
12 12 import struct, os, bz2, zlib, tempfile
13 13 import discovery, error, phases, branchmap
14 14
15 15 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
16 16
17 17 def readexactly(stream, n):
18 18 '''read n bytes from stream.read and abort if less was available'''
19 19 s = stream.read(n)
20 20 if len(s) < n:
21 21 raise util.Abort(_("stream ended unexpectedly"
22 22 " (got %d bytes, expected %d)")
23 23 % (len(s), n))
24 24 return s
25 25
26 26 def getchunk(stream):
27 27 """return the next chunk from stream as a string"""
28 28 d = readexactly(stream, 4)
29 29 l = struct.unpack(">l", d)[0]
30 30 if l <= 4:
31 31 if l:
32 32 raise util.Abort(_("invalid chunk length %d") % l)
33 33 return ""
34 34 return readexactly(stream, l - 4)
35 35
36 36 def chunkheader(length):
37 37 """return a changegroup chunk header (string)"""
38 38 return struct.pack(">l", length + 4)
39 39
40 40 def closechunk():
41 41 """return a changegroup chunk header (string) for a zero-length chunk"""
42 42 return struct.pack(">l", 0)
43 43
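For illustration, the chunk framing implemented by getchunk(), chunkheader() and closechunk() above is a 4-byte big-endian length that counts itself, with a zero-length chunk terminating a group. A round-trip sketch, runnable against the functions in this file (Python 2, like the code above):

    import StringIO

    payload = "hello"
    framed = chunkheader(len(payload)) + payload + closechunk()
    stream = StringIO.StringIO(framed)
    assert getchunk(stream) == "hello"  # header said 9 = 4 + len("hello")
    assert getchunk(stream) == ""       # zero-length chunk ends the group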
44 44 class nocompress(object):
45 45 def compress(self, x):
46 46 return x
47 47 def flush(self):
48 48 return ""
49 49
50 50 bundletypes = {
51 51 "": ("", nocompress), # only when using unbundle on ssh and old http servers
52 52 # since the unification, ssh accepts a header but there
53 53 # is no capability signaling it.
54 54 "HG10UN": ("HG10UN", nocompress),
55 55 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
56 56 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
57 57 }
58 58
59 59 # hgweb uses this list to communicate its preferred type
60 60 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
61 61
62 def writebundle(cg, filename, bundletype):
62 def writebundle(cg, filename, bundletype, vfs=None):
63 63 """Write a bundle file and return its filename.
64 64
65 65 Existing files will not be overwritten.
66 66 If no filename is specified, a temporary file is created.
67 67 bz2 compression can be turned off.
68 68 The bundle file will be deleted in case of errors.
69 69 """
70 70
71 71 fh = None
72 72 cleanup = None
73 73 try:
74 74 if filename:
75 fh = open(filename, "wb")
75 if vfs:
76 fh = vfs.open(filename, "wb")
77 else:
78 fh = open(filename, "wb")
76 79 else:
77 80 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
78 81 fh = os.fdopen(fd, "wb")
79 82 cleanup = filename
80 83
81 84 header, compressor = bundletypes[bundletype]
82 85 fh.write(header)
83 86 z = compressor()
84 87
85 88 # parse the changegroup data, otherwise we will block
86 89 # in case of sshrepo because we don't know the end of the stream
87 90
88 91 # an empty chunkgroup is the end of the changegroup
89 92 # a changegroup has at least 2 chunkgroups (changelog and manifest).
90 93 # after that, an empty chunkgroup is the end of the changegroup
91 94 empty = False
92 95 count = 0
93 96 while not empty or count <= 2:
94 97 empty = True
95 98 count += 1
96 99 while True:
97 100 chunk = getchunk(cg)
98 101 if not chunk:
99 102 break
100 103 empty = False
101 104 fh.write(z.compress(chunkheader(len(chunk))))
102 105 pos = 0
103 106 while pos < len(chunk):
104 107 next = pos + 2**20
105 108 fh.write(z.compress(chunk[pos:next]))
106 109 pos = next
107 110 fh.write(z.compress(closechunk()))
108 111 fh.write(z.flush())
109 112 cleanup = None
110 113 return filename
111 114 finally:
112 115 if fh is not None:
113 116 fh.close()
114 117 if cleanup is not None:
115 os.unlink(cleanup)
118 if filename and vfs:
119 vfs.unlink(cleanup)
120 else:
121 os.unlink(cleanup)
116 122
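Judging from the hunk above, writebundle() requires only two methods of the vfs it is handed: open(path, mode) and unlink(path), both resolving paths against the vfs base. A deliberately minimal stand-in with that shape (Mercurial's real vfs classes do much more; this is illustration only):

    import os

    class simplevfs(object):
        # Illustrative stand-in mirroring the two calls writebundle()
        # makes on its vfs argument.
        def __init__(self, base):
            self.base = base
        def open(self, path, mode="r"):
            return open(os.path.join(self.base, path), mode)
        def unlink(self, path):
            os.unlink(os.path.join(self.base, path))

Note the asymmetric cleanup in the finally block: when writebundle() created a temporary file itself (no filename given), it always uses os.unlink(), since tempfile.mkstemp() returns an absolute path outside any vfs.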
117 123 def decompressor(fh, alg):
118 124 if alg == 'UN':
119 125 return fh
120 126 elif alg == 'GZ':
121 127 def generator(f):
122 128 zd = zlib.decompressobj()
123 129 for chunk in util.filechunkiter(f):
124 130 yield zd.decompress(chunk)
125 131 elif alg == 'BZ':
126 132 def generator(f):
127 133 zd = bz2.BZ2Decompressor()
128 134 zd.decompress("BZ")
129 135 for chunk in util.filechunkiter(f, 4096):
130 136 yield zd.decompress(chunk)
131 137 else:
132 138 raise util.Abort("unknown bundle compression '%s'" % alg)
133 139 return util.chunkbuffer(generator(fh))
134 140
135 141 class unbundle10(object):
136 142 deltaheader = _BUNDLE10_DELTA_HEADER
137 143 deltaheadersize = struct.calcsize(deltaheader)
138 144 def __init__(self, fh, alg):
139 145 self._stream = decompressor(fh, alg)
140 146 self._type = alg
141 147 self.callback = None
142 148 def compressed(self):
143 149 return self._type != 'UN'
144 150 def read(self, l):
145 151 return self._stream.read(l)
146 152 def seek(self, pos):
147 153 return self._stream.seek(pos)
148 154 def tell(self):
149 155 return self._stream.tell()
150 156 def close(self):
151 157 return self._stream.close()
152 158
153 159 def chunklength(self):
154 160 d = readexactly(self._stream, 4)
155 161 l = struct.unpack(">l", d)[0]
156 162 if l <= 4:
157 163 if l:
158 164 raise util.Abort(_("invalid chunk length %d") % l)
159 165 return 0
160 166 if self.callback:
161 167 self.callback()
162 168 return l - 4
163 169
164 170 def changelogheader(self):
165 171 """v10 does not have a changelog header chunk"""
166 172 return {}
167 173
168 174 def manifestheader(self):
169 175 """v10 does not have a manifest header chunk"""
170 176 return {}
171 177
172 178 def filelogheader(self):
173 179 """return the header of the filelogs chunk, v10 only has the filename"""
174 180 l = self.chunklength()
175 181 if not l:
176 182 return {}
177 183 fname = readexactly(self._stream, l)
178 184 return {'filename': fname}
179 185
180 186 def _deltaheader(self, headertuple, prevnode):
181 187 node, p1, p2, cs = headertuple
182 188 if prevnode is None:
183 189 deltabase = p1
184 190 else:
185 191 deltabase = prevnode
186 192 return node, p1, p2, deltabase, cs
187 193
188 194 def deltachunk(self, prevnode):
189 195 l = self.chunklength()
190 196 if not l:
191 197 return {}
192 198 headerdata = readexactly(self._stream, self.deltaheadersize)
193 199 header = struct.unpack(self.deltaheader, headerdata)
194 200 delta = readexactly(self._stream, l - self.deltaheadersize)
195 201 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
196 202 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
197 203 'deltabase': deltabase, 'delta': delta}
198 204
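The v10 delta header unpacked by deltachunk() above is four raw 20-byte nodes: the revision node, its two parents, and the linked changeset node; the delta base is implicit (p1 for the first chunk, otherwise the previous chunk's node, as _deltaheader() shows). A quick size check:

    import struct
    assert struct.calcsize("20s20s20s20s") == 80  # deltaheadersize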
199 205 class headerlessfixup(object):
200 206 def __init__(self, fh, h):
201 207 self._h = h
202 208 self._fh = fh
203 209 def read(self, n):
204 210 if self._h:
205 211 d, self._h = self._h[:n], self._h[n:]
206 212 if len(d) < n:
207 213 d += readexactly(self._fh, n - len(d))
208 214 return d
209 215 return readexactly(self._fh, n)
210 216
211 217 def readbundle(fh, fname):
212 218 header = readexactly(fh, 6)
213 219
214 220 if not fname:
215 221 fname = "stream"
216 222 if not header.startswith('HG') and header.startswith('\0'):
217 223 fh = headerlessfixup(fh, header)
218 224 header = "HG10UN"
219 225
220 226 magic, version, alg = header[0:2], header[2:4], header[4:6]
221 227
222 228 if magic != 'HG':
223 229 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
224 230 if version != '10':
225 231 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
226 232 return unbundle10(fh, alg)
227 233
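The six-byte header parsed by readbundle() splits into a two-byte magic, a two-byte version and a two-byte compression tag, for example:

    header = "HG10BZ"
    magic, version, alg = header[0:2], header[2:4], header[4:6]
    assert (magic, version, alg) == ("HG", "10", "BZ")

A stream that starts with NUL instead of 'HG' is treated as a raw, headerless changegroup and wrapped as HG10UN via headerlessfixup.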
228 234 class bundle10(object):
229 235 deltaheader = _BUNDLE10_DELTA_HEADER
230 236 def __init__(self, repo, bundlecaps=None):
231 237 """Given a source repo, construct a bundler.
232 238
233 239 bundlecaps is optional and can be used to specify the set of
234 240 capabilities which can be used to build the bundle.
235 241 """
236 242 # Set of capabilities we can use to build the bundle.
237 243 if bundlecaps is None:
238 244 bundlecaps = set()
239 245 self._bundlecaps = bundlecaps
240 246 self._changelog = repo.changelog
241 247 self._manifest = repo.manifest
242 248 reorder = repo.ui.config('bundle', 'reorder', 'auto')
243 249 if reorder == 'auto':
244 250 reorder = None
245 251 else:
246 252 reorder = util.parsebool(reorder)
247 253 self._repo = repo
248 254 self._reorder = reorder
249 255 self._progress = repo.ui.progress
250 256 def close(self):
251 257 return closechunk()
252 258
253 259 def fileheader(self, fname):
254 260 return chunkheader(len(fname)) + fname
255 261
256 262 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
257 263 """Calculate a delta group, yielding a sequence of changegroup chunks
258 264 (strings).
259 265
260 266 Given a list of changeset revs, return a set of deltas and
261 267 metadata corresponding to nodes. The first delta is
262 268 first parent(nodelist[0]) -> nodelist[0], the receiver is
263 269 guaranteed to have this parent as it has all history before
264 270 these changesets. In the case firstparent is nullrev, the
265 271 changegroup starts with a full revision.
266 272
267 273 If units is not None, progress detail will be generated; units specifies
268 274 the type of revlog that is touched (changelog, manifest, etc.).
269 275 """
270 276 # if we don't have any revisions touched by these changesets, bail
271 277 if len(nodelist) == 0:
272 278 yield self.close()
273 279 return
274 280
275 281 # for generaldelta revlogs, we linearize the revs; this will both be
276 282 # much quicker and generate a much smaller bundle
277 283 if (revlog._generaldelta and reorder is not False) or reorder:
278 284 dag = dagutil.revlogdag(revlog)
279 285 revs = set(revlog.rev(n) for n in nodelist)
280 286 revs = dag.linearize(revs)
281 287 else:
282 288 revs = sorted([revlog.rev(n) for n in nodelist])
283 289
284 290 # add the parent of the first rev
285 291 p = revlog.parentrevs(revs[0])[0]
286 292 revs.insert(0, p)
287 293
288 294 # build deltas
289 295 total = len(revs) - 1
290 296 msgbundling = _('bundling')
291 297 for r in xrange(len(revs) - 1):
292 298 if units is not None:
293 299 self._progress(msgbundling, r + 1, unit=units, total=total)
294 300 prev, curr = revs[r], revs[r + 1]
295 301 linknode = lookup(revlog.node(curr))
296 302 for c in self.revchunk(revlog, curr, prev, linknode):
297 303 yield c
298 304
299 305 yield self.close()
300 306
301 307 # filter any nodes that claim to be part of the known set
302 308 def prune(self, revlog, missing, commonrevs, source):
303 309 rr, rl = revlog.rev, revlog.linkrev
304 310 return [n for n in missing if rl(rr(n)) not in commonrevs]
305 311
306 312 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
307 313 '''yield a sequence of changegroup chunks (strings)'''
308 314 repo = self._repo
309 315 cl = self._changelog
310 316 mf = self._manifest
311 317 reorder = self._reorder
312 318 progress = self._progress
313 319
314 320 # for progress output
315 321 msgbundling = _('bundling')
316 322
317 323 mfs = {} # needed manifests
318 324 fnodes = {} # needed file nodes
319 325 changedfiles = set()
320 326
321 327 # Callback for the changelog, used to collect changed files and manifest
322 328 # nodes.
323 329 # Returns the linkrev node (identity in the changelog case).
324 330 def lookupcl(x):
325 331 c = cl.read(x)
326 332 changedfiles.update(c[3])
327 333 # record the first changeset introducing this manifest version
328 334 mfs.setdefault(c[0], x)
329 335 return x
330 336
331 337 # Callback for the manifest, used to collect linkrevs for filelog
332 338 # revisions.
333 339 # Returns the linkrev node (collected in lookupcl).
334 340 def lookupmf(x):
335 341 clnode = mfs[x]
336 342 if not fastpathlinkrev:
337 343 mdata = mf.readfast(x)
338 344 for f, n in mdata.iteritems():
339 345 if f in changedfiles:
340 346 # record the first changeset introducing this filelog
341 347 # version
342 348 fnodes[f].setdefault(n, clnode)
343 349 return clnode
344 350
345 351 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
346 352 reorder=reorder):
347 353 yield chunk
348 354 progress(msgbundling, None)
349 355
350 356 for f in changedfiles:
351 357 fnodes[f] = {}
352 358 mfnodes = self.prune(mf, mfs, commonrevs, source)
353 359 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
354 360 reorder=reorder):
355 361 yield chunk
356 362 progress(msgbundling, None)
357 363
358 364 mfs.clear()
359 365 needed = set(cl.rev(x) for x in clnodes)
360 366
361 367 def linknodes(filerevlog, fname):
362 368 if fastpathlinkrev:
363 369 llr = filerevlog.linkrev
364 370 def genfilenodes():
365 371 for r in filerevlog:
366 372 linkrev = llr(r)
367 373 if linkrev in needed:
368 374 yield filerevlog.node(r), cl.node(linkrev)
369 375 fnodes[fname] = dict(genfilenodes())
370 376 return fnodes.get(fname, {})
371 377
372 378 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
373 379 source):
374 380 yield chunk
375 381
376 382 yield self.close()
377 383 progress(msgbundling, None)
378 384
379 385 if clnodes:
380 386 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
381 387
382 388 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
383 389 repo = self._repo
384 390 progress = self._progress
385 391 reorder = self._reorder
386 392 msgbundling = _('bundling')
387 393
388 394 total = len(changedfiles)
389 395 # for progress output
390 396 msgfiles = _('files')
391 397 for i, fname in enumerate(sorted(changedfiles)):
392 398 filerevlog = repo.file(fname)
393 399 if not filerevlog:
394 400 raise util.Abort(_("empty or missing revlog for %s") % fname)
395 401
396 402 linkrevnodes = linknodes(filerevlog, fname)
397 403 # Lookup for filenodes, we collected the linkrev nodes above in the
398 404 # fastpath case and with lookupmf in the slowpath case.
399 405 def lookupfilelog(x):
400 406 return linkrevnodes[x]
401 407
402 408 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
403 409 if filenodes:
404 410 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
405 411 total=total)
406 412 yield self.fileheader(fname)
407 413 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
408 414 reorder=reorder):
409 415 yield chunk
410 416
411 417 def revchunk(self, revlog, rev, prev, linknode):
412 418 node = revlog.node(rev)
413 419 p1, p2 = revlog.parentrevs(rev)
414 420 base = prev
415 421
416 422 prefix = ''
417 423 if base == nullrev:
418 424 delta = revlog.revision(node)
419 425 prefix = mdiff.trivialdiffheader(len(delta))
420 426 else:
421 427 delta = revlog.revdiff(base, rev)
422 428 p1n, p2n = revlog.parents(node)
423 429 basenode = revlog.node(base)
424 430 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
425 431 meta += prefix
426 432 l = len(meta) + len(delta)
427 433 yield chunkheader(l)
428 434 yield meta
429 435 yield delta
430 436 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
431 437 # do nothing with basenode, it is implicitly the previous one in HG10
432 438 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
433 439
434 440 def _changegroupinfo(repo, nodes, source):
435 441 if repo.ui.verbose or source == 'bundle':
436 442 repo.ui.status(_("%d changesets found\n") % len(nodes))
437 443 if repo.ui.debugflag:
438 444 repo.ui.debug("list of changesets:\n")
439 445 for node in nodes:
440 446 repo.ui.debug("%s\n" % hex(node))
441 447
442 448 def getsubset(repo, outgoing, bundler, source, fastpath=False):
443 449 repo = repo.unfiltered()
444 450 commonrevs = outgoing.common
445 451 csets = outgoing.missing
446 452 heads = outgoing.missingheads
447 453 # We go through the fast path if we get told to, or if all (unfiltered)
448 454 # heads have been requested (since we then know that all linkrevs will
449 455 # be pulled by the client).
450 456 heads.sort()
451 457 fastpathlinkrev = fastpath or (
452 458 repo.filtername is None and heads == sorted(repo.heads()))
453 459
454 460 repo.hook('preoutgoing', throw=True, source=source)
455 461 _changegroupinfo(repo, csets, source)
456 462 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
457 463 return unbundle10(util.chunkbuffer(gengroup), 'UN')
458 464
459 465 def changegroupsubset(repo, roots, heads, source):
460 466 """Compute a changegroup consisting of all the nodes that are
461 467 descendants of any of the roots and ancestors of any of the heads.
462 468 Return a chunkbuffer object whose read() method will return
463 469 successive changegroup chunks.
464 470
465 471 It is fairly complex as determining which filenodes and which
466 472 manifest nodes need to be included for the changeset to be complete
467 473 is non-trivial.
468 474
469 475 Another wrinkle is doing the reverse, figuring out which changeset in
470 476 the changegroup a particular filenode or manifestnode belongs to.
471 477 """
472 478 cl = repo.changelog
473 479 if not roots:
474 480 roots = [nullid]
475 481 # TODO: remove call to nodesbetween.
476 482 csets, roots, heads = cl.nodesbetween(roots, heads)
477 483 discbases = []
478 484 for n in roots:
479 485 discbases.extend([p for p in cl.parents(n) if p != nullid])
480 486 outgoing = discovery.outgoing(cl, discbases, heads)
481 487 bundler = bundle10(repo)
482 488 return getsubset(repo, outgoing, bundler, source)
483 489
484 490 def getlocalbundle(repo, source, outgoing, bundlecaps=None):
485 491 """Like getbundle, but taking a discovery.outgoing as an argument.
486 492
487 493 This is only implemented for local repos and reuses potentially
488 494 precomputed sets in outgoing."""
489 495 if not outgoing.missing:
490 496 return None
491 497 bundler = bundle10(repo, bundlecaps)
492 498 return getsubset(repo, outgoing, bundler, source)
493 499
494 500 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
495 501 """Like changegroupsubset, but returns the set difference between the
496 502 ancestors of heads and the ancestors common.
497 503
498 504 If heads is None, use the local heads. If common is None, use [nullid].
499 505
500 506 The nodes in common might not all be known locally due to the way the
501 507 current discovery protocol works.
502 508 """
503 509 cl = repo.changelog
504 510 if common:
505 511 hasnode = cl.hasnode
506 512 common = [n for n in common if hasnode(n)]
507 513 else:
508 514 common = [nullid]
509 515 if not heads:
510 516 heads = cl.heads()
511 517 outgoing = discovery.outgoing(cl, common, heads)
512 518 return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
513 519
514 520 def changegroup(repo, basenodes, source):
515 521 # to avoid a race we use changegroupsubset() (issue1320)
516 522 return changegroupsubset(repo, basenodes, repo.heads(), source)
517 523
518 524 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
519 525 revisions = 0
520 526 files = 0
521 527 while True:
522 528 chunkdata = source.filelogheader()
523 529 if not chunkdata:
524 530 break
525 531 f = chunkdata["filename"]
526 532 repo.ui.debug("adding %s revisions\n" % f)
527 533 pr()
528 534 fl = repo.file(f)
529 535 o = len(fl)
530 536 if not fl.addgroup(source, revmap, trp):
531 537 raise util.Abort(_("received file revlog group is empty"))
532 538 revisions += len(fl) - o
533 539 files += 1
534 540 if f in needfiles:
535 541 needs = needfiles[f]
536 542 for new in xrange(o, len(fl)):
537 543 n = fl.node(new)
538 544 if n in needs:
539 545 needs.remove(n)
540 546 else:
541 547 raise util.Abort(
542 548 _("received spurious file revlog entry"))
543 549 if not needs:
544 550 del needfiles[f]
545 551 repo.ui.progress(_('files'), None)
546 552
547 553 for f, needs in needfiles.iteritems():
548 554 fl = repo.file(f)
549 555 for n in needs:
550 556 try:
551 557 fl.rev(n)
552 558 except error.LookupError:
553 559 raise util.Abort(
554 560 _('missing file data for %s:%s - run hg verify') %
555 561 (f, hex(n)))
556 562
557 563 return revisions, files
558 564
559 565 def addchangegroup(repo, source, srctype, url, emptyok=False):
560 566 """Add the changegroup returned by source.read() to this repo.
561 567 srctype is a string like 'push', 'pull', or 'unbundle'. url is
562 568 the URL of the repo where this changegroup is coming from.
563 569
564 570 Return an integer summarizing the change to this repo:
565 571 - nothing changed or no source: 0
566 572 - more heads than before: 1+added heads (2..n)
567 573 - fewer heads than before: -1-removed heads (-2..-n)
568 574 - number of heads stays the same: 1
569 575 """
570 576 repo = repo.unfiltered()
571 577 def csmap(x):
572 578 repo.ui.debug("add changeset %s\n" % short(x))
573 579 return len(cl)
574 580
575 581 def revmap(x):
576 582 return cl.rev(x)
577 583
578 584 if not source:
579 585 return 0
580 586
581 587 repo.hook('prechangegroup', throw=True, source=srctype, url=url)
582 588
583 589 changesets = files = revisions = 0
584 590 efiles = set()
585 591
586 592 # write changelog data to temp files so concurrent readers will not see
587 593 # an inconsistent view
588 594 cl = repo.changelog
589 595 cl.delayupdate()
590 596 oldheads = cl.heads()
591 597
592 598 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
593 599 try:
594 600 trp = weakref.proxy(tr)
595 601 # pull off the changeset group
596 602 repo.ui.status(_("adding changesets\n"))
597 603 clstart = len(cl)
598 604 class prog(object):
599 605 step = _('changesets')
600 606 count = 1
601 607 ui = repo.ui
602 608 total = None
603 609 def __call__(repo):
604 610 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
605 611 total=repo.total)
606 612 repo.count += 1
607 613 pr = prog()
608 614 source.callback = pr
609 615
610 616 source.changelogheader()
611 617 srccontent = cl.addgroup(source, csmap, trp)
612 618 if not (srccontent or emptyok):
613 619 raise util.Abort(_("received changelog group is empty"))
614 620 clend = len(cl)
615 621 changesets = clend - clstart
616 622 for c in xrange(clstart, clend):
617 623 efiles.update(repo[c].files())
618 624 efiles = len(efiles)
619 625 repo.ui.progress(_('changesets'), None)
620 626
621 627 # pull off the manifest group
622 628 repo.ui.status(_("adding manifests\n"))
623 629 pr.step = _('manifests')
624 630 pr.count = 1
625 631 pr.total = changesets # manifests <= changesets
626 632 # no need to check for empty manifest group here:
627 633 # if the result of the merge of 1 and 2 is the same in 3 and 4,
628 634 # no new manifest will be created and the manifest group will
629 635 # be empty during the pull
630 636 source.manifestheader()
631 637 repo.manifest.addgroup(source, revmap, trp)
632 638 repo.ui.progress(_('manifests'), None)
633 639
634 640 needfiles = {}
635 641 if repo.ui.configbool('server', 'validate', default=False):
636 642 # validate incoming csets have their manifests
637 643 for cset in xrange(clstart, clend):
638 644 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
639 645 mfest = repo.manifest.readdelta(mfest)
640 646 # store file nodes we must see
641 647 for f, n in mfest.iteritems():
642 648 needfiles.setdefault(f, set()).add(n)
643 649
644 650 # process the files
645 651 repo.ui.status(_("adding file changes\n"))
646 652 pr.step = _('files')
647 653 pr.count = 1
648 654 pr.total = efiles
649 655 source.callback = None
650 656
651 657 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
652 658 needfiles)
653 659 revisions += newrevs
654 660 files += newfiles
655 661
656 662 dh = 0
657 663 if oldheads:
658 664 heads = cl.heads()
659 665 dh = len(heads) - len(oldheads)
660 666 for h in heads:
661 667 if h not in oldheads and repo[h].closesbranch():
662 668 dh -= 1
663 669 htext = ""
664 670 if dh:
665 671 htext = _(" (%+d heads)") % dh
666 672
667 673 repo.ui.status(_("added %d changesets"
668 674 " with %d changes to %d files%s\n")
669 675 % (changesets, revisions, files, htext))
670 676 repo.invalidatevolatilesets()
671 677
672 678 if changesets > 0:
673 679 p = lambda: cl.writepending() and repo.root or ""
674 680 repo.hook('pretxnchangegroup', throw=True,
675 681 node=hex(cl.node(clstart)), source=srctype,
676 682 url=url, pending=p)
677 683
678 684 added = [cl.node(r) for r in xrange(clstart, clend)]
679 685 publishing = repo.ui.configbool('phases', 'publish', True)
680 686 if srctype in ('push', 'serve'):
681 687 # Old servers can not push the boundary themselves.
682 688 # New servers won't push the boundary if changeset already
683 689 # exists locally as secret
684 690 #
685 691 # We should not use added here but the list of all changes in
686 692 # the bundle
687 693 if publishing:
688 694 phases.advanceboundary(repo, phases.public, srccontent)
689 695 else:
690 696 phases.advanceboundary(repo, phases.draft, srccontent)
691 697 phases.retractboundary(repo, phases.draft, added)
692 698 elif srctype != 'strip':
693 699 # publishing only alters behavior during push
694 700 #
695 701 # strip should not touch boundary at all
696 702 phases.retractboundary(repo, phases.draft, added)
697 703
698 704 # make changelog see real files again
699 705 cl.finalize(trp)
700 706
701 707 tr.close()
702 708
703 709 if changesets > 0:
704 710 if srctype != 'strip':
705 711 # During strip, the branchcache is invalid but the coming call to
706 712 # `destroyed` will repair it.
707 713 # In other cases we can safely update the cache on disk.
708 714 branchmap.updatecache(repo.filtered('served'))
709 715 def runhooks():
710 716 # These hooks run when the lock releases, not when the
711 717 # transaction closes. So it's possible for the changelog
712 718 # to have changed since we last saw it.
713 719 if clstart >= len(repo):
714 720 return
715 721
716 722 # forcefully update the on-disk branch cache
717 723 repo.ui.debug("updating the branch cache\n")
718 724 repo.hook("changegroup", node=hex(cl.node(clstart)),
719 725 source=srctype, url=url)
720 726
721 727 for n in added:
722 728 repo.hook("incoming", node=hex(n), source=srctype,
723 729 url=url)
724 730
725 731 newheads = [h for h in repo.heads() if h not in oldheads]
726 732 repo.ui.log("incoming",
727 733 "%s incoming changes - new heads: %s\n",
728 734 len(added),
729 735 ', '.join([hex(c[:6]) for c in newheads]))
730 736 repo._afterlock(runhooks)
731 737
732 738 finally:
733 739 tr.release()
734 740 # never return 0 here:
735 741 if dh < 0:
736 742 return dh - 1
737 743 else:
738 744 return dh + 1
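As a worked example of the return convention documented on addchangegroup(): dh is the head-count delta, adjusted downward for new heads that close a branch, and the function never returns 0 once changesets were processed. A sketch of how a caller might decode the result (names here are placeholders):

    ret = addchangegroup(repo, source, 'pull', url)
    if ret == 0:
        pass              # no source: nothing was added
    elif ret == 1:
        pass              # head count unchanged (dh == 0 -> dh + 1)
    elif ret > 1:
        pass              # ret - 1 new heads (dh > 0 -> dh + 1)
    else:
        pass              # -ret - 1 heads removed (dh < 0 -> dh - 1)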