# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import weakref
from i18n import _
from node import nullrev, nullid, hex, short
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile
import discovery, error, phases, branchmap

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
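
# Illustrative sketch (assumption, not upstream code): the helpers above
# implement the chunk framing used throughout this module.  Each chunk is
# preceded by a 4-byte big-endian length that counts the prefix itself, and a
# zero length marks the end of a chunk group.  A hypothetical consumer:
#
#     def iterchunks(stream):
#         while True:
#             chunk = getchunk(stream)
#             if not chunk:          # zero-length chunk: end of this group
#                 break
#             yield chunk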

class nocompress(object):
    def compress(self, x):
        return x
    def flush(self):
        return ""

bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
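
# Illustrative sketch (assumption, not upstream code): each bundletypes entry
# maps a bundle type name to the on-disk header and a compressor factory, e.g.
#
#     header, compressor = bundletypes['HG10BZ']   # header is 'HG10'
#     z = compressor()                              # a bz2.BZ2Compressor
#     data = z.compress(chunk) + z.flush()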

def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
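
# Illustrative usage sketch (assumption, not upstream code): given a
# changegroup object `cg` (for example from getchangegroup below), write it
# out as a bzip2-compressed bundle file:
#
#     fname = writebundle(cg, 'incoming.hg', 'HG10BZ')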

def decompressor(fh, alg):
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the 'BZ' magic was already consumed while sniffing the bundle
            # header, so feed it back to the decompressor first
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))

class cg1unpacker(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data, otherwise it
        would block in the sshrepo case because it doesn't know the end of
        the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
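
    # Illustrative consumption sketch (assumption, not upstream code): a
    # reader drains one delta group by calling deltachunk() until it returns
    # an empty dict, threading the previous node through as the default
    # delta base:
    #
    #     prev = None
    #     while True:
    #         chunk = unpacker.deltachunk(prev)
    #         if not chunk:          # empty chunk closes the group
    #             break
    #         prev = chunk['node']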

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()
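
    # Illustrative note (assumption, not upstream documentation): on the wire,
    # the group produced above is simply a run of length-prefixed delta chunks
    # (see revchunk below) terminated by the zero-length chunk from close().
    # The first delta is taken against the first parent of nodelist[0], which
    # the receiver is guaranteed to already have.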

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
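
    # Illustrative note (assumption, not upstream documentation): with the
    # "20s20s20s20s" format above, every delta chunk carries an 80-byte header
    # of four binary nodes (node, p1, p2, linknode) followed by the delta
    # itself; the delta base is implicit (the previous chunk's node, or, for a
    # first chunk whose base is nullrev, a full revision behind a trivial diff
    # header).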

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all the linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return cg1unpacker(util.chunkbuffer(gengroup), 'UN')

def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = cg1packer(repo)
    return getsubset(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
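
# Illustrative end-to-end sketch (assumption, not upstream code): build a
# changegroup of everything missing relative to `common` and write it out as
# a compressed bundle file:
#
#     cg = getchangegroup(repo, 'bundle', heads=None, common=None)
#     if cg is not None:                    # None means nothing to bundle
#         writebundle(cg, 'outgoing.hg', 'HG10BZ')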

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.

    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    repo.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # an inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: cl.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, source=srctype,
                      url=url, pending=p, **tr.hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all changes in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefore
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alters behavior during push
            #
            # strip should not touch the boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but the upcoming call
                # to `destroyed` will repair it.
                # In the other cases we can safely update the cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", source=srctype, url=url,
                          **tr.hookargs)

                for n in added:
                    repo.hook("incoming", node=hex(n), source=srctype,
                              url=url)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)
    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
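
# Illustrative note (assumption, not upstream code): interpreting the return
# value of addchangegroup(), per its docstring:
#
#     ret = addchangegroup(repo, cg, 'pull', 'http://example.com/repo')
#     if ret == 0:
#         pass                   # nothing was added
#     elif ret > 1:
#         pass                   # ret - 1 new heads were created
#     elif ret < 0:
#         pass                   # -ret - 1 heads disappeared
#     else:                      # ret == 1: head count unchanged
#         pass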