changegroup.py
1435 lines
| 55.7 KiB
| text/x-python
|
PythonLexer
/ mercurial / changegroup.py
Martin Geisler
|
r8226 | # changegroup.py - Mercurial changegroup manipulation functions | ||
# | ||||
# Copyright 2006 Matt Mackall <mpm@selenic.com> | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
Matt Mackall
|
r3877 | |||
Gregory Szorc
|
r25921 | from __future__ import absolute_import | ||
import os | ||||
import struct | ||||
Pierre-Yves David
|
r20933 | import weakref | ||
Gregory Szorc
|
r25921 | |||
from .i18n import _ | ||||
from .node import ( | ||||
hex, | ||||
Gregory Szorc
|
r38919 | nullid, | ||
Gregory Szorc
|
r25921 | nullrev, | ||
short, | ||||
) | ||||
Gregory Szorc
|
r38929 | from .thirdparty import ( | ||
attr, | ||||
) | ||||
Gregory Szorc
|
r25921 | from . import ( | ||
dagutil, | ||||
error, | ||||
Gregory Szorc
|
r38842 | manifest, | ||
Gregory Szorc
|
r38830 | match as matchmod, | ||
Gregory Szorc
|
r25921 | mdiff, | ||
phases, | ||||
Pulkit Goyal
|
r30925 | pycompat, | ||
Martin von Zweigbergk
|
r38871 | repository, | ||
Gregory Szorc
|
r38919 | revlog, | ||
Gregory Szorc
|
r25921 | util, | ||
) | ||||
Thomas Arendsen Hein
|
r1981 | |||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
stringutil, | ||||
) | ||||
Gregory Szorc
|
# Per-version delta header formats. cg1 and cg2 are runs of 20-byte nodes;
# cg3 adds a trailing 16-bit flags field (big-endian).
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

# Repository requirement string for the lfs extension.
LFS_REQUIREMENT = 'lfs'

# Local alias for the frequently-used helper from util.
readexactly = util.readexactly
Mads Kiilerich
|
r13457 | |||
def getchunk(stream):
    """Read the next framed chunk from stream and return its payload.

    Returns "" once the zero-length end-of-group marker is seen.
    """
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length <= 4:
        # Zero terminates a chunk sequence; any other value at or below
        # the 4-byte header size is corrupt framing.
        if length:
            raise error.Abort(_("invalid chunk length %d") % length)
        return ""
    # The stored length counts the 4-byte header itself.
    return readexactly(stream, length - 4)
Thomas Arendsen Hein
|
r1981 | |||
Matt Mackall
|
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The on-the-wire length includes the 4-byte header itself.
    total = length + 4
    return struct.pack(">l", total)
Thomas Arendsen Hein
|
r1981 | |||
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # An explicit zero length terminates a sequence of chunks.
    return struct.pack(">l", 0)
Gregory Szorc
|
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    # Frame the path itself as the chunk payload.
    header = chunkheader(len(path))
    return header + path
Pierre-Yves David
|
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    # cleanup holds the filename to unlink if writing fails part-way.
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        # All chunks written successfully; keep the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # A partial file remains; remove it via the same layer that
            # created it.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
Matt Mackall
|
r3660 | |||
Sune Foldager
|
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        """Wrap stream ``fh``, decompressing with bundle compression ``alg``."""
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # When set, invoked each time a chunk header is consumed (used
        # by apply() for progress reporting).
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk header and return the payload length (0 at end)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # The stored length includes the 4-byte length prefix itself.
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Split a parsed delta header into its component fields.

        cg1 carries no explicit delta base in the header: deltas are
        against the previous node in the stream, or p1 for the first
        chunk of a group.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk.

        Returns a (node, p1, p2, cs, deltabase, delta, flags) tuple, or
        {} when the end-of-group marker is reached.
        """
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.
        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit the payload in at most 1 MiB slices.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        """Apply the manifest portion of the stream to the repository."""
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)

            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # closed branch heads don't count toward the delta
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
Durham Goode
|
r34147 | |||
Sune Foldager
|
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # Unlike cg1, the delta base is carried explicitly in the header.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
Sune Foldager
|
r23181 | |||
Augie Fackler
|
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 headers already carry the flags field; pass through as-is.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        # Root manifests are handled by the base class; cg3 additionally
        # streams one group per tree manifest directory.
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
Matt Mackall
|
class headerlessfixup(object):
    """File-like wrapper that re-serves already-consumed header bytes.

    Reads are satisfied from the buffered header ``h`` first; once it is
    exhausted, reads fall through to the underlying file object ``fh``.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        d, self._h = buffered[:n], buffered[n:]
        if len(d) < n:
            # Header exhausted mid-read; top up from the real stream.
            d += readexactly(self._fh, n - len(d))
        return d
Matt Mackall
|
r12329 | |||
Gregory Szorc
|
@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node of node this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Iterable of chunks holding raw delta data.
    deltachunks = attr.ib()
Gregory Szorc
|
r38919 | |||
Gregory Szorc
|
class cgpacker(object):
    def __init__(self, repo, filematcher, version, allowreorder,
                 deltaparentfn, builddeltaheader, manifestsend,
                 bundlecaps=None, ellipses=False,
                 shallow=False, ellipsisroots=None, fullnodes=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        allowreorder controls whether reordering of revisions is allowed.
        This value is used when ``bundle.reorder`` is ``auto`` or isn't
        set.

        deltaparentfn is a callable that resolves the delta parent for
        a specific revision.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the list of nodes which should not be ellipsis nodes. We
        store this rather than the set of nodes that should be ellipsis because
        for very large histories we expect this to be significantly smaller.
        """
        assert filematcher
        self._filematcher = filematcher

        self.version = version
        self._deltaparentfn = deltaparentfn
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            self._reorder = allowreorder
        else:
            self._reorder = stringutil.parsebool(reorder)

        self._repo = repo

        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

        # Maps CL revs to per-revlog revisions. Cleared in close() at
        # the end of each group.
        self._clrevtolocalrev = {}
        self._nextclrevtolocalrev = {}
        # Maps changelog nodes to changelog revs. Filled in once
        # during changelog stage and then left unmodified.
        self._clnodetorev = {}
Gregory Szorc
|
    def _close(self):
        """Emit the group terminator, rotating ellipsis rev maps."""
        # Ellipses serving mode.
        self._clrevtolocalrev.clear()
        if self._nextclrevtolocalrev is not None:
            self._clrevtolocalrev = self._nextclrevtolocalrev
            self._nextclrevtolocalrev = None

        return closechunk()
Sune Foldager
|
r19200 | |||
Augie Fackler
|
    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, store, ischangelog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # Ellipses serving mode.
        #
        # In a perfect world, we'd generate better ellipsis-ified graphs
        # for non-changelog revlogs. In practice, we haven't started doing
        # that yet, so the resulting DAGs for the manifestlog and filelogs
        # are actually full of bogus parentage on all the ellipsis
        # nodes. This has the side effect that, while the contents are
        # correct, the individual DAGs might be completely out of whack in
        # a case like 882681bc3166 and its ancestors (back about 10
        # revisions or so) in the main hg repo.
        #
        # The one invariant we *know* holds is that the new (potentially
        # bogus) DAG shape will be valid if we order the nodes in the
        # order that they're introduced in dramatis personae by the
        # changelog, so what we do is we sort the non-changelog histories
        # by the order in which they are used by the changelog.
        if self._ellipses and not ischangelog:
            key = lambda n: self._clnodetorev[lookup(n)]
            return [store.rev(n) for n in sorted(nodelist, key=key)]

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (store._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(store)
            return dag.linearize(set(store.rev(n) for n in nodelist))
        else:
            return sorted([store.rev(n) for n in nodelist])
Augie Fackler
|
r29236 | |||
Gregory Szorc
|
    def group(self, nodelist, store, ischangelog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self._close()
            return

        revs = self._sortgroup(store, ischangelog, nodelist, lookup)

        # add the parent of the first rev
        p = store.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        progress = None
        if units is not None:
            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
                                                  total=(len(revs) - 1))
        for r in pycompat.xrange(len(revs) - 1):
            if progress:
                progress.update(r + 1)
            # Each rev is deltaed against its predecessor in the sorted list.
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(store.node(curr))
            for c in self._revchunk(store, ischangelog, curr, prev, linknode):
                yield c

        if progress:
            progress.complete()
        yield self._close()
Sune Foldager
|
r19200 | |||
Durham Goode
|
    # filter any nodes that claim to be part of the known set
    def _prune(self, store, missing, commonrevs):
        """Return the subset of ``missing`` not linked to ``commonrevs``."""
        # TODO this violates storage abstraction for manifests.
        if isinstance(store, manifest.manifestrevlog):
            # Skip tree manifest directories excluded by the file matcher.
            if not self._filematcher.visitdir(store._dir[:-1] or '.'):
                return []

        rr, rl = store.rev, store.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]
Martin von Zweigbergk
|
    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack manifests into a changegroup stream.

        Encodes the directory name in the output so multiple manifests
        can be sent. Multiple manifests is not supported by cg1 and cg2.
        """
        if dir:
            # Only cg3 can carry named (tree) manifest groups.
            assert self.version == b'03'
            yield _fileheader(dir)

        # TODO violates storage abstractions by assuming revlogs.
        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, False, lookuplinknode,
                                units=_('manifests')):
            yield chunk
Benoit Boissinot
|
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        """Yield a sequence of changegroup byte chunks."""
        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0

        clstate, chunks = self._generatechangelog(cl, clnodes)
        for chunk in chunks:
            size += len(chunk)
            yield chunk

        self._verbosenote(_('%8.i (changelog)\n') % size)

        clrevorder = clstate['clrevorder']
        mfs = clstate['mfs']
        changedfiles = clstate['changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        fnodes = {}  # needed file nodes

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes,
                                            source):
            yield chunk

        if self._ellipses:
            mfdicts = None
            if self._isshallow:
                mfdicts = [(self._repo.manifestlog[n].read(), lr)
                           for (n, lr) in mfs.iteritems()]

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        if self._ellipses:
            # We need to pass the mfdicts variable down into
            # generatefiles(), but more than one command might have
            # wrapped generatefiles so we can't modify the function
            # signature. Instead, we pass the data to ourselves using an
            # instance attribute. I'm sorry.
            self._mfdicts = mfdicts

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self._close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)
    def _generatechangelog(self, cl, nodes):
        """Generate data for changelog chunks.

        ``cl`` is the changelog storage object; ``nodes`` are the changelog
        nodes to be emitted.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.

        State keys:
          ``clrevorder``   - changelog node -> emission order index
          ``mfs``          - manifest node -> first changelog node using it
          ``changedfiles`` - set of file names touched by the emitted csets
        """
        # Emission order of changelog nodes; used later to choose the
        # earliest introducing changeset for manifest/file linkrevs.
        clrevorder = {}
        mfs = {} # needed manifests
        mfl = self._repo.manifestlog
        # TODO violates storage abstraction.
        mfrevlog = mfl._revlog
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update mfs if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (x in self._fullnodes
                    or cl.rev(x) in self._precomputedellipsis):
                    n = c[0]
                    # Record the first changeset introducing this manifest
                    # version.
                    mfs.setdefault(n, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    self._nextclrevtolocalrev.setdefault(cl.rev(x),
                                                         mfrevlog.rev(n))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c[0]].read().keys())
                else:
                    changedfiles.update(c[3])
            else:
                n = c[0]
                # record the first changeset introducing this manifest version
                mfs.setdefault(n, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c[3])

            return x

        state = {
            'clrevorder': clrevorder,
            'mfs': mfs,
            'changedfiles': changedfiles,
        }

        gen = self.group(nodes, cl, True, lookupcl, units=_('changesets'))

        return state, gen
Martin von Zweigbergk
|
r28227 | |||
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based in pulls vs pushes, etc.

        ``mfs`` maps root manifest nodes to introducing changelog nodes;
        ``fnodes`` is populated as a side effect with the file nodes to
        send (file name -> {filenode: linknode}).
        """
        repo = self._repo
        mfl = repo.manifestlog
        # NOTE(review): reaches into the private revlog for dirlog access —
        # same storage-abstraction violation as in _generatechangelog.
        dirlog = mfl._revlog.dirlog
        # Work queue of manifests to send, keyed by directory ('' = root).
        # Sub-directory manifests are discovered and queued while the root
        # manifests are being emitted.
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                  1) fclnodes gets populated with the list of relevant
                     file nodes if we're not using fastpathlinkrev
                  2) When treemanifests are in use, collects treemanifest nodes
                     to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        # Always prefer the earliest introducing changeset.
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self._prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend
Sune Foldager
|
r19206 | |||
Martin von Zweigbergk
|
    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Filter ``changedfiles`` through the file matcher and emit file
        chunks via _generatefiles().

        In shallow mode, wraps ``linknodes`` so that file nodes present in
        sent manifests (but not introduced by them) are also linked.
        """
        changedfiles = list(filter(self._filematcher, changedfiles))

        if self._isshallow:
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self._clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
        return self._generatefiles(changedfiles, linknodes, commonrevs, source)
    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield changegroup chunks for the given files.

        ``linknodes(filerevlog, fname)`` must return a dict mapping file
        nodes to linkrev nodes for ``fname``. Files with no nodes left
        after pruning against ``commonrevs`` are skipped entirely.
        """
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self._prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = _fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, False,
                                        lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()
Sune Foldager
|
r19200 | |||
Gregory Szorc
|
r39016 | def _revchunk(self, store, ischangelog, rev, prev, linknode): | ||
Gregory Szorc
|
r38944 | if self._ellipses: | ||
Gregory Szorc
|
r38929 | fn = self._revisiondeltanarrow | ||
Gregory Szorc
|
r38922 | else: | ||
Gregory Szorc
|
r38929 | fn = self._revisiondeltanormal | ||
Gregory Szorc
|
r39016 | delta = fn(store, ischangelog, rev, prev, linknode) | ||
Gregory Szorc
|
r38929 | if not delta: | ||
return | ||||
Gregory Szorc
|
r38922 | |||
Gregory Szorc
|
r38933 | meta = self._builddeltaheader(delta) | ||
Gregory Szorc
|
r38929 | l = len(meta) + sum(len(x) for x in delta.deltachunks) | ||
Gregory Szorc
|
r38922 | |||
Gregory Szorc
|
r38929 | yield chunkheader(l) | ||
yield meta | ||||
for x in delta.deltachunks: | ||||
yield x | ||||
Gregory Szorc
|
    def _revisiondeltanormal(self, store, ischangelog, rev, prev, linknode):
        """Build a revisiondelta for ``rev`` against its chosen delta base.

        Censored revisions (either the base or the revision itself) are
        sent as full text (or the tombstone) with a synthesized diff
        header rather than as a real delta.
        """
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        # Delta base selection is version-specific (prev / generaldelta /
        # ellipses) and injected at construction time.
        base = self._deltaparentfn(store, rev, p1, p2, prev)

        prefix = ''
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                # A censored full text is replaced by its tombstone payload.
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )
Gregory Szorc
|
    def _revisiondeltanarrow(self, store, ischangelog, rev, prev, linknode):
        """Build a revisiondelta for ``rev`` when serving an ellipsis graph.

        Returns None when the revision should not be sent at all, a
        normal delta when its changeset is a full node, or a full-text
        "delta" flagged with REVIDX_ELLIPSIS whose parents have been
        remapped onto the ellipsis graph.
        """
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if ischangelog:
            self._clnodetorev[linknode] = rev
            linkrev = rev
            self._clrevtolocalrev[linkrev] = rev
        else:
            linkrev = self._clnodetorev[linknode]
            self._clrevtolocalrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self._fullnodes:
            return self._revisiondeltanormal(store, ischangelog, rev, prev,
                                             linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self._precomputedellipsis:
            return

        linkparents = self._precomputedellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if ischangelog:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self._clrevtolocalrev:
                    clnode = store.node(clrev)
                    self._clnodetorev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self._clrevtolocalrev:
                    return self._clrevtolocalrev[p]
                elif p in self._fullnodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self._precomputedellipsis:
                    walk.extend([pp for pp in self._precomputedellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev

        if not linkparents or (
            store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )
Gregory Szorc
|
r38922 | |||
Gregory Szorc
|
r39011 | def _deltaparentprev(store, rev, p1, p2, prev): | ||
"""Resolve a delta parent to the previous revision. | ||||
Used for version 1 changegroups, which don't support generaldelta. | ||||
""" | ||||
return prev | ||||
def _deltaparentgeneraldelta(store, rev, p1, p2, prev):
    """Resolve a delta parent when general deltas are supported."""
    dp = store.deltaparent(rev)

    if dp == nullrev:
        # Avoid sending full revisions when the stored delta parent is
        # null. Pick prev in that case. It's tempting to pick p1, as p1
        # will be smaller in the common case, but computing a delta
        # against p1 may require resolving its raw text, which is
        # expensive; the revlog caches should already have prev cached.
        # When the revlog is configured to use full snapshots, stick to
        # the full snapshot instead.
        base = prev if store.storedeltachains else nullrev
    elif dp in (p1, p2, prev):
        base = dp
    else:
        # Pick prev when we can't be sure remote has the base revision.
        return prev

    if base != nullrev and not store.candelta(base, rev):
        base = nullrev
    return base
def _deltaparentellipses(store, rev, p1, p2, prev): | ||||
"""Resolve a delta parent when in ellipses mode.""" | ||||
# TODO: send better deltas when in narrow mode. | ||||
# | ||||
# changegroup.group() loops over revisions to send, | ||||
# including revisions we'll skip. What this means is that | ||||
# `prev` will be a potentially useless delta base for all | ||||
# ellipsis nodes, as the client likely won't have it. In | ||||
# the future we should do bookkeeping about which nodes | ||||
# have been sent to the client, and try to be | ||||
# significantly smarter about delta bases. This is | ||||
# slightly tricky because this same code has to work for | ||||
# all revlogs, and we don't have the linkrev/linknode here. | ||||
return p1 | ||||
Gregory Szorc
|
def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Construct a changegroup version 01 packer."""
    # cg1 delta headers have no base node and no flags field.
    def builddeltaheader(d):
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, filematcher, b'01',
                    deltaparentfn=_deltaparentprev,
                    allowreorder=None,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
Gregory Szorc
|
r38930 | |||
Gregory Szorc
|
def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Construct a changegroup version 02 packer."""
    # cg2 delta headers add the base node (generaldelta support).
    def builddeltaheader(d):
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    # Since generaldelta is directly supported by cg2, reordering
    # generally doesn't help, so we disable it by default (treating
    # bundle.reorder=auto just like bundle.reorder=False).
    return cgpacker(repo, filematcher, b'02',
                    deltaparentfn=_deltaparentgeneraldelta,
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
Gregory Szorc
|
r38930 | |||
Gregory Szorc
|
def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Construct a changegroup version 03 packer."""
    # cg3 delta headers add revlog flags on top of cg2's base node.
    def builddeltaheader(d):
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    # Ellipsis serving needs its own delta-parent strategy; otherwise
    # cg3 behaves like cg2 with respect to generaldelta.
    if ellipses:
        deltaparentfn = _deltaparentellipses
    else:
        deltaparentfn = _deltaparentgeneraldelta

    return cgpacker(repo, filematcher, b'03',
                    deltaparentfn=deltaparentfn,
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
Gregory Szorc
|
r38930 | |||
# Maps changegroup version -> (packer factory, unpacker class).
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}
Pierre-Yves David
|
r23168 | |||
Pierre-Yves David
|
def allsupportedversions(repo):
    """Return the set of all changegroup versions this repo can handle."""
    versions = set(_packermap)
    # '03' is only offered when explicitly enabled via config or when
    # the repo itself requires tree manifests.
    cg3enabled = (repo.ui.configbool('experimental', 'changegroup3') or
                  repo.ui.configbool('experimental', 'treemanifest') or
                  'treemanifest' in repo.requirements)
    if not cg3enabled:
        versions.discard('03')
    return versions
def supportedincomingversions(repo):
    """Changegroup versions that can be applied to the repo."""
    return allsupportedversions(repo)
Martin von Zweigbergk
|
r27953 | |||
def supportedoutgoingversions(repo):
    """Changegroup versions that can be created from the repo."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.difference_update(['01', '02'])
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.difference_update(['01', '02'])
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.difference_update(['01', '02'])
    return versions
Martin von Zweigbergk
|
r27751 | |||
Martin von Zweigbergk
|
def localversion(repo):
    """Return the best version for bundles meant to be used locally.

    Covers bundles produced by strip and shelve, and temporary bundles;
    since no foreign client is involved, the newest supported version
    is always safe.
    """
    return max(supportedoutgoingversions(repo))
Martin von Zweigbergk
|
def safeversion(repo):
    """Return the smallest version it's safe to assume clients support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so a generaldelta repo never needs to fall back to 01.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
Gregory Szorc
|
def getbundler(version, repo, bundlecaps=None, filematcher=None,
               ellipses=False, shallow=False, ellipsisroots=None,
               fullnodes=None):
    """Obtain a changegroup packer for the requested ``version``.

    Validates that the version is producible by this repo, that sparse
    file matchers and ellipsis serving are only used with versions that
    support them, then delegates to the version's packer factory.
    """
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
              shallow=shallow, ellipsisroots=ellipsisroots,
              fullnodes=fullnodes)
Martin von Zweigbergk
|
r27751 | |||
Gregory Szorc
|
def getunbundler(version, fh, alg, extras=None):
    """Return an unpacker for a ``version`` changegroup read from ``fh``."""
    unpackerclass = _packermap[version][1]
    return unpackerclass(fh, alg, extras=extras)
Martin von Zweigbergk
|
r27751 | |||
Pierre-Yves David
|
def _changegroupinfo(repo, nodes, source):
    """Report the changesets being bundled (verbose/debug output only)."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
Durham Goode
|
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup and wrap it in an unbundler for application."""
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    buffered = util.chunkbuffer(cgstream)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(version, buffered, None, extras)
Durham Goode
|
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    """Return an iterable of changegroup chunks for ``outgoing`` changesets."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
Martin von Zweigbergk
|
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply incoming filelog chunks from ``source`` to the repo.

    ``needfiles`` maps file names to the set of file nodes that must
    arrive; entries are checked off as their revisions are added, and
    any leftover requirement that the repo can't already satisfy aborts
    with a "run hg verify" error.

    Returns a (revisions, files) tuple of how much was added.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # iter() with the {} sentinel stops at the empty-header terminator.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            # Tick off every newly-added node; anything not expected is
            # a protocol violation.
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    # Whatever wasn't delivered must already exist locally.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files