# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagutil,
    error,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    return chunkheader(len(path)) + path
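
# Illustrative sketch, not part of the module's API: a changegroup chunk is
# a 4-byte big-endian length (counting the length field itself) followed by
# the payload, and a zero length terminates a group. Framing a hypothetical
# 5-byte payload would look like:
#
#     chunk = chunkheader(5) + b'hello'   # b'\x00\x00\x00\x09hello'
#     end = closechunk()                  # b'\x00\x00\x00\x00'
#
# getchunk() applied to a stream holding these bytes returns b'hello', then
# the empty string once it hits the terminating chunk.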

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests
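
    # Illustrative usage sketch, assuming `fh` is a file-like object
    # positioned at the start of an uncompressed cg1 stream and `repo`,
    # `tr` and `url` are supplied by the caller:
    #
    #     cg = cg1unpacker(fh, 'UN')
    #     ret = cg.apply(repo, tr, 'pull', url)
    #
    # or, to relay the stream without applying it:
    #
    #     for chunk in cg.getchunks():
    #         ofp.write(chunk)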

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data so that it does
        not block on an sshrepo stream whose end it cannot otherwise detect.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.
        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.

        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)

            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1

        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
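
    # Illustrative sketch, assuming `store` is a revlog-like object, `tr` is
    # an active transaction and `linkmapper` maps linknodes to local
    # revisions; the iterator returned by deltaiter() is what storage code
    # expects, e.g.
    #
    #     deltas = cg.deltaiter()
    #     store.addgroup(deltas, linkmapper, tr)
    #
    # Each yielded tuple is (node, p1, p2, cs, deltabase, delta, flags), with
    # `delta` to be applied against `deltabase` (the previous node in the
    # group for cg1 streams).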

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

@attr.s(slots=True, frozen=True)
class revisiondeltarequest(object):
    """Describes a request to construct a revision delta.

    Instances are converted into ``revisiondelta`` later.
    """
    # Revision whose delta will be generated.
    node = attr.ib()

    # Linknode value.
    linknode = attr.ib()

    # Parent revisions to record in ``revisiondelta`` instance.
    p1node = attr.ib()
    p2node = attr.ib()

    # Base revision that delta should be generated against. If nullid,
    # the full revision data should be populated. If None, the delta
    # may be generated against any base revision that is an ancestor of
    # this revision. If any other value, the delta should be produced
    # against that revision.
    basenode = attr.ib()

    # Whether this should be marked as an ellipsis revision.
    ellipsis = attr.ib(default=False)

@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.

    ``revision`` and ``delta`` are mutually exclusive.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node of node this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Size of base revision this delta is against. May be None if
    # basenode is nullid.
    baserevisionsize = attr.ib()
    # Raw fulltext revision data.
    revision = attr.ib()
    # Delta between the basenode and node.
    delta = attr.ib()
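
# Illustrative sketch, assuming `node`, `p1`, `p2`, `base` and `link` are
# 20-byte binary nodes supplied by the caller: a full-revision entry carries
# the raw text and no delta, while a delta entry carries only the delta.
#
#     full = revisiondelta(node=node, p1node=p1, p2node=p2, basenode=nullid,
#                          linknode=link, flags=0, baserevisionsize=None,
#                          revision=rawtext, delta=None)
#     diff = revisiondelta(node=node, p1node=p1, p2node=p2, basenode=base,
#                          linknode=link, flags=0, baserevisionsize=None,
#                          revision=None, delta=deltabytes)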

def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.
    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                         len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
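
# Illustrative sketch of the resulting wire layout for a cg2 entry, where the
# header packed by the caller-supplied headerfn is 100 bytes
# (node, p1, p2, basenode, linknode):
#
#     | 4-byte length | 100-byte header | optional diff prefix | delta data |
#
# The length is produced by chunkheader() and therefore counts its own four
# bytes as well as everything that follows it.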

def _sortnodesnormal(store, nodes, reorder):
    """Sort nodes for changegroup generation and turn into revnums."""
    # for generaldelta revlogs, we linearize the revs; this will both be
    # much quicker and generate a much smaller bundle
    if (store._generaldelta and reorder is None) or reorder:
        dag = dagutil.revlogdag(store)
        return dag.linearize(set(store.rev(n) for n in nodes))
    else:
        return sorted([store.rev(n) for n in nodes])

def _sortnodesellipsis(store, nodes, cl, lookup):
    """Sort nodes for changegroup generation and turn into revnums."""
    # Ellipses serving mode.
    #
    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    key = lambda n: cl.rev(lookup(n))
    return [store.rev(n) for n in sorted(nodes, key=key)]

def _handlerevisiondeltarequest(store, request, prevnode):
    """Obtain a revisiondelta from a revisiondeltarequest"""

    node = request.node
    rev = store.rev(node)

    # Requesting a full revision.
    if request.basenode == nullid:
        baserev = nullrev
    # Requesting an explicit revision.
    elif request.basenode is not None:
        baserev = store.rev(request.basenode)
    # Allowing us to choose.
    else:
        p1, p2 = store.parentrevs(rev)
        dp = store.deltaparent(rev)

        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            baserev = store.rev(prevnode)
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            baserev = nullrev
        elif dp not in (p1, p2, store.rev(prevnode)):
            # Pick prev when we can't be sure remote has the base revision.
            baserev = store.rev(prevnode)
        else:
            baserev = dp

        if baserev != nullrev and not store.candelta(baserev, rev):
            baserev = nullrev

    revision = None
    delta = None
    baserevisionsize = None

    if store.iscensored(baserev) or store.iscensored(rev):
        try:
            revision = store.revision(node, raw=True)
        except error.CensoredNodeError as e:
            revision = e.tombstone

        if baserev != nullrev:
            baserevisionsize = store.rawsize(baserev)
    elif baserev == nullrev:
        revision = store.revision(node, raw=True)
    else:
        delta = store.revdiff(baserev, rev)

    extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0

    return revisiondelta(
        node=node,
        p1node=request.p1node,
        p2node=request.p2node,
        linknode=request.linknode,
        basenode=store.node(baserev),
        flags=store.flags(rev) | extraflags,
        baserevisionsize=baserevisionsize,
        revision=revision,
        delta=delta,
    )

def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev,
                            linknode, clrevtolocalrev, fullclnodes,
                            precomputedellipsis):
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p)
                             if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend([pp for pp in precomputedellipsis[p]
                             if pp != nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    if not linkparents or (
            store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    # TODO: try and actually send deltas for ellipsis data blocks
    return revisiondeltarequest(
        node=node,
        p1node=p1node,
        p2node=p2node,
        linknode=linknode,
        basenode=nullid,
        ellipsis=True,
    )

def deltagroup(repo, revs, store, ischangelog, lookup, forcedeltaparentprev,
               units=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If units is not None, progress detail will be generated; units specifies
    the type of revlog that is touched (changelog, manifest, etc.).
    """
    if not revs:
        return

    # We perform two passes over the revisions whose data we will emit.
    #
    # In the first pass, we obtain information about the deltas that will
    # be generated. This involves computing linknodes and adjusting the
    # request to take shallow fetching into account. The end result of
    # this pass is a list of "request" objects stating which deltas
    # to obtain.
    #
    # The second pass is simply resolving the requested deltas.

    cl = repo.changelog

    # In the first pass, collect info about the deltas we'll be
    # generating.
    requests = []

    # Add the parent of the first rev.
    revs.insert(0, store.parentrevs(revs[0])[0])

    for i in pycompat.xrange(len(revs) - 1):
        prev = revs[i]
        curr = revs[i + 1]

        node = store.node(curr)
        linknode = lookup(node)
        p1node, p2node = store.parents(node)

        if ellipses:
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = curr

            # This is a node to send in full, because the changeset it
            # corresponds to was a full changeset.
            if linknode in fullclnodes:
                requests.append(revisiondeltarequest(
                    node=node,
                    p1node=p1node,
                    p2node=p2node,
                    linknode=linknode,
                    basenode=None,
                ))
            elif linkrev not in precomputedellipsis:
                pass
            else:
                requests.append(_makenarrowdeltarequest(
                    cl, store, ischangelog, curr, node, linkrev, linknode,
                    clrevtolocalrev, fullclnodes,
                    precomputedellipsis))
        else:
            requests.append(revisiondeltarequest(
                node=node,
                p1node=p1node,
                p2node=p2node,
                linknode=linknode,
                basenode=store.node(prev) if forcedeltaparentprev else None,
            ))

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if units is not None:
        progress = repo.ui.makeprogress(_('bundling'), unit=units,
                                        total=len(requests))

    prevnode = store.node(revs[0])

    for i, request in enumerate(requests):
        if progress:
            progress.update(i + 1)

        delta = _handlerevisiondeltarequest(store, request, prevnode)

        yield delta

        prevnode = request.node

    if progress:
        progress.complete()
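
# Illustrative sketch, assuming `repo` is a localrepo, `cl` is its changelog
# and `nodes` is a list of changelog nodes to bundle; the identity lookup is
# valid because changelog linknodes are the changelog nodes themselves:
#
#     revs = sorted(cl.rev(n) for n in nodes)
#     for delta in deltagroup(repo, revs, cl, True, lambda x: x, False):
#         ...  # each `delta` is a revisiondelta instance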

class cgpacker(object):
    def __init__(self, repo, filematcher, version, allowreorder,
                 builddeltaheader, manifestsend,
                 forcedeltaparentprev=False,
                 bundlecaps=None, ellipses=False,
                 shallow=False, ellipsisroots=None, fullnodes=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        allowreorder controls whether reordering of revisions is allowed.
        This value is used when ``bundle.reorder`` is ``auto`` or isn't
        set.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.
        """
        assert filematcher
        self._filematcher = filematcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            self._reorder = allowreorder
        else:
            self._reorder = stringutil.parsebool(reorder)

        self._repo = repo

        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
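
    # Illustrative sketch, assuming `repo` is a localrepo and every file is
    # wanted: packers are normally built through the _makecg*packer helpers
    # defined later in this module rather than constructed directly, e.g.
    #
    #     matcher = matchmod.always(repo.root, '')
    #     packer = _makecg2packer(repo, matcher, bundlecaps=None)
    #     chunks = packer.generate(commonrevs, clnodes, False, 'pull')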

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        """Yield a sequence of changegroup byte chunks."""

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0

        clstate, deltas = self._generatechangelog(cl, clnodes)
        for delta in deltas:
            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield closechunk()

        self._verbosenote(_('%8.i (changelog)\n') % size)

        clrevorder = clstate['clrevorder']
        mfs = clstate['mfs']
        changedfiles = clstate['changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        fnodes = {}  # needed file nodes

        size = 0
        it = self.generatemanifests(
            commonrevs, clrevorder, fastpathlinkrev, mfs, fnodes, source,
            clstate['clrevtomanifestrev'])

        for dir, deltas in it:
            if dir:
                assert self.version == b'03'
                chunk = _fileheader(dir)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in mfs.iteritems()]

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        it = self.generatefiles(changedfiles, commonrevs,
                                source, mfdicts, fastpathlinkrev,
                                fnodes, clrevs)

        for path, deltas in it:
            h = _fileheader(path)
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_('%8.i %s\n') % (size, path))

        yield closechunk()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def _generatechangelog(self, cl, nodes):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.
        """
        clrevorder = {}
        mfs = {} # needed manifests
        mfl = self._repo.manifestlog
        # TODO violates storage abstraction.
        mfrevlog = mfl._revlog
        changedfiles = set()
        clrevtomanifestrev = {}

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update mfs if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis):
                    n = c[0]
                    # Record the first changeset introducing this manifest
                    # version.
                    mfs.setdefault(n, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(cl.rev(x), mfrevlog.rev(n))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c[0]].read().keys())
                else:
                    changedfiles.update(c[3])
            else:
                n = c[0]
                # record the first changeset introducing this manifest version
                mfs.setdefault(n, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c[3])

            return x

        # Changelog doesn't benefit from reordering revisions. So send out
        # revisions in store order.
        revs = sorted(cl.rev(n) for n in nodes)

        state = {
            'clrevorder': clrevorder,
            'mfs': mfs,
            'changedfiles': changedfiles,
            'clrevtomanifestrev': clrevtomanifestrev,
        }

        gen = deltagroup(
            self._repo, revs, cl, True, lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            units=_('changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        return state, gen

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source, clrevtolocalrev):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog
        to change what is sent based on pulls vs pushes, etc.
        """
        repo = self._repo
        cl = repo.changelog
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            store = dirlog(dir)

            if not self._filematcher.visitdir(store._dir[:-1] or '.'):
                prunednodes = []
            else:
                frev, flr = store.rev, store.linkrev
                prunednodes = [n for n in nodes
                               if flr(frev(n)) not in commonrevs]

            if dir and not prunednodes:
                continue

            lookupfn = makelookupmflinknode(dir, nodes)

            if self._ellipses:
                revs = _sortnodesellipsis(store, prunednodes, cl,
                                          lookupfn)
            else:
                revs = _sortnodesnormal(store, prunednodes,
                                        self._reorder)

            deltas = deltagroup(
                self._repo, revs, store, False, lookupfn,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                units=_('manifests'),
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            yield dir, deltas
r24897 | # The 'source' parameter is useful for extensions | ||
Gregory Szorc
|
r39035 | def generatefiles(self, changedfiles, commonrevs, source, | ||
mfdicts, fastpathlinkrev, fnodes, clrevs): | ||||
Gregory Szorc
|
r38925 | changedfiles = list(filter(self._filematcher, changedfiles)) | ||
Gregory Szorc
|
r39035 | if not fastpathlinkrev: | ||
def normallinknodes(unused, fname): | ||||
return fnodes.get(fname, {}) | ||||
else: | ||||
cln = self._repo.changelog.node | ||||
def normallinknodes(store, fname): | ||||
flinkrev = store.linkrev | ||||
fnode = store.node | ||||
revs = ((r, flinkrev(r)) for r in store) | ||||
return dict((fnode(r), cln(lr)) | ||||
for r, lr in revs if lr in clrevs) | ||||
Gregory Szorc
|
r39037 | clrevtolocalrev = {} | ||
Gregory Szorc
|
r38940 | if self._isshallow: | ||
Gregory Szorc
|
r38925 | # In a shallow clone, the linknodes callback needs to also include | ||
# those file nodes that are in the manifests we sent but weren't | ||||
# introduced by those manifests. | ||||
commonctxs = [self._repo[c] for c in commonrevs] | ||||
clrev = self._repo.changelog.rev | ||||
# Defining this function has a side-effect of overriding the | ||||
# function of the same name that was passed in as an argument. | ||||
# TODO have caller pass in appropriate function. | ||||
def linknodes(flog, fname): | ||||
for c in commonctxs: | ||||
try: | ||||
fnode = c.filenode(fname) | ||||
Gregory Szorc
|
r39037 | clrevtolocalrev[c.rev()] = flog.rev(fnode) | ||
Gregory Szorc
|
r38925 | except error.ManifestLookupError: | ||
pass | ||||
Gregory Szorc
|
r39035 | links = normallinknodes(flog, fname) | ||
Gregory Szorc
|
r38925 | if len(links) != len(mfdicts): | ||
for mf, lr in mfdicts: | ||||
fnode = mf.get(fname, None) | ||||
if fnode in links: | ||||
links[fnode] = min(links[fnode], lr, key=clrev) | ||||
elif fnode: | ||||
links[fnode] = lr | ||||
return links | ||||
Gregory Szorc
|
r39035 | else: | ||
linknodes = normallinknodes | ||||
Gregory Szorc
|
r38925 | |||
Durham Goode
|
r19334 | repo = self._repo | ||
Gregory Szorc
|
r39033 | cl = repo.changelog | ||
Martin von Zweigbergk
|
r38429 | progress = repo.ui.makeprogress(_('bundling'), unit=_('files'), | ||
total=len(changedfiles)) | ||||
Durham Goode
|
r19334 | for i, fname in enumerate(sorted(changedfiles)): | ||
filerevlog = repo.file(fname) | ||||
if not filerevlog: | ||||
Gregory Szorc
|
r37357 | raise error.Abort(_("empty or missing file data for %s") % | ||
fname) | ||||
Durham Goode
|
r19334 | |||
Gregory Szorc
|
r39037 | clrevtolocalrev.clear() | ||
Durham Goode
|
r19334 | linkrevnodes = linknodes(filerevlog, fname) | ||
Benoit Boissinot
|
r19207 | # Lookup for filenodes, we collected the linkrev nodes above in the | ||
# fastpath case and with lookupmf in the slowpath case. | ||||
def lookupfilelog(x): | ||||
return linkrevnodes[x] | ||||
Gregory Szorc
|
r39043 | frev, flr = filerevlog.rev, filerevlog.linkrev | ||
filenodes = [n for n in linkrevnodes | ||||
if flr(frev(n)) not in commonrevs] | ||||
Gregory Szorc
|
r39056 | if not filenodes: | ||
continue | ||||
Gregory Szorc
|
r39018 | |||
Gregory Szorc
|
r39056 | if self._ellipses: | ||
revs = _sortnodesellipsis(filerevlog, filenodes, | ||||
cl, lookupfilelog) | ||||
else: | ||||
revs = _sortnodesnormal(filerevlog, filenodes, | ||||
self._reorder) | ||||
Gregory Szorc
|
r39044 | |||
Gregory Szorc
|
r39056 | progress.update(i + 1, item=fname) | ||
Gregory Szorc
|
r39044 | |||
Gregory Szorc
|
r39056 | deltas = deltagroup( | ||
self._repo, revs, filerevlog, False, lookupfilelog, | ||||
self._forcedeltaparentprev, | ||||
ellipses=self._ellipses, | ||||
clrevtolocalrev=clrevtolocalrev, | ||||
fullclnodes=self._fullclnodes, | ||||
precomputedellipsis=self._precomputedellipsis) | ||||
yield fname, deltas | ||||
Gregory Szorc
|
r39046 | |||
Martin von Zweigbergk
|
r38429 | progress.complete() | ||
Sune Foldager
|
r19200 | |||
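# A minimal sketch of how the per-file groups yielded by generatefiles()
# end up on the wire; cgpacker.generate() does the real work, and the
# emitchunk/serializedelta callables below are hypothetical stand-ins for
# that plumbing. _fileheader() and closechunk() are the module-level helpers
# defined earlier in this file.
def _sketchserializefiles(fileitems, emitchunk, serializedelta):
    for fname, deltas in fileitems:
        # every per-file group opens with a chunk naming the file
        emitchunk(_fileheader(fname))
        for delta in deltas:
            # each revision delta becomes its own chunk (header + data)
            emitchunk(serializedelta(delta))
        # a zero-length chunk terminates the per-file group
        emitchunk(closechunk())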
Gregory Szorc
|
r38944 | def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False, | ||
Gregory Szorc
|
r38945 | shallow=False, ellipsisroots=None, fullnodes=None): | ||
Gregory Szorc
|
r38933 | builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack( | ||
d.node, d.p1node, d.p2node, d.linknode) | ||||
Augie Fackler
|
r27432 | |||
Gregory Szorc
|
r38938 | return cgpacker(repo, filematcher, b'01', | ||
allowreorder=None, | ||||
builddeltaheader=builddeltaheader, | ||||
manifestsend=b'', | ||||
Gregory Szorc
|
r39053 | forcedeltaparentprev=True, | ||
Gregory Szorc
|
r38940 | bundlecaps=bundlecaps, | ||
Gregory Szorc
|
r38944 | ellipses=ellipses, | ||
Gregory Szorc
|
r38943 | shallow=shallow, | ||
Gregory Szorc
|
r38945 | ellipsisroots=ellipsisroots, | ||
fullnodes=fullnodes) | ||||
Gregory Szorc
|
r38930 | |||
Gregory Szorc
|
r38944 | def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False, | ||
Gregory Szorc
|
r38945 | shallow=False, ellipsisroots=None, fullnodes=None): | ||
Gregory Szorc
|
r38933 | builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack( | ||
d.node, d.p1node, d.p2node, d.basenode, d.linknode) | ||||
Gregory Szorc
|
r38936 | # Since generaldelta is directly supported by cg2, reordering | ||
# generally doesn't help, so we disable it by default (treating | ||||
# bundle.reorder=auto just like bundle.reorder=False). | ||||
Gregory Szorc
|
r38938 | return cgpacker(repo, filematcher, b'02', | ||
allowreorder=False, | ||||
builddeltaheader=builddeltaheader, | ||||
manifestsend=b'', | ||||
Gregory Szorc
|
r38940 | bundlecaps=bundlecaps, | ||
Gregory Szorc
|
r38944 | ellipses=ellipses, | ||
Gregory Szorc
|
r38943 | shallow=shallow, | ||
Gregory Szorc
|
r38945 | ellipsisroots=ellipsisroots, | ||
fullnodes=fullnodes) | ||||
Gregory Szorc
|
r38930 | |||
Gregory Szorc
|
r38944 | def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False, | ||
Gregory Szorc
|
r38945 | shallow=False, ellipsisroots=None, fullnodes=None): | ||
Gregory Szorc
|
r38933 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( | ||
d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags) | ||||
Gregory Szorc
|
r38938 | return cgpacker(repo, filematcher, b'03', | ||
allowreorder=False, | ||||
builddeltaheader=builddeltaheader, | ||||
manifestsend=closechunk(), | ||||
Gregory Szorc
|
r38940 | bundlecaps=bundlecaps, | ||
Gregory Szorc
|
r38944 | ellipses=ellipses, | ||
Gregory Szorc
|
r38943 | shallow=shallow, | ||
Gregory Szorc
|
r38945 | ellipsisroots=ellipsisroots, | ||
fullnodes=fullnodes) | ||||
Gregory Szorc
|
r38930 | |||
_packermap = {'01': (_makecg1packer, cg1unpacker), | ||||
Augie Fackler
|
r26709 | # cg2 adds support for exchanging generaldelta | ||
Gregory Szorc
|
r38930 | '02': (_makecg2packer, cg2unpacker), | ||
Martin von Zweigbergk
|
r27753 | # cg3 adds support for exchanging revlog flags and treemanifests | ||
Gregory Szorc
|
r38930 | '03': (_makecg3packer, cg3unpacker), | ||
Augie Fackler
|
r26709 | } | ||
Pierre-Yves David
|
r23168 | |||
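# Illustrative only: _packermap is the single registry that getbundler() and
# getunbundler() below consult. A hypothetical helper resolving a negotiated
# version string would look roughly like this (not part of the module):
def _resolvepackers(version):
    if version not in _packermap:
        raise error.Abort(_('unsupported changegroup version: %s') % version)
    makepacker, unpackercls = _packermap[version]
    return makepacker, unpackercls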
Pierre-Yves David
|
r30627 | def allsupportedversions(repo): | ||
Martin von Zweigbergk
|
r27928 | versions = set(_packermap.keys()) | ||
Pierre-Yves David
|
r30627 | if not (repo.ui.configbool('experimental', 'changegroup3') or | ||
Pierre-Yves David
|
r30628 | repo.ui.configbool('experimental', 'treemanifest') or | ||
'treemanifest' in repo.requirements): | ||||
Pierre-Yves David
|
r30626 | versions.discard('03') | ||
Martin von Zweigbergk
|
r27953 | return versions | ||
# Changegroup versions that can be applied to the repo | ||||
def supportedincomingversions(repo): | ||||
Pierre-Yves David
|
r30628 | return allsupportedversions(repo) | ||
Martin von Zweigbergk
|
r27953 | |||
# Changegroup versions that can be created from the repo | ||||
def supportedoutgoingversions(repo): | ||||
Pierre-Yves David
|
r30627 | versions = allsupportedversions(repo) | ||
Martin von Zweigbergk
|
r27953 | if 'treemanifest' in repo.requirements: | ||
Martin von Zweigbergk
|
r27928 | # Versions 01 and 02 support only flat manifests and it's just too | ||
# expensive to convert between the flat manifest and tree manifest on | ||||
# the fly. Since tree manifests are hashed differently, all of history | ||||
# would have to be converted. Instead, we simply don't even pretend to | ||||
# support versions 01 and 02. | ||||
versions.discard('01') | ||||
versions.discard('02') | ||||
Martin von Zweigbergk
|
r38871 | if repository.NARROW_REQUIREMENT in repo.requirements: | ||
Martin von Zweigbergk
|
r36483 | # Versions 01 and 02 don't support revlog flags, and we need to | ||
# support that for stripping and unbundling to work. | ||||
versions.discard('01') | ||||
versions.discard('02') | ||||
Matt Harbison
|
r37150 | if LFS_REQUIREMENT in repo.requirements: | ||
# Versions 01 and 02 don't support revlog flags, and we need to | ||||
# mark LFS entries with REVIDX_EXTSTORED. | ||||
versions.discard('01') | ||||
versions.discard('02') | ||||
Martin von Zweigbergk
|
r27752 | return versions | ||
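# Worked example (illustrative, not part of the module): on a repository
# whose requirements include 'treemanifest', the discards above leave '03'
# as the only version that can be produced.
def _onlytreecapableversions(repo):
    versions = supportedoutgoingversions(repo)
    if 'treemanifest' in repo.requirements:
        # '01' and '02' were discarded above, so at most '03' remains
        assert versions <= {'03'}
    return sorted(versions)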
Martin von Zweigbergk
|
r27751 | |||
Martin von Zweigbergk
|
r34179 | def localversion(repo): | ||
# Finds the best version to use for bundles that are meant to be used | ||||
# locally, such as those from strip and shelve, and temporary bundles. | ||||
return max(supportedoutgoingversions(repo)) | ||||
Martin von Zweigbergk
|
r27929 | def safeversion(repo): | ||
# Finds the smallest version that it's safe to assume clients of the repo | ||||
Martin von Zweigbergk
|
r27931 | # will support. For example, all hg versions that support generaldelta also | ||
# support changegroup 02. | ||||
Martin von Zweigbergk
|
r27953 | versions = supportedoutgoingversions(repo) | ||
Martin von Zweigbergk
|
r27929 | if 'generaldelta' in repo.requirements: | ||
versions.discard('01') | ||||
assert versions | ||||
return min(versions) | ||||
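# Usage sketch (illustrative, not part of the module): the two selectors
# above answer different questions -- localversion() picks the richest
# format the local repo can write and read back (strip/shelve bundles),
# while safeversion() picks the oldest format that clients of the repo can
# safely be assumed to understand.
def _pickversion(repo, forlocaluse):
    return localversion(repo) if forlocaluse else safeversion(repo)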
Gregory Szorc
|
r38940 | def getbundler(version, repo, bundlecaps=None, filematcher=None, | ||
Gregory Szorc
|
r38945 | ellipses=False, shallow=False, ellipsisroots=None, | ||
fullnodes=None): | ||||
Martin von Zweigbergk
|
r27953 | assert version in supportedoutgoingversions(repo) | ||
Gregory Szorc
|
r38830 | |||
if filematcher is None: | ||||
filematcher = matchmod.alwaysmatcher(repo.root, '') | ||||
if version == '01' and not filematcher.always(): | ||||
raise error.ProgrammingError('version 01 changegroups do not support ' | ||||
'sparse file matchers') | ||||
Gregory Szorc
|
r38944 | if ellipses and version in (b'01', b'02'): | ||
raise error.Abort( | ||||
_('ellipsis nodes require at least cg3 on client and server, ' | ||||
'but negotiated version %s') % version) | ||||
Gregory Szorc
|
r38830 | # Requested files could include files not in the local store. So | ||
# filter those out. | ||||
filematcher = matchmod.intersectmatchers(repo.narrowmatch(), | ||||
filematcher) | ||||
Gregory Szorc
|
r38930 | fn = _packermap[version][0] | ||
Gregory Szorc
|
r38944 | return fn(repo, filematcher, bundlecaps, ellipses=ellipses, | ||
Gregory Szorc
|
r38945 | shallow=shallow, ellipsisroots=ellipsisroots, | ||
fullnodes=fullnodes) | ||||
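# A small usage sketch (hypothetical wrapper, not part of the module):
# build a version '02' packer with the default matcher and bundlecaps, then
# drive it the same way makestream() below does.
def _sketchgetchunks(repo, commonrevs, csets, fastpathlinkrev, source):
    bundler = getbundler('02', repo)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)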
Martin von Zweigbergk
|
r27751 | |||
Gregory Szorc
|
r29593 | def getunbundler(version, fh, alg, extras=None): | ||
return _packermap[version][1](fh, alg, extras=extras) | ||||
Martin von Zweigbergk
|
r27751 | |||
Pierre-Yves David
|
r20926 | def _changegroupinfo(repo, nodes, source): | ||
if repo.ui.verbose or source == 'bundle': | ||||
repo.ui.status(_("%d changesets found\n") % len(nodes)) | ||||
if repo.ui.debugflag: | ||||
repo.ui.debug("list of changesets:\n") | ||||
for node in nodes: | ||||
repo.ui.debug("%s\n" % hex(node)) | ||||
Durham Goode
|
r34098 | def makechangegroup(repo, outgoing, version, source, fastpath=False, | ||
bundlecaps=None): | ||||
cgstream = makestream(repo, outgoing, version, source, | ||||
fastpath=fastpath, bundlecaps=bundlecaps) | ||||
return getunbundler(version, util.chunkbuffer(cgstream), None, | ||||
{'clcount': len(outgoing.missing) }) | ||||
Durham Goode
|
r34105 | def makestream(repo, outgoing, version, source, fastpath=False, | ||
Gregory Szorc
|
r38830 | bundlecaps=None, filematcher=None): | ||
bundler = getbundler(version, repo, bundlecaps=bundlecaps, | ||||
filematcher=filematcher) | ||||
Durham Goode
|
r34105 | |||
Pierre-Yves David
|
r20925 | repo = repo.unfiltered() | ||
commonrevs = outgoing.common | ||||
csets = outgoing.missing | ||||
heads = outgoing.missingheads | ||||
# We go through the fast path if we get told to, or if all (unfiltered) | ||||
# heads have been requested (since we then know that all linkrevs will | ||||
# be pulled by the client). | ||||
heads.sort() | ||||
fastpathlinkrev = fastpath or ( | ||||
repo.filtername is None and heads == sorted(repo.heads())) | ||||
repo.hook('preoutgoing', throw=True, source=source) | ||||
Pierre-Yves David
|
r20926 | _changegroupinfo(repo, csets, source) | ||
Sune Foldager
|
r23177 | return bundler.generate(commonrevs, csets, fastpathlinkrev, source) | ||
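# Sketch of a typical high-level caller (illustrative; the filename and the
# choice of version '02' are assumptions): stream an outgoing set straight
# into an uncompressed changegroup file using writechunks() from earlier in
# this module.
def _writeoutgoingbundle(repo, outgoing, filename):
    chunks = makestream(repo, outgoing, '02', 'bundle')
    return writechunks(repo.ui, chunks, filename)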
Martin von Zweigbergk
|
r28361 | def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): | ||
Pierre-Yves David
|
r20932 | revisions = 0 | ||
files = 0 | ||||
Martin von Zweigbergk
|
r38401 | progress = repo.ui.makeprogress(_('files'), unit=_('files'), | ||
total=expectedfiles) | ||||
Augie Fackler
|
r29724 | for chunkdata in iter(source.filelogheader, {}): | ||
Martin von Zweigbergk
|
r28361 | files += 1 | ||
Pierre-Yves David
|
r20932 | f = chunkdata["filename"] | ||
repo.ui.debug("adding %s revisions\n" % f) | ||||
Martin von Zweigbergk
|
r38401 | progress.increment() | ||
Martin von Zweigbergk
|
r27754 | fl = repo.file(f) | ||
Pierre-Yves David
|
r20932 | o = len(fl) | ||
Mike Edgar
|
r24120 | try: | ||
Durham Goode
|
r34292 | deltas = source.deltaiter() | ||
if not fl.addgroup(deltas, revmap, trp): | ||||
Pierre-Yves David
|
r26587 | raise error.Abort(_("received file revlog group is empty")) | ||
Gregory Szorc
|
r25660 | except error.CensoredBaseError as e: | ||
Pierre-Yves David
|
r26587 | raise error.Abort(_("received delta base is censored: %s") % e) | ||
Martin von Zweigbergk
|
r27754 | revisions += len(fl) - o | ||
Pierre-Yves David
|
r20932 | if f in needfiles: | ||
needs = needfiles[f] | ||||
Gregory Szorc
|
r38806 | for new in pycompat.xrange(o, len(fl)): | ||
Pierre-Yves David
|
r20932 | n = fl.node(new) | ||
if n in needs: | ||||
needs.remove(n) | ||||
else: | ||||
Pierre-Yves David
|
r26587 | raise error.Abort( | ||
Pierre-Yves David
|
r20932 | _("received spurious file revlog entry")) | ||
if not needs: | ||||
del needfiles[f] | ||||
Martin von Zweigbergk
|
r38401 | progress.complete() | ||
Pierre-Yves David
|
r20932 | |||
for f, needs in needfiles.iteritems(): | ||||
fl = repo.file(f) | ||||
for n in needs: | ||||
try: | ||||
fl.rev(n) | ||||
except error.LookupError: | ||||
Pierre-Yves David
|
r26587 | raise error.Abort( | ||
Pierre-Yves David
|
r20932 | _('missing file data for %s:%s - run hg verify') % | ||
(f, hex(n))) | ||||
return revisions, files | ||||
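# Illustrative note (not part of the module): the changegroup unpacker's
# apply() method is the usual caller. It supplies the per-file chunks via
# `source` and typically feeds the returned counts into its "added ...
# changesets with ... changes to ... files" summary; any filenode still
# listed in needfiles would have triggered the Abort above.
#
#     newrevs, newfiles = _addchangegroupfiles(
#         repo, source, revmap, trp, expectedfiles, needfiles)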