changegroup.py
1937 lines
| 64.5 KiB
| text/x-python
|
PythonLexer
/ mercurial / changegroup.py
Martin Geisler
|
r8226 | # changegroup.py - Mercurial changegroup manipulation functions | ||
# | ||||
Raphaël Gomès
|
r47575 | # Copyright 2006 Olivia Mackall <olivia@selenic.com> | ||
Martin Geisler
|
r8226 | # | ||
# This software may be used and distributed according to the terms of the | ||||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
Matt Mackall
|
r3877 | |||
Gregory Szorc
|
r25921 | from __future__ import absolute_import | ||
Raphaël Gomès
|
r47452 | import collections | ||
Gregory Szorc
|
r25921 | import os | ||
import struct | ||||
Pierre-Yves David
|
r20933 | import weakref | ||
Gregory Szorc
|
r25921 | |||
from .i18n import _ | ||||
from .node import ( | ||||
hex, | ||||
Gregory Szorc
|
r38919 | nullid, | ||
Gregory Szorc
|
r25921 | nullrev, | ||
short, | ||||
) | ||||
Gregory Szorc
|
r43355 | from .pycompat import open | ||
Gregory Szorc
|
r25921 | |||
from . import ( | ||||
error, | ||||
Gregory Szorc
|
r38830 | match as matchmod, | ||
Gregory Szorc
|
r25921 | mdiff, | ||
phases, | ||||
Pulkit Goyal
|
r30925 | pycompat, | ||
Pulkit Goyal
|
r45932 | requirements, | ||
Pulkit Goyal
|
r46129 | scmutil, | ||
Pulkit Goyal
|
r43078 | util, | ||
) | ||||
Augie Fackler
|
r43346 | from .interfaces import repository | ||
Raphaël Gomès
|
r47445 | from .revlogutils import sidedata as sidedatamod | ||
Thomas Arendsen Hein
|
r1981 | |||
Augie Fackler
|
# Pre-compiled struct layouts for the fixed-size delta header of each
# changegroup wire-format version:
#   v1: node, p1, p2, linknode (four 20-byte hashes; delta base is implicit)
#   v2: adds an explicit deltabase node
#   v3: big-endian, adds a 16-bit flags field
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")

# Repository requirement string advertising large-file storage (LFS) support.
LFS_REQUIREMENT = b'lfs'

# Module-level alias for the util helper that reads an exact byte count
# from a stream.
readexactly = util.readexactly
Mads Kiilerich
|
r13457 | |||
Augie Fackler
|
r43346 | |||
Mads Kiilerich
|
def getchunk(stream):
    """Read and return the next framed chunk from ``stream``.

    Every chunk on the wire is prefixed with a 4-byte big-endian length
    that counts the prefix itself. A zero length marks a terminator and
    yields ``b""``; any other length <= 4 is malformed and aborts.
    """
    header = readexactly(stream, 4)
    (length,) = struct.unpack(b">l", header)
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        raise error.Abort(_(b"invalid chunk length %d") % length)
    return b""
Thomas Arendsen Hein
|
r1981 | |||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
def chunkheader(length):
    """Return a changegroup chunk header (string).

    The on-the-wire length is big-endian and includes the 4-byte prefix
    itself, hence the ``+ 4``.
    """
    total = length + 4
    return struct.pack(b">l", total)
Thomas Arendsen Hein
|
r1981 | |||
Augie Fackler
|
r43346 | |||
Thomas Arendsen Hein
|
def closechunk():
    """Return a changegroup chunk header (string) for a zero-length chunk."""
    # Equivalent to struct.pack(b">l", 0): the terminator is always four
    # NUL bytes on the wire.
    return b"\x00\x00\x00\x00"
Thomas Arendsen Hein
|
r1981 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    header = chunkheader(len(path))
    return header + path
Augie Fackler
|
r43346 | |||
Pierre-Yves David
|
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.

    ``vfs``, when given, is used to open (and, on failure, unlink) the
    named file; otherwise the plain filesystem is used.
    """
    fh = None
    # ``cleanup`` holds the path to delete if writing fails; it is reset
    # to None once all chunks have been written successfully.
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        # Writing completed: disarm the cleanup path so the finally
        # block keeps the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # An error occurred mid-write: remove the partial file.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
Matt Mackall
|
r3660 | |||
Augie Fackler
|
r43346 | |||
Sune Foldager
|
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    # Fixed-size delta header layout for this wire-format version;
    # subclasses override these for cg2/cg3.
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests
Martin von Zweigbergk
|
r27920 | |||
Gregory Szorc
|
    def __init__(self, fh, alg, extras=None):
        """Wrap stream ``fh``, decompressing with bundle compression ``alg``.

        ``alg`` is a bundle compression type name; ``None`` is treated as
        uncompressed (``b'UN'``). Aborts on an unknown compression type.
        ``extras`` is an optional dict of extra metadata exposed as
        ``self.extras``.
        """
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            # NOTE(review): presumably the bzip2 magic was already consumed
            # during bundle type detection, so a header-tolerant engine is
            # substituted here — confirm against util.compengines.
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Optional per-chunk progress callback, set by consumers.
        self.callback = None
Augie Fackler
|
r26706 | |||
# These methods (compressed, read, seek, tell) all appear to only | ||||
# be used by bundlerepo, but it's a little hard to tell. | ||||
Matt Mackall
|
r12044 | def compressed(self): | ||
Augie Fackler
|
r43347 | return self._type is not None and self._type != b'UN' | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
    def read(self, l):
        """Read ``l`` bytes; delegates to the decompressed stream.

        Apparently only used by bundlerepo (see note above).
        """
        return self._stream.read(l)
Augie Fackler
|
r43346 | |||
Matt Mackall
|
    def seek(self, pos):
        """Seek the underlying stream to ``pos``; used by bundlerepo."""
        return self._stream.seek(pos)
Augie Fackler
|
r43346 | |||
Matt Mackall
|
    def tell(self):
        """Return the underlying stream position; used by bundlerepo."""
        return self._stream.tell()
Augie Fackler
|
r43346 | |||
Matt Mackall
|
    def close(self):
        """Close the underlying stream."""
        return self._stream.close()
Matt Mackall
|
r12334 | |||
Augie Fackler
|
r26707 | def _chunklength(self): | ||
Jim Hague
|
r13459 | d = readexactly(self._stream, 4) | ||
Augie Fackler
|
r43347 | l = struct.unpack(b">l", d)[0] | ||
Mads Kiilerich
|
r13458 | if l <= 4: | ||
if l: | ||||
Augie Fackler
|
r43347 | raise error.Abort(_(b"invalid chunk length %d") % l) | ||
Mads Kiilerich
|
r13458 | return 0 | ||
if self.callback: | ||||
Matt Mackall
|
r12334 | self.callback() | ||
Mads Kiilerich
|
r13458 | return l - 4 | ||
Matt Mackall
|
r12334 | |||
Benoit Boissinot
|
    def changelogheader(self):
        """Return the (empty) changelog header.

        The v10 (cg1) wire format does not have a changelog header chunk.
        """
        return {}
    def manifestheader(self):
        """Return the (empty) manifest header.

        The v10 (cg1) wire format does not have a manifest header chunk.
        """
        return {}
def filelogheader(self): | ||||
"""return the header of the filelogs chunk, v10 only has the filename""" | ||||
Augie Fackler
|
r26707 | l = self._chunklength() | ||
Benoit Boissinot
|
r14144 | if not l: | ||
return {} | ||||
fname = readexactly(self._stream, l) | ||||
Augie Fackler
|
r43347 | return {b'filename': fname} | ||
Matt Mackall
|
r12334 | |||
Benoit Boissinot
|
r14141 | def _deltaheader(self, headertuple, prevnode): | ||
node, p1, p2, cs = headertuple | ||||
if prevnode is None: | ||||
deltabase = p1 | ||||
else: | ||||
deltabase = prevnode | ||||
Mike Edgar
|
r27433 | flags = 0 | ||
return node, p1, p2, deltabase, cs, flags | ||||
Benoit Boissinot
|
r14141 | |||
Benoit Boissinot
|
    def deltachunk(self, prevnode):
        """Read and decode the next delta chunk from the stream.

        Returns an empty dict when the terminating chunk is reached,
        otherwise the 8-tuple
        ``(node, p1, p2, cs, deltabase, delta, flags, sidedata)``.
        """
        l = self._chunklength()
        if not l:
            return {}
        # The chunk starts with the fixed-size header; the remainder is
        # the delta payload.
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        # cg4 forward-compat
        sidedata = {}
        return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
Matt Mackall
|
r12336 | |||
Pierre-Yves David
|
    def getchunks(self):
        """Return all the chunks contained in the bundle.

        Used when you need to forward the binary stream to a file or
        another network API. To do so it parses the changegroup data;
        otherwise it would block in the sshrepo case because the end of
        the stream cannot be detected.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.
        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit large chunks in 1 MiB slices.
                while pos < len(chunk):
                    next = pos + 2 ** 20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
Raphaël Gomès
|
    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
        """Apply the manifest portion of the stream to ``repo``.

        ``prog`` is a progress helper; ``addrevisioncb``, if given, is
        forwarded to the manifest storage's ``addgroup``.
        """
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        storage = repo.manifestlog.getstorage(b'')
        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
        prog.complete()
        self.callback = None
Augie Fackler
|
r26712 | |||
Augie Fackler
|
    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
        sidedata_categories=None,
    ):
        """Add the changegroup returned by source.read() to this repo.

        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1

        `sidedata_categories` is an optional set of the remote's sidedata wanted
        categories.
        """
        repo = repo.unfiltered()

        # Only useful if we're adding sidedata categories. If both peers have
        # the same categories, then we simply don't do anything.
        if self.version == b'04' and srctype == b'pull':
            sidedata_helpers = get_sidedata_helpers(
                repo,
                sidedata_categories or set(),
                pull=True,
            )
        else:
            sidedata_helpers = None

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            # Set of files touched by the incoming changesets, and revs of
            # changesets that were already present locally.
            efilesset = set()
            duprevs = []

            def ondupchangelog(cl, rev):
                if rev < clstart:
                    duprevs.append(rev)

            def onchangelog(cl, rev):
                ctx = cl.changelogrevision(rev)
                efilesset.update(ctx.files)
                repo.register_changeset(rev, ctx)

            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                alwayscache=True,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # Keep track of the (non-changelog) revlogs we've updated and their
            # range of new revisions for sidedata rewrite.
            # TODO do something more efficient than keeping the reference to
            # the revlogs, especially memory-wise.
            touched_manifests = {}
            touched_filelogs = {}

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            on_manifest_rev = None
            if sidedata_helpers and b'manifest' in sidedata_helpers[1]:

                def on_manifest_rev(manifest, rev):
                    range = touched_manifests.get(manifest)
                    if not range:
                        touched_manifests[manifest] = (rev, rev)
                    else:
                        # New revisions are appended contiguously.
                        assert rev == range[1] + 1
                        touched_manifests[manifest] = (range[0], rev)

            self._unpackmanifests(
                repo,
                revmap,
                trp,
                progress,
                addrevisioncb=on_manifest_rev,
            )

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in pycompat.iteritems(mfest):
                        needfiles.setdefault(f, set()).add(n)

            on_filelog_rev = None
            if sidedata_helpers and b'filelog' in sidedata_helpers[1]:

                def on_filelog_rev(filelog, rev):
                    range = touched_filelogs.get(filelog)
                    if not range:
                        touched_filelogs[filelog] = (rev, rev)
                    else:
                        # New revisions are appended contiguously.
                        assert rev == range[1] + 1
                        touched_filelogs[filelog] = (range[0], rev)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo,
                self,
                revmap,
                trp,
                efiles,
                needfiles,
                addrevisioncb=on_filelog_rev,
            )

            if sidedata_helpers:
                if b'changelog' in sidedata_helpers[1]:
                    cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
                for mf, (startrev, endrev) in touched_manifests.items():
                    mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
                for fl, (startrev, endrev) in touched_filelogs.items():
                    fl.rewrite_sidedata(sidedata_helpers, startrev, endrev)

            # making sure the value exists
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # Some code uses bundle operations for internal purposes. It
            # usually sets `ui.quiet` to do this outside of user sight. Since
            # the report of such an operation now happens at the end of the
            # transaction, `ui.quiet` has no direct effect on the output.
            #
            # To preserve this intent we use an inelegant hack: we fail to
            # report the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the
            # "end of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    # New heads that close a branch do not count.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            added = pycompat.xrange(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                if duprevs:
                    duprevs.extend(added)
                else:
                    duprevs = added
                phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
                duprevs = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret
Augie Fackler
|
r26695 | |||
Durham Goode
|
r34292 | def deltaiter(self): | ||
Durham Goode
|
r34147 | """ | ||
returns an iterator of the deltas in this changegroup | ||||
Useful for passing to the underlying storage system to be stored. | ||||
""" | ||||
chain = None | ||||
for chunkdata in iter(lambda: self.deltachunk(chain), {}): | ||||
Raphaël Gomès
|
r47689 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata) | ||
Durham Goode
|
r34295 | yield chunkdata | ||
chain = chunkdata[0] | ||||
Durham Goode
|
r34147 | |||
Augie Fackler
|
r43346 | |||
Sune Foldager
|
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 transmits an explicit delta base, so ``prevnode`` is unused;
        # there are still no per-revision flags.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
Sune Foldager
|
r23181 | |||
Augie Fackler
|
r43346 | |||
Augie Fackler
|
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries everything, including flags, in the header.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
        """Apply root manifests, then any tree (directory) manifest groups."""
        super(cg3unpacker, self)._unpackmanifests(
            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
        )
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(
                deltas, revmap, trp, addrevisioncb=addrevisioncb
            ):
                raise error.Abort(_(b"received dir revlog group is empty"))
Martin von Zweigbergk
|
r27754 | |||
Augie Fackler
|
r43346 | |||
Raphaël Gomès
|
class cg4unpacker(cg3unpacker):
    """Unpacker for cg4 streams.

    cg4 streams add support for exchanging sidedata.
    """

    version = b'04'

    def deltachunk(self, prevnode):
        res = super(cg4unpacker, self).deltachunk(prevnode)
        if not res:
            return res

        (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res

        # Sidedata travels in its own chunk right after the delta so the
        # raw delta length and the sidedata length can be told apart.
        sidedata_raw = getchunk(self._stream)
        sidedata = {}
        if len(sidedata_raw) > 0:
            sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
        return node, p1, p2, cs, deltabase, delta, flags, sidedata
Matt Mackall
|
class headerlessfixup(object):
    """File-like wrapper that replays an already-consumed header.

    ``h`` holds bytes that were read from ``fh`` ahead of time; reads are
    served from that buffer first and fall through to the real stream
    once the buffer is exhausted.
    """

    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        d = buffered[:n]
        self._h = buffered[n:]
        if len(d) < n:
            # Buffer ran out mid-read; top up from the real stream.
            d += readexactly(self._fh, n - len(d))
        return d
Matt Mackall
|
r12329 | |||
Augie Fackler
|
r43346 | |||
Joerg Sonnenberger
|
def _revisiondeltatochunks(repo, delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks.

    ``headerfn`` maps the delta to its wire header bytes. Yields the chunk
    length prefix, the header, the (possibly prefixed) delta payload, and
    finally a separate sidedata chunk when sidedata is present.
    """

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.
    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data

    sidedata = delta.sidedata
    if sidedata is not None:
        # Need a separate chunk for sidedata to be able to differentiate
        # "raw delta" length and sidedata length
        yield chunkheader(len(sidedata))
        yield sidedata
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r39033 | def _sortnodesellipsis(store, nodes, cl, lookup): | ||
Gregory Szorc
|
r39901 | """Sort nodes for changegroup generation.""" | ||
Gregory Szorc
|
r39018 | # Ellipses serving mode. | ||
# | ||||
# In a perfect world, we'd generate better ellipsis-ified graphs | ||||
# for non-changelog revlogs. In practice, we haven't started doing | ||||
# that yet, so the resulting DAGs for the manifestlog and filelogs | ||||
# are actually full of bogus parentage on all the ellipsis | ||||
# nodes. This has the side effect that, while the contents are | ||||
# correct, the individual DAGs might be completely out of whack in | ||||
# a case like 882681bc3166 and its ancestors (back about 10 | ||||
# revisions or so) in the main hg repo. | ||||
# | ||||
# The one invariant we *know* holds is that the new (potentially | ||||
# bogus) DAG shape will be valid if we order the nodes in the | ||||
# order that they're introduced in dramatis personae by the | ||||
# changelog, so what we do is we sort the non-changelog histories | ||||
# by the order in which they are used by the changelog. | ||||
Gregory Szorc
|
r39033 | key = lambda n: cl.rev(lookup(n)) | ||
Gregory Szorc
|
r39901 | return sorted(nodes, key=key) | ||
Gregory Szorc
|
r39018 | |||
Augie Fackler
|
r43346 | |||
def _resolvenarrowrevisioninfo(
    cl,
    store,
    ischangelog,
    rev,
    linkrev,
    linknode,
    clrevtolocalrev,
    fullclnodes,
    precomputedellipsis,
):
    """Resolve parents and linknode for a revision in ellipsis serving mode.

    ``cl`` is the changelog; ``store`` is the revlog being packed.
    ``rev``/``linkrev``/``linknode`` describe the revision of ``store``
    being emitted.  ``clrevtolocalrev`` maps changelog revnums to revnums
    local to ``store``; ``fullclnodes`` is the set of changelog nodes
    being sent in full; ``precomputedellipsis`` maps ellipsis changelog
    revnums to their (possibly adjusted) parent revnums.

    Returns a ``(p1node, p2node, linknode)`` tuple describing the
    (possibly adjusted) parentage to advertise for this revision.
    Raises ``error.Abort`` if a parent cannot be resolved.
    """
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev
        if ischangelog:
            return clrev
        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            # BFS: pop the head of the worklist.
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend(
                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
                )
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    b"unable to resolve parent while packing '%s' %r"
                    b' for changeset %r' % (store.indexfile, rev, clrev)
                )

        return nullrev

    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        # Single known parent: fill the second slot with nullrev.
        (p1,) = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
Gregory Szorc
|
r39040 | |||
Augie Fackler
|
r43346 | |||
def deltagroup(
    repo,
    store,
    nodes,
    ischangelog,
    lookup,
    forcedeltaparentprev,
    topic=None,
    ellipses=False,
    clrevtolocalrev=None,
    fullclnodes=None,
    precomputedellipsis=None,
    sidedata_helpers=None,
):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    ``store`` is the revlog to emit revisions from; ``lookup`` maps a
    node to its linkrev node (and may have side effects — see below).
    ``forcedeltaparentprev`` forces deltas against the previous revision
    in the group (changegroup v1 compatibility).

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).

    See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = b'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = b'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl,
                    store,
                    ischangelog,
                    rev,
                    linkrev,
                    linknode,
                    clrevtolocalrev,
                    fullclnodes,
                    precomputedellipsis,
                )

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(
            topic, unit=_(b'chunks'), total=len(nodes)
        )

    configtarget = repo.ui.config(b'devel', b'bundle.delta')
    if configtarget not in (b'', b'p1', b'full'):
        # NOTE(review): message reads awkwardly ("as unknown value" —
        # presumably "has unknown value") and lacks a trailing newline
        # for ui.warn; consider fixing the translatable string.
        msg = _(b"""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    # Map config/flags to the delta generation mode understood by
    # emitrevisions(); forcedeltaparentprev takes precedence.
    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == b'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == b'full':
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode,
        sidedata_helpers=sidedata_helpers,
    )

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            # Use the linknode recorded during the first pass; see the
            # hash-seed comment above for why lookup() isn't called here.
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r38938 | class cgpacker(object): | ||
Augie Fackler
|
r43346 | def __init__( | ||
self, | ||||
repo, | ||||
oldmatcher, | ||||
matcher, | ||||
version, | ||||
builddeltaheader, | ||||
manifestsend, | ||||
forcedeltaparentprev=False, | ||||
bundlecaps=None, | ||||
ellipses=False, | ||||
shallow=False, | ||||
ellipsisroots=None, | ||||
fullnodes=None, | ||||
Raphaël Gomès
|
r47445 | remote_sidedata=None, | ||
Augie Fackler
|
r43346 | ): | ||
Sune Foldager
|
r19202 | """Given a source repo, construct a bundler. | ||
Durham Goode
|
r32287 | |||
Martin von Zweigbergk
|
r40380 | oldmatcher is a matcher that matches on files the client already has. | ||
These will not be included in the changegroup. | ||||
matcher is a matcher that matches on files to include in the | ||||
Gregory Szorc
|
r38830 | changegroup. Used to facilitate sparse changegroups. | ||
Gregory Szorc
|
r39053 | forcedeltaparentprev indicates whether delta parents must be against | ||
the previous revision in a delta group. This should only be used for | ||||
compatibility with changegroup version 1. | ||||
Gregory Szorc
|
r38937 | |||
Gregory Szorc
|
r38933 | builddeltaheader is a callable that constructs the header for a group | ||
delta. | ||||
Gregory Szorc
|
r38934 | manifestsend is a chunk to send after manifests have been fully emitted. | ||
Gregory Szorc
|
r38944 | ellipses indicates whether ellipsis serving mode is enabled. | ||
Durham Goode
|
r32287 | bundlecaps is optional and can be used to specify the set of | ||
capabilities which can be used to build the bundle. While bundlecaps is | ||||
unused in core Mercurial, extensions rely on this feature to communicate | ||||
capabilities to customize the changegroup packer. | ||||
Gregory Szorc
|
r38940 | |||
shallow indicates whether shallow data might be sent. The packer may | ||||
need to pack file contents not introduced by the changes being packed. | ||||
Gregory Szorc
|
r38945 | |||
Gregory Szorc
|
r39032 | fullnodes is the set of changelog nodes which should not be ellipsis | ||
nodes. We store this rather than the set of nodes that should be | ||||
ellipsis because for very large histories we expect this to be | ||||
significantly smaller. | ||||
Raphaël Gomès
|
r47445 | |||
remote_sidedata is the set of sidedata categories wanted by the remote. | ||||
Sune Foldager
|
r19202 | """ | ||
Martin von Zweigbergk
|
r40380 | assert oldmatcher | ||
assert matcher | ||||
self._oldmatcher = oldmatcher | ||||
self._matcher = matcher | ||||
Gregory Szorc
|
r38830 | |||
Gregory Szorc
|
r38931 | self.version = version | ||
Gregory Szorc
|
r39053 | self._forcedeltaparentprev = forcedeltaparentprev | ||
Gregory Szorc
|
r38933 | self._builddeltaheader = builddeltaheader | ||
Gregory Szorc
|
r38934 | self._manifestsend = manifestsend | ||
Gregory Szorc
|
r38944 | self._ellipses = ellipses | ||
Gregory Szorc
|
r38931 | |||
Durham Goode
|
r32287 | # Set of capabilities we can use to build the bundle. | ||
if bundlecaps is None: | ||||
bundlecaps = set() | ||||
self._bundlecaps = bundlecaps | ||||
Raphaël Gomès
|
r47447 | if remote_sidedata is None: | ||
remote_sidedata = set() | ||||
self._remote_sidedata = remote_sidedata | ||||
Gregory Szorc
|
r38940 | self._isshallow = shallow | ||
Gregory Szorc
|
r39032 | self._fullclnodes = fullnodes | ||
Gregory Szorc
|
r38936 | |||
Gregory Szorc
|
r38943 | # Maps ellipsis revs to their roots at the changelog level. | ||
self._precomputedellipsis = ellipsisroots | ||||
Sune Foldager
|
r19202 | self._repo = repo | ||
Gregory Szorc
|
r38936 | |||
Mads Kiilerich
|
r23748 | if self._repo.ui.verbose and not self._repo.ui.debugflag: | ||
self._verbosenote = self._repo.ui.note | ||||
else: | ||||
self._verbosenote = lambda s: None | ||||
Augie Fackler
|
r43346 | def generate( | ||
self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True | ||||
): | ||||
Pulkit Goyal
|
r39708 | """Yield a sequence of changegroup byte chunks. | ||
If changelog is False, changelog data won't be added to changegroup | ||||
""" | ||||
Gregory Szorc
|
r39012 | |||
Sune Foldager
|
r19202 | repo = self._repo | ||
Martin von Zweigbergk
|
r24978 | cl = repo.changelog | ||
Benoit Boissinot
|
r19204 | |||
Augie Fackler
|
r43347 | self._verbosenote(_(b'uncompressed size of bundle content:\n')) | ||
Gregory Szorc
|
r39012 | size = 0 | ||
Raphaël Gomès
|
r47449 | sidedata_helpers = None | ||
if self.version == b'04': | ||||
remote_sidedata = self._remote_sidedata | ||||
if source == b'strip': | ||||
# We're our own remote when stripping, get the no-op helpers | ||||
# TODO a better approach would be for the strip bundle to | ||||
# correctly advertise its sidedata categories directly. | ||||
remote_sidedata = repo._wanted_sidedata | ||||
sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata) | ||||
Augie Fackler
|
r43346 | clstate, deltas = self._generatechangelog( | ||
Raphaël Gomès
|
r47449 | cl, | ||
clnodes, | ||||
generate=changelog, | ||||
sidedata_helpers=sidedata_helpers, | ||||
Augie Fackler
|
r43346 | ) | ||
Gregory Szorc
|
r39050 | for delta in deltas: | ||
Joerg Sonnenberger
|
r47538 | for chunk in _revisiondeltatochunks( | ||
self._repo, delta, self._builddeltaheader | ||||
): | ||||
Pulkit Goyal
|
r41491 | size += len(chunk) | ||
yield chunk | ||||
Gregory Szorc
|
r39012 | |||
Gregory Szorc
|
r39046 | close = closechunk() | ||
size += len(close) | ||||
yield closechunk() | ||||
Augie Fackler
|
r43347 | self._verbosenote(_(b'%8.i (changelog)\n') % size) | ||
Gregory Szorc
|
r39012 | |||
Augie Fackler
|
r43347 | clrevorder = clstate[b'clrevorder'] | ||
manifests = clstate[b'manifests'] | ||||
changedfiles = clstate[b'changedfiles'] | ||||
Gregory Szorc
|
r39012 | |||
# We need to make sure that the linkrev in the changegroup refers to | ||||
# the first changeset that introduced the manifest or file revision. | ||||
# The fastpath is usually safer than the slowpath, because the filelogs | ||||
# are walked in revlog order. | ||||
# | ||||
Gregory Szorc
|
r39897 | # When taking the slowpath when the manifest revlog uses generaldelta, | ||
# the manifest may be walked in the "wrong" order. Without 'clrevorder', | ||||
# we would get an incorrect linkrev (see fix in cc0ff93d0c0c). | ||||
Gregory Szorc
|
r39012 | # | ||
# When taking the fastpath, we are only vulnerable to reordering | ||||
Gregory Szorc
|
r39897 | # of the changelog itself. The changelog never uses generaldelta and is | ||
# never reordered. To handle this case, we simply take the slowpath, | ||||
# which already has the 'clrevorder' logic. This was also fixed in | ||||
# cc0ff93d0c0c. | ||||
Gregory Szorc
|
r39012 | # Treemanifests don't work correctly with fastpathlinkrev | ||
# either, because we don't discover which directory nodes to | ||||
# send along with files. This could probably be fixed. | ||||
Pulkit Goyal
|
r46129 | fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo) | ||
Gregory Szorc
|
r39012 | |||
fnodes = {} # needed file nodes | ||||
Gregory Szorc
|
r39047 | size = 0 | ||
Gregory Szorc
|
r39048 | it = self.generatemanifests( | ||
Augie Fackler
|
r43346 | commonrevs, | ||
clrevorder, | ||||
fastpathlinkrev, | ||||
manifests, | ||||
fnodes, | ||||
source, | ||||
Augie Fackler
|
r43347 | clstate[b'clrevtomanifestrev'], | ||
Raphaël Gomès
|
r47449 | sidedata_helpers=sidedata_helpers, | ||
Augie Fackler
|
r43346 | ) | ||
Gregory Szorc
|
r39048 | |||
Gregory Szorc
|
r39269 | for tree, deltas in it: | ||
if tree: | ||||
Raphaël Gomès
|
r47445 | assert self.version in (b'03', b'04') | ||
Gregory Szorc
|
r39269 | chunk = _fileheader(tree) | ||
Gregory Szorc
|
r39048 | size += len(chunk) | ||
yield chunk | ||||
Gregory Szorc
|
r39050 | for delta in deltas: | ||
Joerg Sonnenberger
|
r47538 | chunks = _revisiondeltatochunks( | ||
self._repo, delta, self._builddeltaheader | ||||
) | ||||
Gregory Szorc
|
r39050 | for chunk in chunks: | ||
size += len(chunk) | ||||
yield chunk | ||||
Gregory Szorc
|
r39048 | |||
close = closechunk() | ||||
size += len(close) | ||||
yield close | ||||
Gregory Szorc
|
r39012 | |||
Augie Fackler
|
r43347 | self._verbosenote(_(b'%8.i (manifests)\n') % size) | ||
Gregory Szorc
|
r39047 | yield self._manifestsend | ||
Gregory Szorc
|
r39019 | mfdicts = None | ||
if self._ellipses and self._isshallow: | ||||
Augie Fackler
|
r43346 | mfdicts = [ | ||
Raphaël Gomès
|
r47369 | (repo.manifestlog[n].read(), lr) | ||
Gregory Szorc
|
r43376 | for (n, lr) in pycompat.iteritems(manifests) | ||
Augie Fackler
|
r43346 | ] | ||
Gregory Szorc
|
r39012 | |||
Gregory Szorc
|
r39274 | manifests.clear() | ||
Augie Fackler
|
r44937 | clrevs = {cl.rev(x) for x in clnodes} | ||
Gregory Szorc
|
r39012 | |||
Augie Fackler
|
r43346 | it = self.generatefiles( | ||
changedfiles, | ||||
commonrevs, | ||||
source, | ||||
mfdicts, | ||||
fastpathlinkrev, | ||||
fnodes, | ||||
clrevs, | ||||
Raphaël Gomès
|
r47449 | sidedata_helpers=sidedata_helpers, | ||
Augie Fackler
|
r43346 | ) | ||
Gregory Szorc
|
r39049 | |||
Gregory Szorc
|
r39050 | for path, deltas in it: | ||
Gregory Szorc
|
r39049 | h = _fileheader(path) | ||
size = len(h) | ||||
yield h | ||||
Gregory Szorc
|
r39050 | for delta in deltas: | ||
Joerg Sonnenberger
|
r47538 | chunks = _revisiondeltatochunks( | ||
self._repo, delta, self._builddeltaheader | ||||
) | ||||
Gregory Szorc
|
r39050 | for chunk in chunks: | ||
size += len(chunk) | ||||
yield chunk | ||||
Gregory Szorc
|
r39049 | |||
close = closechunk() | ||||
size += len(close) | ||||
yield close | ||||
Augie Fackler
|
r43347 | self._verbosenote(_(b'%8.i %s\n') % (size, path)) | ||
Gregory Szorc
|
r39012 | |||
Gregory Szorc
|
r39038 | yield closechunk() | ||
Gregory Szorc
|
r39012 | |||
if clnodes: | ||||
Augie Fackler
|
r43347 | repo.hook(b'outgoing', node=hex(clnodes[0]), source=source) | ||
Gregory Szorc
|
r39012 | |||
Raphaël Gomès
|
    def _generatechangelog(
        self, cl, nodes, generate=True, sidedata_helpers=None
    ):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.

        The state dict carries ``clrevorder`` (node -> emit order),
        ``manifests`` (manifest node -> first introducing cl node),
        ``changedfiles`` and ``clrevtomanifestrev``, all filled in as a
        side effect of the per-node lookup callback below.

        if generate is False, the state will be fully populated and no chunk
        stream will be yielded

        See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
        """
        clrevorder = {}
        manifests = {}
        mfl = self._repo.manifestlog
        changedfiles = set()
        clrevtomanifestrev = {}

        state = {
            b'clrevorder': clrevorder,
            b'manifests': manifests,
            b'changedfiles': changedfiles,
            b'clrevtomanifestrev': clrevtomanifestrev,
        }

        if not (generate or self._ellipses):
            # No chunks wanted: populate the state eagerly and return an
            # empty chunk iterable.
            # sort the nodes in storage order
            nodes = sorted(nodes, key=cl.rev)
            for node in nodes:
                c = cl.changelogrevision(node)
                clrevorder[node] = len(clrevorder)
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, node)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return state, ()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.changelogrevision(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update manifests if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (
                    x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis
                ):
                    manifestnode = c.manifest
                    # Record the first changeset introducing this manifest
                    # version.
                    manifests.setdefault(manifestnode, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(
                        cl.rev(x), mfl.rev(manifestnode)
                    )
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c.manifest].read().keys())
                else:
                    changedfiles.update(c.files)
            else:
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return x

        gen = deltagroup(
            self._repo,
            cl,
            nodes,
            True,
            lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_(b'changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis,
            sidedata_helpers=sidedata_helpers,
        )

        return state, gen
Martin von Zweigbergk
|
r28227 | |||
Augie Fackler
|
r43346 | def generatemanifests( | ||
self, | ||||
commonrevs, | ||||
clrevorder, | ||||
fastpathlinkrev, | ||||
manifests, | ||||
fnodes, | ||||
source, | ||||
clrevtolocalrev, | ||||
Raphaël Gomès
|
r47449 | sidedata_helpers=None, | ||
Augie Fackler
|
r43346 | ): | ||
Durham Goode
|
r34148 | """Returns an iterator of changegroup chunks containing manifests. | ||
`source` is unused here, but is used by extensions like remotefilelog to | ||||
change what is sent based in pulls vs pushes, etc. | ||||
Raphaël Gomès
|
r47449 | |||
See `storageutil.emitrevisions` for the doc on `sidedata_helpers`. | ||||
Durham Goode
|
r34148 | """ | ||
Martin von Zweigbergk
|
r28227 | repo = self._repo | ||
Durham Goode
|
r30294 | mfl = repo.manifestlog | ||
Augie Fackler
|
r43347 | tmfnodes = {b'': manifests} | ||
Martin von Zweigbergk
|
r28227 | |||
Benoit Boissinot
|
r19207 | # Callback for the manifest, used to collect linkrevs for filelog | ||
# revisions. | ||||
# Returns the linkrev node (collected in lookupcl). | ||||
Gregory Szorc
|
r39269 | def makelookupmflinknode(tree, nodes): | ||
Martin von Zweigbergk
|
r28231 | if fastpathlinkrev: | ||
Gregory Szorc
|
r39269 | assert not tree | ||
Matt Harbison
|
r47545 | |||
# pytype: disable=unsupported-operands | ||||
return manifests.__getitem__ | ||||
# pytype: enable=unsupported-operands | ||||
Martin von Zweigbergk
|
r28231 | |||
Augie Fackler
|
r27239 | def lookupmflinknode(x): | ||
"""Callback for looking up the linknode for manifests. | ||||
Augie Fackler
|
r27219 | |||
Augie Fackler
|
r27239 | Returns the linkrev node for the specified manifest. | ||
Augie Fackler
|
r27219 | |||
Augie Fackler
|
r27239 | SIDE EFFECT: | ||
Augie Fackler
|
r27432 | 1) fclnodes gets populated with the list of relevant | ||
file nodes if we're not using fastpathlinkrev | ||||
2) When treemanifests are in use, collects treemanifest nodes | ||||
to send | ||||
Augie Fackler
|
r27219 | |||
Augie Fackler
|
r27432 | Note that this means manifests must be completely sent to | ||
the client before you can trust the list of files and | ||||
treemanifests to send. | ||||
Augie Fackler
|
r27239 | """ | ||
Kyle Lippincott
|
r35013 | clnode = nodes[x] | ||
Gregory Szorc
|
r39269 | mdata = mfl.get(tree, x).readfast(shallow=True) | ||
Martin von Zweigbergk
|
r28241 | for p, n, fl in mdata.iterentries(): | ||
Augie Fackler
|
r43347 | if fl == b't': # subdirectory manifest | ||
subtree = tree + p + b'/' | ||||
Gregory Szorc
|
r39269 | tmfclnodes = tmfnodes.setdefault(subtree, {}) | ||
Martin von Zweigbergk
|
r28241 | tmfclnode = tmfclnodes.setdefault(n, clnode) | ||
if clrevorder[clnode] < clrevorder[tmfclnode]: | ||||
tmfclnodes[n] = clnode | ||||
else: | ||||
Gregory Szorc
|
r39269 | f = tree + p | ||
Martin von Zweigbergk
|
r28240 | fclnodes = fnodes.setdefault(f, {}) | ||
fclnode = fclnodes.setdefault(n, clnode) | ||||
if clrevorder[clnode] < clrevorder[fclnode]: | ||||
fclnodes[n] = clnode | ||||
Augie Fackler
|
r27239 | return clnode | ||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r28231 | return lookupmflinknode | ||
Sune Foldager
|
r19206 | |||
Martin von Zweigbergk
|
r28232 | while tmfnodes: | ||
Gregory Szorc
|
r39269 | tree, nodes = tmfnodes.popitem() | ||
Kyle Lippincott
|
r40700 | |||
Martin von Zweigbergk
|
r42528 | should_visit = self._matcher.visitdir(tree[:-1]) | ||
Kyle Lippincott
|
r40700 | if tree and not should_visit: | ||
continue | ||||
Gregory Szorc
|
r39280 | store = mfl.getstorage(tree) | ||
Gregory Szorc
|
r39043 | |||
Kyle Lippincott
|
r40700 | if not should_visit: | ||
Augie Fackler
|
r39769 | # No nodes to send because this directory is out of | ||
# the client's view of the repository (probably | ||||
Kyle Lippincott
|
r40700 | # because of narrow clones). Do this even for the root | ||
# directory (tree=='') | ||||
Gregory Szorc
|
r39043 | prunednodes = [] | ||
else: | ||||
Augie Fackler
|
r39769 | # Avoid sending any manifest nodes we can prove the | ||
# client already has by checking linkrevs. See the | ||||
# related comment in generatefiles(). | ||||
Augie Fackler
|
r39768 | prunednodes = self._prunemanifests(store, nodes, commonrevs) | ||
Kyle Lippincott
|
r40700 | |||
Gregory Szorc
|
r39269 | if tree and not prunednodes: | ||
Gregory Szorc
|
r39041 | continue | ||
Gregory Szorc
|
r39269 | lookupfn = makelookupmflinknode(tree, nodes) | ||
Gregory Szorc
|
r39018 | |||
Gregory Szorc
|
r39050 | deltas = deltagroup( | ||
Augie Fackler
|
r43346 | self._repo, | ||
store, | ||||
prunednodes, | ||||
False, | ||||
lookupfn, | ||||
Gregory Szorc
|
r39897 | self._forcedeltaparentprev, | ||
Gregory Szorc
|
r39044 | ellipses=self._ellipses, | ||
Augie Fackler
|
r43347 | topic=_(b'manifests'), | ||
Gregory Szorc
|
r39044 | clrevtolocalrev=clrevtolocalrev, | ||
fullclnodes=self._fullclnodes, | ||||
Augie Fackler
|
r43346 | precomputedellipsis=self._precomputedellipsis, | ||
Raphaël Gomès
|
r47449 | sidedata_helpers=sidedata_helpers, | ||
Augie Fackler
|
r43346 | ) | ||
Gregory Szorc
|
r39044 | |||
Martin von Zweigbergk
|
r42528 | if not self._oldmatcher.visitdir(store.tree[:-1]): | ||
Martin von Zweigbergk
|
r40380 | yield tree, deltas | ||
else: | ||||
# 'deltas' is a generator and we need to consume it even if | ||||
# we are not going to send it because a side-effect is that | ||||
# it updates tmdnodes (via lookupfn) | ||||
for d in deltas: | ||||
pass | ||||
if not tree: | ||||
yield tree, [] | ||||
Gregory Szorc
|
r39046 | |||
Augie Fackler
|
r39768 | def _prunemanifests(self, store, nodes, commonrevs): | ||
Martin von Zweigbergk
|
r41933 | if not self._ellipses: | ||
# In non-ellipses case and large repositories, it is better to | ||||
# prevent calling of store.rev and store.linkrev on a lot of | ||||
# nodes as compared to sending some extra data | ||||
return nodes.copy() | ||||
Augie Fackler
|
r39768 | # This is split out as a separate method to allow filtering | ||
# commonrevs in extension code. | ||||
# | ||||
# TODO(augie): this shouldn't be required, instead we should | ||||
# make filtering of revisions to send delegated to the store | ||||
# layer. | ||||
frev, flr = store.rev, store.linkrev | ||||
return [n for n in nodes if flr(frev(n)) not in commonrevs] | ||||
Martin von Zweigbergk
|
r24897 | # The 'source' parameter is useful for extensions | ||
Augie Fackler
|
    def generatefiles(
        self,
        changedfiles,
        commonrevs,
        source,
        mfdicts,
        fastpathlinkrev,
        fnodes,
        clrevs,
        sidedata_helpers=None,
    ):
        """Yield ``(filename, deltas)`` pairs for the files to bundle.

        Each yielded ``deltas`` is itself a generator of revision deltas for
        one file revlog, produced by ``deltagroup``.

        ``fnodes`` is consulted as filename -> {filenode: linknode} collected
        earlier (slow path); ``clrevs`` is the set of changelog revs being
        sent, used to filter filelog revs on the fast path; ``mfdicts`` is
        only consulted for shallow clones.  ``sidedata_helpers`` is forwarded
        unchanged to ``deltagroup``.
        """
        # Only send files selected by the new matcher but not the old one
        # (i.e. files newly in scope for this exchange).
        changedfiles = [
            f
            for f in changedfiles
            if self._matcher(f) and not self._oldmatcher(f)
        ]

        if not fastpathlinkrev:

            def normallinknodes(unused, fname):
                # Slow path: linknodes were collected up front into fnodes.
                return fnodes.get(fname, {})

        else:
            cln = self._repo.changelog.node

            def normallinknodes(store, fname):
                # Fast path: derive linknodes from the filelog's own
                # linkrevs, keeping only revs that map into clrevs.
                flinkrev = store.linkrev
                fnode = store.node
                revs = ((r, flinkrev(r)) for r in store)
                return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}

        clrevtolocalrev = {}

        if self._isshallow:
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            clrev = self._repo.changelog.rev

            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        # Side effect: record changelog rev -> filelog rev
                        # for the ellipsis computation in the loop below.
                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = normallinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            # Keep the earlier of the two candidate
                            # linkrevs (ordered by changelog rev).
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        else:
            linknodes = normallinknodes

        repo = self._repo
        progress = repo.ui.makeprogress(
            _(b'files'), unit=_(b'files'), total=len(changedfiles)
        )
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(
                    _(b"empty or missing file data for %s") % fname
                )

            clrevtolocalrev.clear()

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            frev, flr = filerevlog.rev, filerevlog.linkrev
            # Skip sending any filenode we know the client already
            # has. This avoids over-sending files relatively
            # inexpensively, so it's not a problem if we under-filter
            # here.
            filenodes = [
                n for n in linkrevnodes if flr(frev(n)) not in commonrevs
            ]

            if not filenodes:
                continue

            progress.update(i + 1, item=fname)

            deltas = deltagroup(
                self._repo,
                filerevlog,
                filenodes,
                False,
                lookupfilelog,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis,
                sidedata_helpers=sidedata_helpers,
            )

            yield fname, deltas

        progress.complete()
Sune Foldager
|
r19200 | |||
Augie Fackler
|
r43346 | |||
def _makecg1packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    """Build a cgpacker emitting version '01' changegroups.

    The cg1 delta header carries no explicit delta base, so deltas are
    forced against the previous revision (``forcedeltaparentprev=True``).
    ``remote_sidedata`` is accepted for factory-signature parity but is
    not used: cg1 cannot carry sidedata.
    """

    def builddeltaheader(d):
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'01',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        forcedeltaparentprev=True,
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
Gregory Szorc
|
r38930 | |||
Augie Fackler
|
r43346 | |||
def _makecg2packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    """Build a cgpacker emitting version '02' changegroups.

    cg2 adds an explicit delta-base node to the header (generaldelta
    support).  ``remote_sidedata`` is accepted for signature parity but
    unused: cg2 cannot carry sidedata.
    """

    def builddeltaheader(d):
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'02',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
Gregory Szorc
|
r38930 | |||
Gregory Szorc
|
r38933 | |||
Augie Fackler
|
def _makecg3packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    """Build a cgpacker emitting version '03' changegroups.

    cg3 extends the header with revlog flags and terminates the manifest
    section with a close chunk (needed for tree manifests).
    ``remote_sidedata`` is accepted for signature parity but unused.
    """

    def builddeltaheader(d):
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'03',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
Raphaël Gomès
|
def _makecg4packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    """Build a cgpacker emitting version '04' changegroups.

    cg4 reuses the cg3 delta header; sidedata travels in a separate chunk
    from the delta so that "raw delta" and sidedata can be told apart.
    This is the only factory that actually forwards ``remote_sidedata``.
    """

    # Same header func as cg3. Sidedata is in a separate chunk from the delta
    # to differentiate "raw delta" and sidedata.
    def builddeltaheader(d):
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'04',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
        remote_sidedata=remote_sidedata,
    )
Augie Fackler
|
# Maps changegroup wire-protocol version -> (packer factory, unpacker class).
_packermap = {
    b'01': (_makecg1packer, cg1unpacker),
    # cg2 adds support for exchanging generaldelta
    b'02': (_makecg2packer, cg2unpacker),
    # cg3 adds support for exchanging revlog flags and treemanifests
    b'03': (_makecg3packer, cg3unpacker),
    # cg4 adds support for exchanging sidedata
    b'04': (_makecg4packer, cg4unpacker),
}
Pierre-Yves David
|
r23168 | |||
Augie Fackler
|
r43346 | |||
Pierre-Yves David
|
def allsupportedversions(repo):
    """Return the set of all changegroup versions supported for ``repo``."""
    versions = set(_packermap.keys())
    needv03 = False
    if (
        repo.ui.configbool(b'experimental', b'changegroup3')
        or repo.ui.configbool(b'experimental', b'treemanifest')
        or scmutil.istreemanifest(repo)
    ):
        # we keep version 03 because we need it to exchange treemanifest data
        #
        # we also keep versions 01 and 02, because it is possible for a repo
        # to contain both normal and tree manifests at the same time, so
        # using an older version to pull data is still viable
        #
        # (or even to push a subset of history)
        needv03 = True
    # version 04 is only offered when the repo itself uses revlogv2
    # (cg4 exists to exchange sidedata, which revlogv2 stores)
    has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
    if not has_revlogv2:
        versions.discard(b'04')
    if not needv03:
        versions.discard(b'03')
    return versions
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return changegroup versions ``repo`` can unbundle.

    Currently identical to the full supported set.
    """
    return allsupportedversions(repo)
Martin von Zweigbergk
|
r27953 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return changegroup versions ``repo`` can produce for sending."""
    versions = allsupportedversions(repo)

    # Several repo features rule out the legacy formats; decide first, then
    # drop versions 01 and 02 in one place.
    droplegacy = False

    if scmutil.istreemanifest(repo):
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        droplegacy = True
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        droplegacy = True
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        droplegacy = True

    if droplegacy:
        versions.discard(b'01')
        versions.discard(b'02')

    return versions
Martin von Zweigbergk
|
r27751 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
def localversion(repo):
    """Return the best changegroup version for bundles used only locally.

    "Locally" means bundles that never leave this host, such as those from
    strip and shelve, and temporary bundles, so the richest locally
    supported version is picked.
    """
    return max(supportedoutgoingversions(repo))
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
def safeversion(repo):
    """Return the smallest version it's safe to assume clients support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so a generaldelta repo never needs to fall back to 01.
    """
    candidates = supportedoutgoingversions(repo)
    if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
        candidates.discard(b'01')
    assert candidates
    return min(candidates)
Augie Fackler
|
r43346 | |||
def getbundler(
    version,
    repo,
    bundlecaps=None,
    oldmatcher=None,
    matcher=None,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    """Obtain a changegroup packer for ``version``, validating arguments.

    Missing matchers default to match-everything (``matcher``) and
    match-nothing (``oldmatcher``).  Raises ``error.ProgrammingError``
    when a sparse matcher is combined with cg1, and ``error.Abort`` when
    ellipsis nodes are requested with a pre-cg3 version.
    """
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    sparse = not matcher.always()
    if sparse and version == b'01':
        raise error.ProgrammingError(
            b'version 01 changegroups do not support sparse file matchers'
        )

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _(
                b'ellipsis nodes require at least cg3 on client and server, '
                b'but negotiated version %s'
            )
            % version
        )

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    makepacker, _unpacker = _packermap[version]
    return makepacker(
        repo,
        oldmatcher,
        matcher,
        bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
        remote_sidedata=remote_sidedata,
    )
Martin von Zweigbergk
|
r27751 | |||
Gregory Szorc
|
def getunbundler(version, fh, alg, extras=None):
    """Return an unpacker for changegroup ``version`` reading from ``fh``."""
    unpackerclass = _packermap[version][1]
    return unpackerclass(fh, alg, extras=extras)
Martin von Zweigbergk
|
r27751 | |||
Augie Fackler
|
r43346 | |||
Pierre-Yves David
|
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets will be sent (and, in debug mode, which)."""
    ui = repo.ui
    if ui.verbose or source == b'bundle':
        ui.status(_(b"%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug(b"list of changesets:\n")
        for n in nodes:
            ui.debug(b"%s\n" % hex(n))
Pierre-Yves David
|
r20926 | |||
Durham Goode
|
r34098 | |||
Augie Fackler
|
def makechangegroup(
    repo, outgoing, version, source, fastpath=False, bundlecaps=None
):
    """Generate a changegroup stream and wrap it in an unbundler object."""
    cgstream = makestream(
        repo,
        outgoing,
        version,
        source,
        fastpath=fastpath,
        bundlecaps=bundlecaps,
    )
    extras = {b'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(cgstream), None, extras)
def makestream(
    repo,
    outgoing,
    version,
    source,
    fastpath=False,
    bundlecaps=None,
    matcher=None,
    remote_sidedata=None,
):
    """Return a changegroup data stream for the ``outgoing`` revisions.

    Runs the ``preoutgoing`` hook (which may abort) and reports the
    changeset count before handing generation off to the packer.
    """
    bundler = getbundler(
        version,
        repo,
        bundlecaps=bundlecaps,
        matcher=matcher,
        remote_sidedata=remote_sidedata,
    )

    repo = repo.unfiltered()
    common = outgoing.common
    missing = outgoing.missing
    heads = outgoing.ancestorsof
    heads.sort()

    # Take the fast path when explicitly told to, or when all (unfiltered)
    # heads have been requested -- then we know every linkrev will be
    # pulled by the client.
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads())
    )

    repo.hook(b'preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return bundler.generate(common, missing, fastpathlinkrev, source)
Augie Fackler
|
r43346 | |||
Raphaël Gomès
|
def _addchangegroupfiles(
    repo,
    source,
    revmap,
    trp,
    expectedfiles,
    needfiles,
    addrevisioncb=None,
):
    """Apply the file-revlog section of an incoming changegroup.

    ``source`` is the changegroup unpacker, positioned at the file chunks;
    ``revmap`` and ``trp`` (the transaction proxy) are passed through to
    ``addgroup``.  ``needfiles`` maps filename -> set of filenodes required
    by the changesets just added; entries are removed here as they arrive,
    and anything still unaccounted for at the end must already exist
    locally.  Returns ``(revisions, files)`` counts of what was added.

    Raises ``error.Abort`` on an empty file group, a censored delta base,
    a spurious entry, or file data still missing at the end.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(
        _(b'files'), unit=_(b'files'), total=expectedfiles
    )
    # iter(callable, {}) stops when filelogheader() returns an empty dict,
    # i.e. at the end of the file-chunk stream.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        # Remember the pre-add length so new revisions can be ranged over.
        o = len(fl)
        try:
            deltas = source.deltaiter()
            added = fl.addgroup(
                deltas,
                revmap,
                trp,
                addrevisioncb=addrevisioncb,
            )
            # A falsy result from addgroup means nothing was appended.
            if not added:
                raise error.Abort(_(b"received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_(b"received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            # Tick off every newly-added node against the expected set;
            # anything unexpected is a protocol violation.
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(_(b"received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    # Whatever is still listed in needfiles was not part of this group; it
    # must already be present locally, otherwise the repo is inconsistent.
    for f, needs in pycompat.iteritems(needfiles):
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _(b'missing file data for %s:%s - run hg verify')
                    % (f, hex(n))
                )

    return revisions, files
Raphaël Gomès
|
r47449 | |||
def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
    """Return ``(repo, computers, removers)`` for sidedata exchange.

    ``computers`` maps revlog kind -> list of callables that generate
    sidedata categories the other side wants but we don't store;
    ``removers`` maps revlog kind -> callables for categories we store but
    the other side doesn't want.  On pull the roles are swapped, since the
    remote's perspective is mirrored.
    """
    # Callables for computing sidedata on-the-fly, and for categories
    # to strip from existing sidedata, keyed by revlog kind.
    computers = collections.defaultdict(list)
    removers = collections.defaultdict(list)

    to_generate = remote_sd_categories - repo._wanted_sidedata
    to_remove = repo._wanted_sidedata - remote_sd_categories
    if pull:
        to_generate, to_remove = to_remove, to_generate

    for revlog_kind, kind_computers in repo._sidedata_computers.items():
        for category, computer in kind_computers.items():
            if category in to_generate:
                computers[revlog_kind].append(computer)
            if category in to_remove:
                removers[revlog_kind].append(computer)

    return repo, computers, removers