# shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial import (
    bundlerepo,
    changegroup,
    error,
    match,
    mdiff,
)
from . import (
    constants,
    remotefilelog,
    shallowutil,
)

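# Possible return values of shouldaddfilegroups(), controlling which file
# groups end up in a generated changegroup.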
NoFiles = 0
LocalFiles = 1
AllFiles = 2


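# Shared chunk generator used by shallowcg1packer.group(): for remotefilelog
# stores, nodes are sorted topologically and the parent of the first revision
# is prepended so that every emitted delta has a base.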
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
            yield c
        return

    if len(nodelist) == 0:
        yield self.close()
        return

    nodelist = shallowutil.sortnodes(nodelist, rlog.parents)
    # add the parent of the first rev
    p = rlog.parents(nodelist[0])[0]
    nodelist.insert(0, p)

    # build deltas
    for i in range(len(nodelist) - 1):
        prev, curr = nodelist[i], nodelist[i + 1]
        linknode = lookup(curr)
        for c in self.nodechunk(rlog, curr, prev, linknode):
            yield c

    yield self.close()


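# Changegroup packer that understands shallow (remotefilelog) repositories: it
# filters or rewrites the file groups it emits depending on the operation and
# on whether the filelogs involved are remotefilelogs.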
class shallowcg1packer(changegroup.cgpacker):
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
        if shallowutil.isenabled(self._repo):
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(
            commonrevs, clnodes, fastpathlinkrev, source, **kwargs
        )

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        return shallowgroup(
            shallowcg1packer, self, nodelist, rlog, lookup, units=units
        )

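    # For shallow repos, restrict which file groups are generated according to
    # shouldaddfilegroups(), and refuse to serve filelogs out of a full bundle
    # (bundlerepo is tied to revlogs; such bundles must be applied with
    # `hg unbundle` instead).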
    def generatefiles(self, changedfiles, *args, **kwargs):
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Instead, require that
                # the user use unbundle.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, b'foo')
                if repo._cgfilespos:
                    raise error.Abort(
                        b"cannot pull from full bundles",
                        hint=b"use `hg unbundle` instead",
                    )
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                changedfiles = list(
                    [f for f in changedfiles if not repo.shallowmatch(f)]
                )

        return super(shallowcg1packer, self).generatefiles(
            changedfiles, *args, **kwargs
        )

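    # Decide which file groups to emit for a given operation: everything, only
    # the locally stored files, or none, based on the source of the request and
    # the peer's advertised capabilities.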
    def shouldaddfilegroups(self, source):
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == b"push" or source == b"bundle":
            return AllFiles

        # We won't actually strip the files, but we should put them in any
        # backup bundle generated by strip (especially for cases like narrow's
        # `hg tracked --removeinclude`, as failing to do so means that the
        # "saved" changesets during a strip won't have their files reapplied and
        # thus their linknode adjusted, if necessary).
        if source == b"strip":
            cfg = repo.ui.config(b'remotefilelog', b'strip.includefiles')
            if cfg == b'local':
                return LocalFiles
            elif cfg != b'none':
                return AllFiles

        caps = self._bundlecaps or []
        if source == b"serve" or source == b"pull":
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_(b"pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

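    # For remotefilelog stores, keep only the missing nodes whose linkrev is
    # not already in the common set; plain revlogs fall back to the default
    # cgpacker pruning.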
    def prune(self, rlog, missing, commonrevs):
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(
                rlog, missing, commonrevs
            )

        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

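    # Emit the changegroup chunks for a single file node: a delta header
    # followed by a delta against prevnode (or the full raw text when prevnode
    # is the null node).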
    def nodechunk(self, revlog, node, prevnode, linknode):
        prefix = b''
        if prevnode == revlog.nullid:
            delta = revlog.rawdata(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield changegroup.chunkheader(l)
        yield meta
        yield delta


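# Wrapper for changegroup.makechangegroup (note the `orig` argument): when a
# shallow repo is serving, repo.shallowmatch is temporarily narrowed to the
# include/exclude patterns the client advertised in its bundle capabilities,
# so only files the client tracks are sent.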
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    original = repo.shallowmatch
    try:
        # if serving, only send files the client has patterns for
        if source == b'serve':
            bundlecaps = kwargs.get('bundlecaps')
            includepattern = None
            excludepattern = None
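            # Each capability looks roughly like
            #   b"includepattern=<pat1>\0<pat2>..."
            # i.e. a prefix followed by \0-separated match patterns.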
            for cap in bundlecaps or []:
                if cap.startswith(b"includepattern="):
                    raw = cap[len(b"includepattern=") :]
                    if raw:
                        includepattern = raw.split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    raw = cap[len(b"excludepattern=") :]
                    if raw:
                        excludepattern = raw.split(b'\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            else:
                repo.shallowmatch = match.always()
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = original


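# Wrapper for the changegroup file-adding step (note the `orig` argument).
# Returns (number of file revisions added, number of newly-seen files).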
def addchangegroupfiles(
    orig, repo, source, revmap, trp, expectedfiles, *args, **kwargs
):
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args, **kwargs)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: None or (
            #   node,
            #   p1,
            #   p2,
            #   cs,
            #   deltabase,
            #   delta,
            #   flags,
            #   sidedata,
            #   proto_flags,
            # )
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_(b"received file revlog group is empty"))

    processed = set()

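    # Return True if the dependency (depf, depnode) is already applied or is
    # not part of this changegroup; otherwise push the dependency and the
    # waiting revision back onto the front of the queue and return False.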
    def available(f, node, depf, depnode):
        if depnode != repo.nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
        for dependent in dependents:
            if dependent == repo.nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_(b"circular node dependency"))

        fl = repo.file(f)
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl)
        (
            node,
            p1,
            p2,
            linknode,
            deltabase,
            delta,
            flags,
            sidedata,
            proto_flags,
        ) = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.rawdata(deltabase)
        text = mdiff.patch(base, delta)
        if not isinstance(text, bytes):
            text = bytes(text)

        meta, text = shallowutil.parsemeta(text)
        if b'copy' in meta:
            copyfrom = meta[b'copy']
            copynode = bin(meta[b'copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != repo.nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles