wrapper.py
546 lines
| 16.7 KiB
| text/x-python
|
PythonLexer
Matt Harbison
|
r35097 | # wrapper.py - methods wrapping core mercurial logic | ||
# | ||||
# Copyright 2017 Facebook, Inc. | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | ||||
from __future__ import absolute_import | ||||
import hashlib | ||||
Matt Harbison
|
r35098 | from mercurial.i18n import _ | ||
Augie Fackler
|
r36622 | from mercurial.node import bin, hex, nullid, short | ||
Gregory Szorc
|
r43359 | from mercurial.pycompat import ( | ||
getattr, | ||||
setattr, | ||||
) | ||||
Matt Harbison
|
r35098 | |||
Matt Harbison
|
r35097 | from mercurial import ( | ||
Matt Harbison
|
r41078 | bundle2, | ||
changegroup, | ||||
cmdutil, | ||||
context, | ||||
Matt Harbison
|
r35097 | error, | ||
Matt Harbison
|
r41078 | exchange, | ||
exthelper, | ||||
localrepo, | ||||
Gregory Szorc
|
r43375 | pycompat, | ||
Matt Harbison
|
r35097 | revlog, | ||
Matt Harbison
|
r41078 | scmutil, | ||
upgrade, | ||||
Matt Harbison
|
r35097 | util, | ||
Matt Harbison
|
r41078 | vfs as vfsmod, | ||
wireprotov1server, | ||||
Matt Harbison
|
r35097 | ) | ||
Augie Fackler
|
r43346 | from mercurial.interfaces import repository | ||
Pulkit Goyal
|
r43078 | |||
Yuya Nishihara
|
r37102 | from mercurial.utils import ( | ||
Gregory Szorc
|
r39914 | storageutil, | ||
Yuya Nishihara
|
r37102 | stringutil, | ||
) | ||||
Matt Harbison
|
r35364 | from ..largefiles import lfutil | ||
Matt Harbison
|
r35097 | from . import ( | ||
blobstore, | ||||
pointer, | ||||
) | ||||
# Extension helper instance; collects the @eh.wrapfunction registrations below
# so the extension entry point can apply them at load time.
eh = exthelper.exthelper()
@eh.wrapfunction(localrepo, b'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    """Advertise the LFS repository feature when the requirement is present."""
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)
@eh.wrapfunction(changegroup, b'allsupportedversions')
def allsupportedversions(orig, ui):
    """Force-include changegroup format '03' in the supported set."""
    supported = orig(ui)
    supported.add(b'03')

    return supported
@eh.wrapfunction(wireprotov1server, b'_capabilities')
def _capabilities(orig, repo, proto):
    """Wrap server command to announce lfs server capability"""
    caps = orig(repo, proto)

    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        return caps

    # Advertise a slightly different capability when lfs is *required*, so
    # that the client knows it MUST load the extension.  If lfs is not
    # required on the server, there's no reason to autoload the extension
    # on the client.
    if b'lfs' in repo.requirements:
        caps.append(b'lfs-serve')

    caps.append(b'lfs')
    return caps
def bypasscheckhash(self, text):
    """Always report False: hash checking is never bypassed for LFS."""
    return False
def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 3-tuple (text, validatehash, sidedata) where validatehash is
    True as the contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        # attach the filename so errors/progress can name the file being fetched
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata: restore any 'x-hg-*' keys carried in the
    # pointer back into the filelog metadata header
    hgmeta = {}
    for k in p.keys():
        if k.startswith(b'x-hg-'):
            name = k[len(b'x-hg-') :]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith(b'\1\n'):
        text = storageutil.packmeta(hgmeta, text)

    return (text, True, {})
def writetostore(self, text, sidedata):
    """Flag-processor write transform: store ``text`` in the local blobstore
    and return serialized LFS pointer metadata in its place.

    Returns a 2-tuple (rawtext, False).  ``sidedata`` is accepted but unused
    in this function.
    """
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = b'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata[b'x-is-binary'] = b'0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in pycompat.iteritems(hgmeta):
            metadata[b'x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
def _islfs(rlog, node=None, rev=None):
    """Report whether the given revision of ``rlog`` carries the EXTSTORED
    (LFS) revlog flag.

    The revision is identified by either ``node`` or ``rev``; with both None
    (likely working copy content, where no node exists yet) the answer is
    False.
    """
    if rev is None:
        if node is None:
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)

    if node == nullid:
        return False

    return bool(rlog.flags(rev) & revlog.REVIDX_EXTSTORED)
# Wrapping may also be applied by remotefilelog
def filelogaddrevision(
    orig,
    self,
    text,
    transaction,
    link,
    p1,
    p2,
    cachedelta=None,
    node=None,
    flags=revlog.REVIDX_DEFAULT_FLAGS,
    **kwds
):
    """Mark revisions selected by the lfs tracking callback with
    REVIDX_EXTSTORED before delegating to the wrapped addrevision."""
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self._revlog.opener.options.get(b'lfstrack')

    if lfstrack:
        # exclude hg rename meta from file size
        _meta, offset = storageutil.parsemeta(text)
        textlen = len(text) - (offset or 0)

        if lfstrack(self._revlog.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=cachedelta,
        node=node,
        flags=flags,
        **kwds
    )
# Wrapping may also be applied by remotefilelog
def filelogrenamed(orig, self, node):
    """Resolve copy/rename info, answering from LFS pointer metadata when the
    revision is LFS-stored so the blob itself need not be read."""
    if not _islfs(self._revlog, node):
        return orig(self, node)

    rawtext = self._revlog.rawdata(node)
    if not rawtext:
        return False

    metadata = pointer.deserialize(rawtext)
    if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
        return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])

    return False
# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
    """Return the file size, read from LFS pointer metadata when possible."""
    if not _islfs(self._revlog, rev=rev):
        return orig(self, rev)

    # fast path: the pointer records the blob size, so the blob itself is
    # never touched
    rawtext = self._revlog.rawdata(rev)
    return int(pointer.deserialize(rawtext)[b'size'])
@eh.wrapfunction(revlog, b'_verify_revision')
def _verify_revision(orig, rl, skipflags, state, node):
    """Adjust verification of LFS revisions: verify blobs that are present in
    the local store, and record rename-safety for those that are not."""
    if _islfs(rl, node=node):
        rawtext = rl.rawdata(node)
        metadata = pointer.deserialize(rawtext)

        # Don't skip blobs that are stored locally, as local verification is
        # relatively cheap and there's no other way to verify the raw data in
        # the revlog.
        if rl.opener.lfslocalblobstore.has(metadata.oid()):
            skipflags &= ~revlog.REVIDX_EXTSTORED
        elif skipflags & revlog.REVIDX_EXTSTORED:
            # The wrapped method will set `skipread`, but there's enough local
            # info to check renames.
            state[b'safe_renamed'].add(node)

    orig(rl, skipflags, state, node)
@eh.wrapfunction(context.basefilectx, b'cmp')
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    other_islfs = getattr(fctx, 'islfs', lambda: False)
    if self.islfs() and other_islfs():
        # fast path: comparing the LFS oids is enough, no blob access needed
        mine = pointer.deserialize(self.rawdata())
        theirs = pointer.deserialize(fctx.rawdata())
        return mine.oid() != theirs.oid()
    return orig(self, fctx)
@eh.wrapfunction(context.basefilectx, b'isbinary')
def filectxisbinary(orig, self):
    """Answer isbinary() from LFS pointer metadata when available."""
    if not self.islfs():
        return orig(self)

    # fast path: the pointer may carry an 'x-is-binary' hint; if the metadata
    # says nothing, assume binary by default
    metadata = pointer.deserialize(self.rawdata())
    return bool(int(metadata.get(b'x-is-binary', 1)))
def filectxislfs(self):
    """Report whether this file context's revision is LFS-stored."""
    rlog = self.filelog()._revlog
    return _islfs(rlog, self.filenode())
@eh.wrapfunction(cmdutil, b'_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    """Additionally expose the raw (pointer) data on the cat formatter."""
    orig(fm, ctx, matcher, path, decode)
    rawdata = ctx[path].rawdata()
    fm.data(rawdata=rawdata)
@eh.wrapfunction(scmutil, b'wrapconvertsink')
def convertsink(orig, sink):
    """Subclass hg convert sinks so that 'lfs' is added to the destination
    repo's requirements the first time an LFS file is committed."""
    sink = orig(sink)
    if sink.repotype == b'hg':

        class lfssink(sink.__class__):
            def putcommit(
                self,
                files,
                copies,
                parents,
                commit,
                source,
                revmap,
                full,
                cleanp2,
            ):
                pc = super(lfssink, self).putcommit
                node = pc(
                    files,
                    copies,
                    parents,
                    commit,
                    source,
                    revmap,
                    full,
                    cleanp2,
                )

                if b'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add(b'lfs')
                        self.repo._writerequirements()

                return node

        # swap the class in place so the already-constructed sink is upgraded
        sink.__class__ = lfssink

    return sink
# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
# options and blob stores are passed from othervfs to the new readonlyvfs.
@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    self.options.update(
        (k, v) for k, v in othervfs.options.items() if k.startswith(b'lfs')
    )
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in (b'lfslocalblobstore', b'lfsremoteblobstore'):
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))
def _prefetchfiles(repo, revmatches):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        return

    localstore = repo.svfs.lfslocalblobstore
    pointers = []
    seen = set()

    for rev, match in revmatches:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            if not p:
                continue
            oid = p.oid()
            # fetch each missing blob only once
            if oid in seen or localstore.has(oid):
                continue
            p.filename = f
            pointers.append(p)
            seen.add(oid)

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)
def _canskipupload(repo):
    """True when uploading blobs would be pointless (no store, or null store)."""
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
def candownload(repo):
    """True when blobs can usefully be downloaded from the remote store."""
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
def uploadblobsfromrevs(repo, revs):
    """upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    """
    if _canskipupload(repo):
        return
    uploadblobs(repo, extractpointers(repo, revs))
def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    repo = pushop.repo
    missing = pushop.outgoing.missing
    return uploadblobsfromrevs(repo, missing)
@eh.wrapfunction(exchange, b'push')
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if b'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support.  Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable(b'lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _(b"required features are not supported in the destination: %s")
            raise error.Abort(
                m % b'lfs', hint=_(b'enable the lfs extension on the server')
            )

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        #
        # Temporarily point the remote blobstore at the push destination; the
        # original store is restored afterwards even if the push raises.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)
# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, b'writenewbundle')
def writenewbundle(
    orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    # blobs are pushed before the bundle itself is written
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    )
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug(b'lfs: computing set of blobs to upload\n')
    # keyed by oid so each blob is listed only once
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'lfs search'), _(b'changesets'), len(revs)
    ) as progress:
        for rev in revs:
            ctx = repo[rev]
            for ptr in pointersfromctx(ctx).values():
                pointers[ptr.oid()] = ptr
            progress.increment()

    return sorted(pointers.values(), key=lambda p: p.oid())
def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        # the file was removed from ctx; look it up in a parent instead
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        # removed-file case: signal "was LFS" with the empty-dict sentinel
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(
            _(b'lfs: corrupted pointer (%s@%s): %s\n')
            % (f, short(_ctx.node()), ex)
        )
def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    narrow = ctx.repo().narrowmatch()

    # TODO: consider manifest.fastread() instead
    for f in ctx.files():
        if not narrow(f):
            continue
        ptr = pointerfromctx(ctx, f, removed=removed)
        if ptr is not None:
            result[f] = ptr

    return result
def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        # nothing to upload; avoid touching the remote store at all
        return

    localblob = repo.svfs.lfslocalblobstore
    repo.svfs.lfsremoteblobstore.writebatch(pointers, localblob)
@eh.wrapfunction(upgrade, b'_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """Copy local LFS blobs into the upgraded repository's blob store."""
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup() on either side
    if not util.safehasattr(srcrepo.svfs, b'lfslocalblobstore'):
        return
    if not util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
        return

    srcvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srcvfs.walk():
        for oid in files:
            ui.write(_(b'copying lfs blob %s\n') % oid)
            lfutil.link(srcvfs.join(oid), dstvfs.join(oid))
@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Carry the 'lfs' requirement through repository upgrades."""
    supported = orig(repo)
    if b'lfs' in repo.requirements:
        supported.add(b'lfs')
    return supported