# Copyright (C) 2015 - Mike Edgar <adgar@google.com>
#
# This extension enables removal of file content at a given revision,
# rewriting the data/metadata of successive revisions to preserve revision log
# integrity.

"""erase file content at a given revision

The censor command instructs Mercurial to erase all content of a file at a given
revision *without updating the changeset hash.* This allows existing history to
remain valid while preventing future clones/pulls from receiving the erased
data.

Typical uses for censor are due to security or legal requirements, including::

 * Passwords, private keys, cryptographic material
 * Licensed data/code/libraries for which the license has expired
 * Personally Identifiable Information or other private data

Censored nodes can interrupt mercurial's typical operation whenever the excised
data needs to be materialized. Some commands, like ``hg cat``/``hg revert``,
simply fail when asked to produce censored data. Others, like ``hg verify`` and
``hg update``, must be capable of tolerating censored data to continue to
function in a meaningful way. Such commands only tolerate censored file
revisions if they are allowed by the "censor.policy=ignore" config option.
"""
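
# Illustrative usage (a sketch added alongside the help text above, not part of
# the upstream documentation): following the command synopsis declared below,
# a censor invocation might look like
#
#   hg censor -r 1e6f4c8a9d2b -t "censored per security review" secrets.conf
#
# A repository that still needs to operate on the censored file can enable the
# "censor.policy=ignore" option mentioned above, e.g. in an hgrc:
#
#   [censor]
#   policy = ignore
#
# The revision hash and file name are made-up examples.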

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.node import short

from mercurial import (
    error,
    pycompat,
    registrar,
    revlog,
    scmutil,
    util,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
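# A third-party extension would instead list the Mercurial versions it was
# tested against, for example (illustrative values only):
#
#   testedwith = '4.6 4.7'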

@command('censor',
    [('r', 'rev', '', _('censor file from specified revision'), _('REV')),
     ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))],
    _('-r REV [-t TEXT] [FILE]'))
def censor(ui, repo, path, rev='', tombstone='', **opts):
    with repo.wlock(), repo.lock():
        return _docensor(ui, repo, path, rev, tombstone, **opts)

def _docensor(ui, repo, path, rev='', tombstone='', **opts):
    if not path:
        raise error.Abort(_('must specify file path to censor'))
    if not rev:
        raise error.Abort(_('must specify revision to censor'))

    wctx = repo[None]
    m = scmutil.match(wctx, (path,))
    if m.anypats() or len(m.files()) != 1:
        raise error.Abort(_('can only specify an explicit filename'))
    path = m.files()[0]
    flog = repo.file(path)
    if not len(flog):
        raise error.Abort(_('cannot censor file with no history'))

    rev = scmutil.revsingle(repo, rev, rev).rev()
    try:
        ctx = repo[rev]
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

    try:
        fctx = ctx.filectx(path)
    except error.LookupError:
        raise error.Abort(_('file does not exist at revision %s') % rev)

    fnode = fctx.filenode()
    headctxs = [repo[c] for c in repo.heads()]
    heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
    if heads:
        headlist = ', '.join([short(c.node()) for c in heads])
        raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
                          hint=_('clean/delete and commit first'))

    wp = wctx.parents()
    if ctx.node() in [p.node() for p in wp]:
        raise error.Abort(_('cannot censor working directory'),
                          hint=_('clean/delete/update first'))

    flogv = flog.version & 0xFFFF
    if flogv != revlog.REVLOGV1:
        raise error.Abort(
            _('censor does not support revlog version %d') % (flogv,))

    tombstone = revlog.packmeta({"censored": tombstone}, "")

    crev = fctx.filerev()
    if len(tombstone) > flog.rawsize(crev):
        raise error.Abort(_(
            'censor tombstone must be no longer than censored data'))

    # Using two files instead of one makes it easy to rewrite entry-by-entry
    idxread = repo.svfs(flog.indexfile, 'r')
    idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
    if flog.version & revlog.FLAG_INLINE_DATA:
        dataread, datawrite = idxread, idxwrite
    else:
        dataread = repo.svfs(flog.datafile, 'r')
        datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)

    # Copy all revlog data up to the entry to be censored.
    rio = revlog.revlogio()
    offset = flog.start(crev)

    for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
        idxwrite.write(chunk)
    for chunk in util.filechunkiter(dataread, limit=offset):
        datawrite.write(chunk)

    def rewriteindex(r, newoffs, newdata=None):
        """Rewrite the index entry with a new data offset and optional new data.

        The newdata argument, if given, is a tuple of three positive integers:
        (new compressed, new uncompressed, added flag bits).
        """
        offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
        flags = revlog.gettype(offlags)
        if newdata:
            comp, uncomp, nflags = newdata
            flags |= nflags
        offlags = revlog.offset_type(newoffs, flags)
        e = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
        idxwrite.write(rio.packentry(e, None, flog.version, r))
        idxread.seek(rio.size, 1)

    def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
        """Write the given full text to the filelog with the given data offset.

        Returns:
            The integer number of data bytes written, for tracking data offsets.
        """
        flag, compdata = flog.compress(data)
        newcomp = len(flag) + len(compdata)
        rewriteindex(r, offs, (newcomp, len(data), nflags))
        datawrite.write(flag)
        datawrite.write(compdata)
        dataread.seek(flog.length(r), 1)
        return newcomp

    # Rewrite censored revlog entry with (padded) tombstone data.
    pad = ' ' * (flog.rawsize(crev) - len(tombstone))
    offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)

    # Rewrite all following filelog revisions fixing up offsets and deltas.
    for srev in pycompat.xrange(crev + 1, len(flog)):
        if crev in flog.parentrevs(srev):
            # Immediate children of censored node must be re-added as fulltext.
            try:
                revdata = flog.revision(srev)
            except error.CensoredNodeError as e:
                revdata = e.tombstone
            dlen = rewrite(srev, offset, revdata)
        else:
            # Copy any other revision data verbatim after fixing up the offset.
            rewriteindex(srev, offset)
            dlen = flog.length(srev)
            for chunk in util.filechunkiter(dataread, limit=dlen):
                datawrite.write(chunk)
        offset += dlen

    idxread.close()
    idxwrite.close()
    if dataread is not idxread:
        dataread.close()
        datawrite.close()