# filelog.py - file history class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from revlog import *
from demandload import *
demandload(globals(), "bdiff os")

class filelog(revlog):
    def __init__(self, opener, path, defversion=REVLOG_DEFAULT_VERSION):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")),
                        defversion)

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(self, path):
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return {}
        s = t.index('\1\n', 2)
        mt = t[2:s]
        m = {}
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        if meta or text.startswith('\1\n'):
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n%s\1\n%s" % ("".join(mt), text)
        return self.addrevision(text, transaction, link, p1, p2)
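
    # Illustrative note (not part of the original file): revisions that carry
    # metadata are stored with a leading envelope of the form
    #
    #   \1\n
    #   copy: <source path>
    #   copyrev: <hex node of the source revision>
    #   \1\n
    #   <file data>
    #
    # add() writes this envelope, read() strips it, and readmeta() returns the
    # key/value pairs; "copy" and "copyrev" are the keys renamed() checks below.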

    def renamed(self, node):
        if self.parents(node)[0] != nullid:
            return False
        m = self.readmeta(node)
        if m and m.has_key("copy"):
            return (m["copy"], bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""
        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        return revlog.size(self, rev)

    def cmp(self, node, text):
        """compare text with a given file revision"""
        # for renames, we have to go the slow way
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text
        return revlog.cmp(self, node, text)
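
    # Illustrative note (not part of the original file): size() and cmp() fall
    # back to decoding the full revision for renames because the text stored in
    # the revlog includes the metadata envelope, so the generic revlog size and
    # comparison would not describe the file data itself.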

    def annotate(self, node):

        def decorate(text, rev):
            # tag every line of text with the given (link) revision
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # copy the parent's annotations onto the child for every block
            # that bdiff reports as unchanged between the two texts
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {(self, node): 1}
        files = [self]
        visit = [(self, node)]
        while visit:
            f, n = visit.pop(0)
            rn = f.renamed(n)
            if rn:
                f, n = rn
                f = filelog(self.opener, f, self.defversion)
                files.insert(0, f)
            if (f, n) not in needed:
                needed[(f, n)] = 1
            else:
                needed[(f, n)] += 1
            for p in f.parents(n):
                if p == nullid:
                    continue
                if (f, p) not in needed:
                    needed[(f, p)] = 1
                    visit.append((f, p))
                else:
                    # count how many times we'll use this
                    needed[(f, p)] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            fn = [(f.rev(n[1]), f, n[1]) for n in needed.keys() if n[0] == f]
            fn.sort()
            visit.extend(fn)

        hist = {}
        for i in range(len(visit)):
            r, f, n = visit[i]
            curr = decorate(f.read(n), f.linkrev(n))
            if r == -1:
                continue
            parents = f.parents(n)
            # follow parents across renames
            if r < 1 and i > 0:
                # the first revision of a copied file is annotated against
                # the newest listed revision of its copy source
                j = i
                while j > 0 and visit[j][1] == f:
                    j -= 1
                parents = (visit[j][2],)
                f = visit[j][1]
            else:
                parents = f.parents(n)
            for p in parents:
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[(f, p)] -= 1
                    if not needed[(f, p)]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
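
# Usage sketch (illustrative only, not part of the original module; assumes the
# store "opener" that the surrounding repository object passes in when it
# creates its filelogs):
#
#   fl = filelog(opener, "foo/bar.c")
#   text = fl.read(fl.tip())       # file data at tip, metadata stripped
#   ann = fl.annotate(fl.tip())    # list of (linkrev, line) pairs
#   src = fl.renamed(fl.tip())     # (source path, source node) or False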