Show More
@@ -1,92 +1,90 b'' | |||||
1 | # filelog.py - file history class for mercurial |
|
1 | # filelog.py - file history class for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | import revlog |
|
8 | import revlog | |
9 | import re |
|
9 | import re | |
10 |
|
10 | |||
# Delimiter of the metadata header: the two bytes 0x01 0x0a.  Compiled once
# at module level because parsemeta runs on every revision read.
_mdre = re.compile('\1\n')

def parsemeta(text):
    """Parse the metadata header of a filelog revision.

    Return (metadatadict, keylist, metadatasize) where metadatasize is the
    number of leading characters occupied by the header (including both
    delimiters).  When *text* carries no header, return (None, None, None).
    """
    # text can be buffer, so we can't use .startswith or .index
    if text[:2] != '\1\n':
        # Keep the arity consistent with the success path (the original
        # returned only two values here, breaking unconditional unpacking).
        return None, None, None
    # Locate the closing delimiter; search from 2 to skip the opening one.
    s = _mdre.search(text, 2).start()
    mtext = text[2:s]
    meta = {}
    keys = []
    # Each metadata line is "key: value"; split on the first ": " only so
    # values may themselves contain ": ".
    for l in mtext.splitlines():
        k, v = l.split(": ", 1)
        meta[k] = v
        keys.append(k)
    # s points at the closing delimiter; +2 skips past it.
    return meta, keys, (s + 2)
|
|||
26 |
|
24 | |||
def packmeta(meta, text):
    """Prepend a metadata header built from dict *meta* to *text*.

    Keys are emitted in sorted order so the output (and therefore the
    revision hash) is deterministic regardless of dict iteration order.
    The header is framed by the two-byte delimiter 0x01 0x0a on each side.
    """
    # sorted(meta) iterates the keys directly; it behaves identically to
    # the Python-2-only sorted(meta.iterkeys()) and is portable.
    metatext = "".join("%s: %s\n" % (k, meta[k]) for k in sorted(meta))
    return "\1\n%s\1\n%s" % (metatext, text)
31 |
|
29 | |||
class filelog(revlog.revlog):
    """Revlog subclass holding the history of a single tracked file.

    The backing revlog lives under "data/<path>.i".  A revision whose raw
    text starts with the 0x01 0x0a delimiter carries a metadata header
    (copy/rename information); read() strips it and renamed() inspects it.
    """

    def __init__(self, opener, path):
        # Index file name for this file's revlog: data/<path>.i
        indexfile = "/".join(("data", path + ".i"))
        super(filelog, self).__init__(opener, indexfile)

    def read(self, node):
        """Return the data of node, with any metadata header stripped."""
        raw = self.revision(node)
        if not raw.startswith('\1\n'):
            return raw
        # Skip past the closing '\1\n' delimiter of the header.
        end = raw.index('\1\n', 2)
        return raw[end + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Store a new revision, packing metadata into a header if needed.

        A header is also added when the data itself begins with '\\1\\n',
        so that read() can unambiguously strip it again.
        """
        needsheader = meta or text.startswith('\1\n')
        if needsheader:
            text = packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (oldpath, oldnode) if node records a copy, else False."""
        # Copies are only recorded on revisions with a null first parent.
        if self.parents(node)[0] != revlog.nullid:
            return False
        meta = parsemeta(self.revision(node))[0]
        if meta and "copy" in meta:
            return (meta["copy"], revlog.bin(meta["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""
        node = self.node(rev)
        # Renamed revisions carry a metadata header, so the stored revlog
        # size overstates the file data size; read and measure (slow).
        if self.renamed(node):
            return len(self.read(node))
        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return super(filelog, self).size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        candidate = text
        if text.startswith('\1\n'):
            # Mirror add(): data starting with the delimiter is stored
            # behind an empty metadata header.
            candidate = '\1\n\1\n' + text
        if not super(filelog, self).cmp(node, candidate):
            # Hashes match, so the contents are identical.
            return False
        # A rename changes the hash even when the data is unchanged;
        # fall back to comparing the actual data (slow).
        if self.renamed(node):
            return self.read(node) != text
        return True

    def _file(self, f):
        """Return a filelog for another tracked path via the same opener."""
        return filelog(self.opener, f)
General Comments 0
You need to be logged in to leave comments.
Login now