##// END OF EJS Templates
revlog: add "iscensored()" to revlog public API...
Mike Edgar -
r24118:76f6ae06 default
parent child Browse files
Show More
@@ -1,109 +1,109 b''
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import error, revlog
8 import error, revlog
9 import re
9 import re
10
10
11 _mdre = re.compile('\1\n')
11 _mdre = re.compile('\1\n')
12 def parsemeta(text):
12 def parsemeta(text):
13 """return (metadatadict, keylist, metadatasize)"""
13 """return (metadatadict, keylist, metadatasize)"""
14 # text can be buffer, so we can't use .startswith or .index
14 # text can be buffer, so we can't use .startswith or .index
15 if text[:2] != '\1\n':
15 if text[:2] != '\1\n':
16 return None, None
16 return None, None
17 s = _mdre.search(text, 2).start()
17 s = _mdre.search(text, 2).start()
18 mtext = text[2:s]
18 mtext = text[2:s]
19 meta = {}
19 meta = {}
20 for l in mtext.splitlines():
20 for l in mtext.splitlines():
21 k, v = l.split(": ", 1)
21 k, v = l.split(": ", 1)
22 meta[k] = v
22 meta[k] = v
23 return meta, (s + 2)
23 return meta, (s + 2)
24
24
25 def packmeta(meta, text):
25 def packmeta(meta, text):
26 keys = sorted(meta.iterkeys())
26 keys = sorted(meta.iterkeys())
27 metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
27 metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
28 return "\1\n%s\1\n%s" % (metatext, text)
28 return "\1\n%s\1\n%s" % (metatext, text)
29
29
30 def _censoredtext(text):
30 def _censoredtext(text):
31 m, offs = parsemeta(text)
31 m, offs = parsemeta(text)
32 return m and "censored" in m
32 return m and "censored" in m
33
33
34 class filelog(revlog.revlog):
34 class filelog(revlog.revlog):
35 def __init__(self, opener, path):
35 def __init__(self, opener, path):
36 super(filelog, self).__init__(opener,
36 super(filelog, self).__init__(opener,
37 "/".join(("data", path + ".i")))
37 "/".join(("data", path + ".i")))
38
38
39 def read(self, node):
39 def read(self, node):
40 t = self.revision(node)
40 t = self.revision(node)
41 if not t.startswith('\1\n'):
41 if not t.startswith('\1\n'):
42 return t
42 return t
43 s = t.index('\1\n', 2)
43 s = t.index('\1\n', 2)
44 return t[s + 2:]
44 return t[s + 2:]
45
45
46 def add(self, text, meta, transaction, link, p1=None, p2=None):
46 def add(self, text, meta, transaction, link, p1=None, p2=None):
47 if meta or text.startswith('\1\n'):
47 if meta or text.startswith('\1\n'):
48 text = packmeta(meta, text)
48 text = packmeta(meta, text)
49 return self.addrevision(text, transaction, link, p1, p2)
49 return self.addrevision(text, transaction, link, p1, p2)
50
50
51 def renamed(self, node):
51 def renamed(self, node):
52 if self.parents(node)[0] != revlog.nullid:
52 if self.parents(node)[0] != revlog.nullid:
53 return False
53 return False
54 t = self.revision(node)
54 t = self.revision(node)
55 m = parsemeta(t)[0]
55 m = parsemeta(t)[0]
56 if m and "copy" in m:
56 if m and "copy" in m:
57 return (m["copy"], revlog.bin(m["copyrev"]))
57 return (m["copy"], revlog.bin(m["copyrev"]))
58 return False
58 return False
59
59
60 def size(self, rev):
60 def size(self, rev):
61 """return the size of a given revision"""
61 """return the size of a given revision"""
62
62
63 # for revisions with renames, we have to go the slow way
63 # for revisions with renames, we have to go the slow way
64 node = self.node(rev)
64 node = self.node(rev)
65 if self.renamed(node):
65 if self.renamed(node):
66 return len(self.read(node))
66 return len(self.read(node))
67 if self._iscensored(rev):
67 if self.iscensored(rev):
68 return 0
68 return 0
69
69
70 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
70 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
71 return super(filelog, self).size(rev)
71 return super(filelog, self).size(rev)
72
72
73 def cmp(self, node, text):
73 def cmp(self, node, text):
74 """compare text with a given file revision
74 """compare text with a given file revision
75
75
76 returns True if text is different than what is stored.
76 returns True if text is different than what is stored.
77 """
77 """
78
78
79 t = text
79 t = text
80 if text.startswith('\1\n'):
80 if text.startswith('\1\n'):
81 t = '\1\n\1\n' + text
81 t = '\1\n\1\n' + text
82
82
83 samehashes = not super(filelog, self).cmp(node, t)
83 samehashes = not super(filelog, self).cmp(node, t)
84 if samehashes:
84 if samehashes:
85 return False
85 return False
86
86
87 # censored files compare against the empty file
87 # censored files compare against the empty file
88 if self._iscensored(self.rev(node)):
88 if self.iscensored(self.rev(node)):
89 return text != ''
89 return text != ''
90
90
91 # renaming a file produces a different hash, even if the data
91 # renaming a file produces a different hash, even if the data
92 # remains unchanged. Check if it's the case (slow):
92 # remains unchanged. Check if it's the case (slow):
93 if self.renamed(node):
93 if self.renamed(node):
94 t2 = self.read(node)
94 t2 = self.read(node)
95 return t2 != text
95 return t2 != text
96
96
97 return True
97 return True
98
98
99 def checkhash(self, text, p1, p2, node, rev=None):
99 def checkhash(self, text, p1, p2, node, rev=None):
100 try:
100 try:
101 super(filelog, self).checkhash(text, p1, p2, node, rev=rev)
101 super(filelog, self).checkhash(text, p1, p2, node, rev=rev)
102 except error.RevlogError:
102 except error.RevlogError:
103 if _censoredtext(text):
103 if _censoredtext(text):
104 raise error.CensoredNodeError(self.indexfile, node)
104 raise error.CensoredNodeError(self.indexfile, node)
105 raise
105 raise
106
106
107 def _iscensored(self, rev):
107 def iscensored(self, rev):
108 """Check if a file revision is censored."""
108 """Check if a file revision is censored."""
109 return self.flags(rev) & revlog.REVIDX_ISCENSORED
109 return self.flags(rev) & revlog.REVIDX_ISCENSORED
@@ -1,1543 +1,1547 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev
15 from node import bin, hex, nullid, nullrev
16 from i18n import _
16 from i18n import _
17 import ancestor, mdiff, parsers, error, util, templatefilters
17 import ancestor, mdiff, parsers, error, util, templatefilters
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog header flags
26 # revlog header flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGGENERALDELTA = (1 << 17)
30 REVLOGGENERALDELTA = (1 << 17)
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35
35
36 # revlog index flags
36 # revlog index flags
37 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
37 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
38 REVIDX_DEFAULT_FLAGS = 0
38 REVIDX_DEFAULT_FLAGS = 0
39 REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
39 REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
40
40
41 # max size of revlog with inline data
41 # max size of revlog with inline data
42 _maxinline = 131072
42 _maxinline = 131072
43 _chunksize = 1048576
43 _chunksize = 1048576
44
44
45 RevlogError = error.RevlogError
45 RevlogError = error.RevlogError
46 LookupError = error.LookupError
46 LookupError = error.LookupError
47 CensoredNodeError = error.CensoredNodeError
47 CensoredNodeError = error.CensoredNodeError
48
48
49 def getoffset(q):
49 def getoffset(q):
50 return int(q >> 16)
50 return int(q >> 16)
51
51
52 def gettype(q):
52 def gettype(q):
53 return int(q & 0xFFFF)
53 return int(q & 0xFFFF)
54
54
55 def offset_type(offset, type):
55 def offset_type(offset, type):
56 return long(long(offset) << 16 | type)
56 return long(long(offset) << 16 | type)
57
57
58 _nullhash = _sha(nullid)
58 _nullhash = _sha(nullid)
59
59
60 def hash(text, p1, p2):
60 def hash(text, p1, p2):
61 """generate a hash from the given text and its parent hashes
61 """generate a hash from the given text and its parent hashes
62
62
63 This hash combines both the current file contents and its history
63 This hash combines both the current file contents and its history
64 in a manner that makes it easy to distinguish nodes with the same
64 in a manner that makes it easy to distinguish nodes with the same
65 content in the revision graph.
65 content in the revision graph.
66 """
66 """
67 # As of now, if one of the parent node is null, p2 is null
67 # As of now, if one of the parent node is null, p2 is null
68 if p2 == nullid:
68 if p2 == nullid:
69 # deep copy of a hash is faster than creating one
69 # deep copy of a hash is faster than creating one
70 s = _nullhash.copy()
70 s = _nullhash.copy()
71 s.update(p1)
71 s.update(p1)
72 else:
72 else:
73 # none of the parent nodes are nullid
73 # none of the parent nodes are nullid
74 l = [p1, p2]
74 l = [p1, p2]
75 l.sort()
75 l.sort()
76 s = _sha(l[0])
76 s = _sha(l[0])
77 s.update(l[1])
77 s.update(l[1])
78 s.update(text)
78 s.update(text)
79 return s.digest()
79 return s.digest()
80
80
81 def decompress(bin):
81 def decompress(bin):
82 """ decompress the given input """
82 """ decompress the given input """
83 if not bin:
83 if not bin:
84 return bin
84 return bin
85 t = bin[0]
85 t = bin[0]
86 if t == '\0':
86 if t == '\0':
87 return bin
87 return bin
88 if t == 'x':
88 if t == 'x':
89 try:
89 try:
90 return _decompress(bin)
90 return _decompress(bin)
91 except zlib.error, e:
91 except zlib.error, e:
92 raise RevlogError(_("revlog decompress error: %s") % str(e))
92 raise RevlogError(_("revlog decompress error: %s") % str(e))
93 if t == 'u':
93 if t == 'u':
94 return bin[1:]
94 return bin[1:]
95 raise RevlogError(_("unknown compression type %r") % t)
95 raise RevlogError(_("unknown compression type %r") % t)
96
96
97 # index v0:
97 # index v0:
98 # 4 bytes: offset
98 # 4 bytes: offset
99 # 4 bytes: compressed length
99 # 4 bytes: compressed length
100 # 4 bytes: base rev
100 # 4 bytes: base rev
101 # 4 bytes: link rev
101 # 4 bytes: link rev
102 # 32 bytes: parent 1 nodeid
102 # 32 bytes: parent 1 nodeid
103 # 32 bytes: parent 2 nodeid
103 # 32 bytes: parent 2 nodeid
104 # 32 bytes: nodeid
104 # 32 bytes: nodeid
105 indexformatv0 = ">4l20s20s20s"
105 indexformatv0 = ">4l20s20s20s"
106 v0shaoffset = 56
106 v0shaoffset = 56
107
107
108 class revlogoldio(object):
108 class revlogoldio(object):
109 def __init__(self):
109 def __init__(self):
110 self.size = struct.calcsize(indexformatv0)
110 self.size = struct.calcsize(indexformatv0)
111
111
112 def parseindex(self, data, inline):
112 def parseindex(self, data, inline):
113 s = self.size
113 s = self.size
114 index = []
114 index = []
115 nodemap = {nullid: nullrev}
115 nodemap = {nullid: nullrev}
116 n = off = 0
116 n = off = 0
117 l = len(data)
117 l = len(data)
118 while off + s <= l:
118 while off + s <= l:
119 cur = data[off:off + s]
119 cur = data[off:off + s]
120 off += s
120 off += s
121 e = _unpack(indexformatv0, cur)
121 e = _unpack(indexformatv0, cur)
122 # transform to revlogv1 format
122 # transform to revlogv1 format
123 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
123 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
124 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
124 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
125 index.append(e2)
125 index.append(e2)
126 nodemap[e[6]] = n
126 nodemap[e[6]] = n
127 n += 1
127 n += 1
128
128
129 # add the magic null revision at -1
129 # add the magic null revision at -1
130 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
130 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
131
131
132 return index, nodemap, None
132 return index, nodemap, None
133
133
134 def packentry(self, entry, node, version, rev):
134 def packentry(self, entry, node, version, rev):
135 if gettype(entry[0]):
135 if gettype(entry[0]):
136 raise RevlogError(_("index entry flags need RevlogNG"))
136 raise RevlogError(_("index entry flags need RevlogNG"))
137 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
137 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
138 node(entry[5]), node(entry[6]), entry[7])
138 node(entry[5]), node(entry[6]), entry[7])
139 return _pack(indexformatv0, *e2)
139 return _pack(indexformatv0, *e2)
140
140
141 # index ng:
141 # index ng:
142 # 6 bytes: offset
142 # 6 bytes: offset
143 # 2 bytes: flags
143 # 2 bytes: flags
144 # 4 bytes: compressed length
144 # 4 bytes: compressed length
145 # 4 bytes: uncompressed length
145 # 4 bytes: uncompressed length
146 # 4 bytes: base rev
146 # 4 bytes: base rev
147 # 4 bytes: link rev
147 # 4 bytes: link rev
148 # 4 bytes: parent 1 rev
148 # 4 bytes: parent 1 rev
149 # 4 bytes: parent 2 rev
149 # 4 bytes: parent 2 rev
150 # 32 bytes: nodeid
150 # 32 bytes: nodeid
151 indexformatng = ">Qiiiiii20s12x"
151 indexformatng = ">Qiiiiii20s12x"
152 ngshaoffset = 32
152 ngshaoffset = 32
153 versionformat = ">I"
153 versionformat = ">I"
154
154
155 class revlogio(object):
155 class revlogio(object):
156 def __init__(self):
156 def __init__(self):
157 self.size = struct.calcsize(indexformatng)
157 self.size = struct.calcsize(indexformatng)
158
158
159 def parseindex(self, data, inline):
159 def parseindex(self, data, inline):
160 # call the C implementation to parse the index data
160 # call the C implementation to parse the index data
161 index, cache = parsers.parse_index2(data, inline)
161 index, cache = parsers.parse_index2(data, inline)
162 return index, getattr(index, 'nodemap', None), cache
162 return index, getattr(index, 'nodemap', None), cache
163
163
164 def packentry(self, entry, node, version, rev):
164 def packentry(self, entry, node, version, rev):
165 p = _pack(indexformatng, *entry)
165 p = _pack(indexformatng, *entry)
166 if rev == 0:
166 if rev == 0:
167 p = _pack(versionformat, version) + p[4:]
167 p = _pack(versionformat, version) + p[4:]
168 return p
168 return p
169
169
170 class revlog(object):
170 class revlog(object):
171 """
171 """
172 the underlying revision storage object
172 the underlying revision storage object
173
173
174 A revlog consists of two parts, an index and the revision data.
174 A revlog consists of two parts, an index and the revision data.
175
175
176 The index is a file with a fixed record size containing
176 The index is a file with a fixed record size containing
177 information on each revision, including its nodeid (hash), the
177 information on each revision, including its nodeid (hash), the
178 nodeids of its parents, the position and offset of its data within
178 nodeids of its parents, the position and offset of its data within
179 the data file, and the revision it's based on. Finally, each entry
179 the data file, and the revision it's based on. Finally, each entry
180 contains a linkrev entry that can serve as a pointer to external
180 contains a linkrev entry that can serve as a pointer to external
181 data.
181 data.
182
182
183 The revision data itself is a linear collection of data chunks.
183 The revision data itself is a linear collection of data chunks.
184 Each chunk represents a revision and is usually represented as a
184 Each chunk represents a revision and is usually represented as a
185 delta against the previous chunk. To bound lookup time, runs of
185 delta against the previous chunk. To bound lookup time, runs of
186 deltas are limited to about 2 times the length of the original
186 deltas are limited to about 2 times the length of the original
187 version data. This makes retrieval of a version proportional to
187 version data. This makes retrieval of a version proportional to
188 its size, or O(1) relative to the number of revisions.
188 its size, or O(1) relative to the number of revisions.
189
189
190 Both pieces of the revlog are written to in an append-only
190 Both pieces of the revlog are written to in an append-only
191 fashion, which means we never need to rewrite a file to insert or
191 fashion, which means we never need to rewrite a file to insert or
192 remove data, and can use some simple techniques to avoid the need
192 remove data, and can use some simple techniques to avoid the need
193 for locking while reading.
193 for locking while reading.
194 """
194 """
195 def __init__(self, opener, indexfile):
195 def __init__(self, opener, indexfile):
196 """
196 """
197 create a revlog object
197 create a revlog object
198
198
199 opener is a function that abstracts the file opening operation
199 opener is a function that abstracts the file opening operation
200 and can be used to implement COW semantics or the like.
200 and can be used to implement COW semantics or the like.
201 """
201 """
202 self.indexfile = indexfile
202 self.indexfile = indexfile
203 self.datafile = indexfile[:-2] + ".d"
203 self.datafile = indexfile[:-2] + ".d"
204 self.opener = opener
204 self.opener = opener
205 self._cache = None
205 self._cache = None
206 self._basecache = None
206 self._basecache = None
207 self._chunkcache = (0, '')
207 self._chunkcache = (0, '')
208 self._chunkcachesize = 65536
208 self._chunkcachesize = 65536
209 self._maxchainlen = None
209 self._maxchainlen = None
210 self.index = []
210 self.index = []
211 self._pcache = {}
211 self._pcache = {}
212 self._nodecache = {nullid: nullrev}
212 self._nodecache = {nullid: nullrev}
213 self._nodepos = None
213 self._nodepos = None
214
214
215 v = REVLOG_DEFAULT_VERSION
215 v = REVLOG_DEFAULT_VERSION
216 opts = getattr(opener, 'options', None)
216 opts = getattr(opener, 'options', None)
217 if opts is not None:
217 if opts is not None:
218 if 'revlogv1' in opts:
218 if 'revlogv1' in opts:
219 if 'generaldelta' in opts:
219 if 'generaldelta' in opts:
220 v |= REVLOGGENERALDELTA
220 v |= REVLOGGENERALDELTA
221 else:
221 else:
222 v = 0
222 v = 0
223 if 'chunkcachesize' in opts:
223 if 'chunkcachesize' in opts:
224 self._chunkcachesize = opts['chunkcachesize']
224 self._chunkcachesize = opts['chunkcachesize']
225 if 'maxchainlen' in opts:
225 if 'maxchainlen' in opts:
226 self._maxchainlen = opts['maxchainlen']
226 self._maxchainlen = opts['maxchainlen']
227
227
228 if self._chunkcachesize <= 0:
228 if self._chunkcachesize <= 0:
229 raise RevlogError(_('revlog chunk cache size %r is not greater '
229 raise RevlogError(_('revlog chunk cache size %r is not greater '
230 'than 0') % self._chunkcachesize)
230 'than 0') % self._chunkcachesize)
231 elif self._chunkcachesize & (self._chunkcachesize - 1):
231 elif self._chunkcachesize & (self._chunkcachesize - 1):
232 raise RevlogError(_('revlog chunk cache size %r is not a power '
232 raise RevlogError(_('revlog chunk cache size %r is not a power '
233 'of 2') % self._chunkcachesize)
233 'of 2') % self._chunkcachesize)
234
234
235 i = ''
235 i = ''
236 self._initempty = True
236 self._initempty = True
237 try:
237 try:
238 f = self.opener(self.indexfile)
238 f = self.opener(self.indexfile)
239 i = f.read()
239 i = f.read()
240 f.close()
240 f.close()
241 if len(i) > 0:
241 if len(i) > 0:
242 v = struct.unpack(versionformat, i[:4])[0]
242 v = struct.unpack(versionformat, i[:4])[0]
243 self._initempty = False
243 self._initempty = False
244 except IOError, inst:
244 except IOError, inst:
245 if inst.errno != errno.ENOENT:
245 if inst.errno != errno.ENOENT:
246 raise
246 raise
247
247
248 self.version = v
248 self.version = v
249 self._inline = v & REVLOGNGINLINEDATA
249 self._inline = v & REVLOGNGINLINEDATA
250 self._generaldelta = v & REVLOGGENERALDELTA
250 self._generaldelta = v & REVLOGGENERALDELTA
251 flags = v & ~0xFFFF
251 flags = v & ~0xFFFF
252 fmt = v & 0xFFFF
252 fmt = v & 0xFFFF
253 if fmt == REVLOGV0 and flags:
253 if fmt == REVLOGV0 and flags:
254 raise RevlogError(_("index %s unknown flags %#04x for format v0")
254 raise RevlogError(_("index %s unknown flags %#04x for format v0")
255 % (self.indexfile, flags >> 16))
255 % (self.indexfile, flags >> 16))
256 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
256 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
257 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
257 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
258 % (self.indexfile, flags >> 16))
258 % (self.indexfile, flags >> 16))
259 elif fmt > REVLOGNG:
259 elif fmt > REVLOGNG:
260 raise RevlogError(_("index %s unknown format %d")
260 raise RevlogError(_("index %s unknown format %d")
261 % (self.indexfile, fmt))
261 % (self.indexfile, fmt))
262
262
263 self._io = revlogio()
263 self._io = revlogio()
264 if self.version == REVLOGV0:
264 if self.version == REVLOGV0:
265 self._io = revlogoldio()
265 self._io = revlogoldio()
266 try:
266 try:
267 d = self._io.parseindex(i, self._inline)
267 d = self._io.parseindex(i, self._inline)
268 except (ValueError, IndexError):
268 except (ValueError, IndexError):
269 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
269 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
270 self.index, nodemap, self._chunkcache = d
270 self.index, nodemap, self._chunkcache = d
271 if nodemap is not None:
271 if nodemap is not None:
272 self.nodemap = self._nodecache = nodemap
272 self.nodemap = self._nodecache = nodemap
273 if not self._chunkcache:
273 if not self._chunkcache:
274 self._chunkclear()
274 self._chunkclear()
275 # revnum -> (chain-length, sum-delta-length)
275 # revnum -> (chain-length, sum-delta-length)
276 self._chaininfocache = {}
276 self._chaininfocache = {}
277
277
278 def tip(self):
278 def tip(self):
279 return self.node(len(self.index) - 2)
279 return self.node(len(self.index) - 2)
280 def __contains__(self, rev):
280 def __contains__(self, rev):
281 return 0 <= rev < len(self)
281 return 0 <= rev < len(self)
282 def __len__(self):
282 def __len__(self):
283 return len(self.index) - 1
283 return len(self.index) - 1
284 def __iter__(self):
284 def __iter__(self):
285 return iter(xrange(len(self)))
285 return iter(xrange(len(self)))
286 def revs(self, start=0, stop=None):
286 def revs(self, start=0, stop=None):
287 """iterate over all rev in this revlog (from start to stop)"""
287 """iterate over all rev in this revlog (from start to stop)"""
288 step = 1
288 step = 1
289 if stop is not None:
289 if stop is not None:
290 if start > stop:
290 if start > stop:
291 step = -1
291 step = -1
292 stop += step
292 stop += step
293 else:
293 else:
294 stop = len(self)
294 stop = len(self)
295 return xrange(start, stop, step)
295 return xrange(start, stop, step)
296
296
297 @util.propertycache
297 @util.propertycache
298 def nodemap(self):
298 def nodemap(self):
299 self.rev(self.node(0))
299 self.rev(self.node(0))
300 return self._nodecache
300 return self._nodecache
301
301
302 def hasnode(self, node):
302 def hasnode(self, node):
303 try:
303 try:
304 self.rev(node)
304 self.rev(node)
305 return True
305 return True
306 except KeyError:
306 except KeyError:
307 return False
307 return False
308
308
309 def clearcaches(self):
309 def clearcaches(self):
310 try:
310 try:
311 self._nodecache.clearcaches()
311 self._nodecache.clearcaches()
312 except AttributeError:
312 except AttributeError:
313 self._nodecache = {nullid: nullrev}
313 self._nodecache = {nullid: nullrev}
314 self._nodepos = None
314 self._nodepos = None
315
315
316 def rev(self, node):
316 def rev(self, node):
317 try:
317 try:
318 return self._nodecache[node]
318 return self._nodecache[node]
319 except TypeError:
319 except TypeError:
320 raise
320 raise
321 except RevlogError:
321 except RevlogError:
322 # parsers.c radix tree lookup failed
322 # parsers.c radix tree lookup failed
323 raise LookupError(node, self.indexfile, _('no node'))
323 raise LookupError(node, self.indexfile, _('no node'))
324 except KeyError:
324 except KeyError:
325 # pure python cache lookup failed
325 # pure python cache lookup failed
326 n = self._nodecache
326 n = self._nodecache
327 i = self.index
327 i = self.index
328 p = self._nodepos
328 p = self._nodepos
329 if p is None:
329 if p is None:
330 p = len(i) - 2
330 p = len(i) - 2
331 for r in xrange(p, -1, -1):
331 for r in xrange(p, -1, -1):
332 v = i[r][7]
332 v = i[r][7]
333 n[v] = r
333 n[v] = r
334 if v == node:
334 if v == node:
335 self._nodepos = r - 1
335 self._nodepos = r - 1
336 return r
336 return r
337 raise LookupError(node, self.indexfile, _('no node'))
337 raise LookupError(node, self.indexfile, _('no node'))
338
338
339 def node(self, rev):
339 def node(self, rev):
340 return self.index[rev][7]
340 return self.index[rev][7]
341 def linkrev(self, rev):
341 def linkrev(self, rev):
342 return self.index[rev][4]
342 return self.index[rev][4]
343 def parents(self, node):
343 def parents(self, node):
344 i = self.index
344 i = self.index
345 d = i[self.rev(node)]
345 d = i[self.rev(node)]
346 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
346 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
347 def parentrevs(self, rev):
347 def parentrevs(self, rev):
348 return self.index[rev][5:7]
348 return self.index[rev][5:7]
349 def start(self, rev):
349 def start(self, rev):
350 return int(self.index[rev][0] >> 16)
350 return int(self.index[rev][0] >> 16)
351 def end(self, rev):
351 def end(self, rev):
352 return self.start(rev) + self.length(rev)
352 return self.start(rev) + self.length(rev)
353 def length(self, rev):
353 def length(self, rev):
354 return self.index[rev][1]
354 return self.index[rev][1]
355 def chainbase(self, rev):
355 def chainbase(self, rev):
356 index = self.index
356 index = self.index
357 base = index[rev][3]
357 base = index[rev][3]
358 while base != rev:
358 while base != rev:
359 rev = base
359 rev = base
360 base = index[rev][3]
360 base = index[rev][3]
361 return base
361 return base
362 def chainlen(self, rev):
362 def chainlen(self, rev):
363 return self._chaininfo(rev)[0]
363 return self._chaininfo(rev)[0]
364
364
365 def _chaininfo(self, rev):
365 def _chaininfo(self, rev):
366 chaininfocache = self._chaininfocache
366 chaininfocache = self._chaininfocache
367 if rev in chaininfocache:
367 if rev in chaininfocache:
368 return chaininfocache[rev]
368 return chaininfocache[rev]
369 index = self.index
369 index = self.index
370 generaldelta = self._generaldelta
370 generaldelta = self._generaldelta
371 iterrev = rev
371 iterrev = rev
372 e = index[iterrev]
372 e = index[iterrev]
373 clen = 0
373 clen = 0
374 compresseddeltalen = 0
374 compresseddeltalen = 0
375 while iterrev != e[3]:
375 while iterrev != e[3]:
376 clen += 1
376 clen += 1
377 compresseddeltalen += e[1]
377 compresseddeltalen += e[1]
378 if generaldelta:
378 if generaldelta:
379 iterrev = e[3]
379 iterrev = e[3]
380 else:
380 else:
381 iterrev -= 1
381 iterrev -= 1
382 if iterrev in chaininfocache:
382 if iterrev in chaininfocache:
383 t = chaininfocache[iterrev]
383 t = chaininfocache[iterrev]
384 clen += t[0]
384 clen += t[0]
385 compresseddeltalen += t[1]
385 compresseddeltalen += t[1]
386 break
386 break
387 e = index[iterrev]
387 e = index[iterrev]
388 else:
388 else:
389 # Add text length of base since decompressing that also takes
389 # Add text length of base since decompressing that also takes
390 # work. For cache hits the length is already included.
390 # work. For cache hits the length is already included.
391 compresseddeltalen += e[1]
391 compresseddeltalen += e[1]
392 r = (clen, compresseddeltalen)
392 r = (clen, compresseddeltalen)
393 chaininfocache[rev] = r
393 chaininfocache[rev] = r
394 return r
394 return r
395
395
396 def flags(self, rev):
396 def flags(self, rev):
397 return self.index[rev][0] & 0xFFFF
397 return self.index[rev][0] & 0xFFFF
398 def rawsize(self, rev):
398 def rawsize(self, rev):
399 """return the length of the uncompressed text for a given revision"""
399 """return the length of the uncompressed text for a given revision"""
400 l = self.index[rev][2]
400 l = self.index[rev][2]
401 if l >= 0:
401 if l >= 0:
402 return l
402 return l
403
403
404 t = self.revision(self.node(rev))
404 t = self.revision(self.node(rev))
405 return len(t)
405 return len(t)
406 size = rawsize
406 size = rawsize
407
407
408 def ancestors(self, revs, stoprev=0, inclusive=False):
408 def ancestors(self, revs, stoprev=0, inclusive=False):
409 """Generate the ancestors of 'revs' in reverse topological order.
409 """Generate the ancestors of 'revs' in reverse topological order.
410 Does not generate revs lower than stoprev.
410 Does not generate revs lower than stoprev.
411
411
412 See the documentation for ancestor.lazyancestors for more details."""
412 See the documentation for ancestor.lazyancestors for more details."""
413
413
414 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
414 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
415 inclusive=inclusive)
415 inclusive=inclusive)
416
416
417 def descendants(self, revs):
417 def descendants(self, revs):
418 """Generate the descendants of 'revs' in revision order.
418 """Generate the descendants of 'revs' in revision order.
419
419
420 Yield a sequence of revision numbers starting with a child of
420 Yield a sequence of revision numbers starting with a child of
421 some rev in revs, i.e., each revision is *not* considered a
421 some rev in revs, i.e., each revision is *not* considered a
422 descendant of itself. Results are ordered by revision number (a
422 descendant of itself. Results are ordered by revision number (a
423 topological sort)."""
423 topological sort)."""
424 first = min(revs)
424 first = min(revs)
425 if first == nullrev:
425 if first == nullrev:
426 for i in self:
426 for i in self:
427 yield i
427 yield i
428 return
428 return
429
429
430 seen = set(revs)
430 seen = set(revs)
431 for i in self.revs(start=first + 1):
431 for i in self.revs(start=first + 1):
432 for x in self.parentrevs(i):
432 for x in self.parentrevs(i):
433 if x != nullrev and x in seen:
433 if x != nullrev and x in seen:
434 seen.add(i)
434 seen.add(i)
435 yield i
435 yield i
436 break
436 break
437
437
def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common. In revset terminology, we return the
    tuple:

      ::common, (::heads) - (::common)

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid.

    The first element of the returned tuple supports membership tests
    and iteration (it is the 'lazyset' defined below); the second is a
    list of node IDs sorted by revision number."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    class lazyset(object):
        # membership set backed by a lazy generator of values plus a
        # set of explicitly added values; the generator is consumed
        # only as far as membership tests require
        def __init__(self, lazyvalues):
            self.addedvalues = set()
            self.lazyvalues = lazyvalues

        def __contains__(self, value):
            return value in self.addedvalues or value in self.lazyvalues

        def __iter__(self):
            added = self.addedvalues
            for r in added:
                yield r
            for r in self.lazyvalues:
                if not r in added:
                    yield r

        def add(self, value):
            self.addedvalues.add(value)

        def update(self, values):
            self.addedvalues.update(values)

    has = lazyset(self.ancestors(common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    # (breadth-first walk from heads, stopping at anything in 'has')
    missing = set()
    visit = util.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        else:
            missing.add(r)
            for p in self.parentrevs(r):
                if p not in has:
                    visit.append(p)
    missing = list(missing)
    missing.sort()
    return has, [self.node(r) for r in missing]
501
501
def incrementalmissingrevs(self, common=None):
    """Return an ancestor.incrementalmissingancestors object.

    The returned object can incrementally compute the revision
    numbers of the ancestors of arbitrary sets that are not
    ancestors of common.

    'common' is a list of revision numbers; nullrev is used when it
    is not supplied.
    """
    bases = [nullrev] if common is None else common
    return ancestor.incrementalmissingancestors(self.parentrevs, bases)
515
515
def findmissingrevs(self, common=None, heads=None):
    """Return the revision numbers of the ancestors of heads that
    are not ancestors of common.

    More specifically, return a list of revision numbers N such that:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The result is sorted by revision number (topologically sorted).

    'heads' and 'common' are both lists of revision numbers. If heads
    is not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullrev."""
    if common is None:
        common = [nullrev]
    if heads is None:
        heads = self.headrevs()
    # delegate to the incremental machinery for the actual walk
    return self.incrementalmissingrevs(common=common).missingancestors(heads)
539
539
def findmissing(self, common=None, heads=None):
    """Return the ancestors of heads that are not ancestors of common.

    More specifically, return a list of nodes N such that:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The result is sorted by revision number (topologically sorted).

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    # map node IDs to revision numbers before walking the graph
    commonrevs = [self.rev(n) for n in common]
    headrevs = [self.rev(n) for n in heads]
    missing = self.incrementalmissingrevs(common=commonrevs)
    return [self.node(r) for r in missing.missingancestors(headrevs)]
565
565
def nodesbetween(self, roots=None, heads=None):
    """Return a topological path from 'roots' to 'heads'.

    Return a tuple (nodes, outroots, outheads) where 'nodes' is a
    topologically sorted list of all nodes N that satisfy both of
    these constraints:

      1. N is a descendant of some node in 'roots'
      2. N is an ancestor of some node in 'heads'

    Every node is considered to be both a descendant and an ancestor
    of itself, so every reachable node in 'roots' and 'heads' will be
    included in 'nodes'.

    'outroots' is the list of reachable nodes in 'roots', i.e., the
    subset of 'roots' that is returned in 'nodes'.  Likewise,
    'outheads' is the subset of 'heads' that is also in 'nodes'.

    'roots' and 'heads' are both lists of node IDs.  If 'roots' is
    unspecified, uses nullid as the only root.  If 'heads' is
    unspecified, uses list of all of the revlog's heads."""
    nonodes = ([], [], [])
    if roots is not None:
        roots = list(roots)
        if not roots:
            return nonodes
        lowestrev = min([self.rev(n) for n in roots])
    else:
        roots = [nullid] # Everybody's a descendant of nullid
        lowestrev = nullrev
    if (lowestrev == nullrev) and (heads is None):
        # We want _all_ the nodes!
        return ([self.node(r) for r in self], [nullid], list(self.heads()))
    if heads is None:
        # All nodes are ancestors, so the latest ancestor is the last
        # node.
        highestrev = len(self) - 1
        # Set ancestors to None to signal that every node is an ancestor.
        ancestors = None
        # Set heads to an empty dictionary for later discovery of heads
        heads = {}
    else:
        heads = list(heads)
        if not heads:
            return nonodes
        ancestors = set()
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, False)
        # Start at the top and keep marking parents until we're done.
        nodestotag = set(heads)
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendant of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors.add(n) # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.update([p for p in self.parents(n) if
                                       p != nullid])
                elif n in heads: # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
        if not ancestors:
            return nonodes
    # Now that we have our set of ancestors, we want to remove any
    # roots that are not ancestors.

    # If one of the roots was nullid, everything is included anyway.
    if lowestrev > nullrev:
        # But, since we weren't, let's recompute the lowest rev to not
        # include roots that aren't ancestors.

        # Filter out roots that aren't ancestors of heads
        roots = [n for n in roots if n in ancestors]
        # Recompute the lowest revision
        if roots:
            lowestrev = min([self.rev(n) for n in roots])
        else:
            # No more roots?  Return empty list
            return nonodes
    else:
        # We are descending from nullid, and don't need to care about
        # any other roots.
        lowestrev = nullrev
        roots = [nullid]
    # Transform our roots list into a set.
    descendants = set(roots)
    # Also, keep the original roots so we can filter out roots that aren't
    # 'real' roots (i.e. are descended from other roots).
    roots = descendants.copy()
    # Our topologically sorted list of output nodes.
    orderedout = []
    # Don't start at nullid since we don't want nullid in our output list,
    # and if nullid shows up in descendants, empty parents will look like
    # they're descendants.
    # Walk the candidate range in revision (i.e. topological) order.
    for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
        n = self.node(r)
        isdescendant = False
        if lowestrev == nullrev: # Everybody is a descendant of nullid
            isdescendant = True
        elif n in descendants:
            # n is already a descendant
            isdescendant = True
            # This check only needs to be done here because all the roots
            # will start being marked is descendants before the loop.
            if n in roots:
                # If n was a root, check if it's a 'real' root.
                p = tuple(self.parents(n))
                # If any of its parents are descendants, it's not a root.
                if (p[0] in descendants) or (p[1] in descendants):
                    roots.remove(n)
        else:
            p = tuple(self.parents(n))
            # A node is a descendant if either of its parents are
            # descendants.  (We seeded the dependents list with the roots
            # up there, remember?)
            if (p[0] in descendants) or (p[1] in descendants):
                descendants.add(n)
                isdescendant = True
        if isdescendant and ((ancestors is None) or (n in ancestors)):
            # Only include nodes that are both descendants and ancestors.
            orderedout.append(n)
            if (ancestors is not None) and (n in heads):
                # We're trying to figure out which heads are reachable
                # from roots.
                # Mark this head as having been reached
                heads[n] = True
            elif ancestors is None:
                # Otherwise, we're trying to discover the heads.
                # Assume this is a head because if it isn't, the next step
                # will eventually remove it.
                heads[n] = True
                # But, obviously its parents aren't.
                for p in self.parents(n):
                    heads.pop(p, None)
    # keep only the heads that were actually reached from the roots
    heads = [n for n, flag in heads.iteritems() if flag]
    roots = list(roots)
    assert orderedout
    assert roots
    assert heads
    return (orderedout, roots, heads)
720
720
def headrevs(self):
    """Return the revision numbers of all heads.

    Prefer the C index implementation when available; fall back to
    the pure-python computation otherwise."""
    try:
        return self.index.headrevs()
    except AttributeError:
        pass
    return self._headrevs()
726
726
def _headrevs(self):
    """Pure-python fallback computing the head revision numbers."""
    total = len(self)
    if not total:
        return [nullrev]
    # start with nobody marked as a head: filtered revisions are never
    # visited below, so they can never end up marked
    ishead = [0] * (total + 1)
    index = self.index
    for rev in self:
        ishead[rev] = 1  # rev may be a head...
        entry = index[rev]
        # ...but its parents certainly are not
        ishead[entry[5]] = ishead[entry[6]] = 0
    return [rev for rev, flag in enumerate(ishead) if flag]
739
739
def heads(self, start=None, stop=None):
    """Return the list of all nodes that have no children.

    If 'start' is specified, only heads that are descendants of
    'start' are returned. If 'stop' is specified, the revs in 'stop'
    are treated as if they had no children."""
    if start is None and stop is None:
        if not len(self):
            return [nullid]
        return [self.node(r) for r in self.headrevs()]

    if start is None:
        start = nullid
    if stop is None:
        stop = []
    stoprevs = set([self.rev(n) for n in stop])
    startrev = self.rev(start)
    reachable = set([startrev])
    candidates = set([startrev])

    pfunc = self.parentrevs
    for rev in self.revs(start=startrev + 1):
        for parent in pfunc(rev):
            if parent not in reachable:
                continue
            if rev not in stoprevs:
                # rev descends from start: it is reachable and, for
                # now, a candidate head
                reachable.add(rev)
                candidates.add(rev)
            if parent in candidates and parent not in stoprevs:
                # parent has a child, so it cannot be a head
                candidates.remove(parent)

    return [self.node(r) for r in candidates]
773
773
def children(self, node):
    """Return the nodes of all revisions that have 'node' as a parent."""
    kids = []
    prev = self.rev(node)
    for rev in self.revs(start=prev + 1):
        parentrevs = [p for p in self.parentrevs(rev) if p != nullrev]
        if parentrevs:
            for p in parentrevs:
                if p == prev:
                    kids.append(self.node(rev))
        elif prev == nullrev:
            # a revision with no real parents is a child of the null rev
            kids.append(self.node(rev))
    return kids
787
787
def descendant(self, start, end):
    """Report whether revision 'end' descends from revision 'start'.

    The null revision counts as an ancestor of everything."""
    if start == nullrev:
        return True
    for rev in self.descendants([start]):
        if rev == end:
            return True
        if rev > end:
            # descendants() yields in increasing revision order, so
            # passing 'end' without a match means there is none
            break
    return False
797
797
def commonancestorsheads(self, a, b):
    """Return the heads of the common ancestors of nodes a and b."""
    reva, revb = self.rev(a), self.rev(b)
    try:
        ancs = self.index.commonancestorsheads(reva, revb)
    except (AttributeError, OverflowError):
        # no C index available, or revs too large for it:
        # use the pure-python implementation
        ancs = ancestor.commonancestorsheads(self.parentrevs, reva, revb)
    return map(self.node, ancs)
806
806
def isancestor(self, a, b):
    """Return True if node a is an ancestor of node b.

    a is an ancestor of b exactly when a appears among the heads of
    the common ancestors of (a, b), which makes this a one-liner on
    top of commonancestorsheads."""
    return a in self.commonancestorsheads(a, b)
813
813
def ancestor(self, a, b):
    """Return the "best" common ancestor of nodes a and b."""
    reva, revb = self.rev(a), self.rev(b)
    try:
        ancs = self.index.ancestors(reva, revb)
    except (AttributeError, OverflowError):
        # no C index, or revs too large for it: pure-python fallback
        ancs = ancestor.ancestors(self.parentrevs, reva, revb)
    if not ancs:
        return nullid
    # pick the smallest node so that ties resolve consistently
    return min(map(self.node, ancs))
826
826
def _match(self, id):
    """Resolve 'id' to a binary node using only exact interpretations.

    'id' may be a revision number, a 20-byte binary node, the string
    form of a (possibly negative) revision number, or a full 40-char
    hex node. Returns None (implicitly, by falling through) when no
    interpretation matches; partial hex prefixes are handled by
    _partialmatch, not here."""
    if isinstance(id, int):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node) # quick search the index
            return node
        except LookupError:
            pass # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            # negative revs count back from the tip
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (TypeError, LookupError):
            pass
860
860
def _partialmatch(self, id):
    """Return the node whose hex form starts with the prefix 'id'.

    Returns None when no node matches; raises LookupError with
    'ambiguous identifier' when more than one node matches. The fast
    path uses the C radix tree in the index; the slow path scans the
    whole index and caches unambiguous answers in self._pcache."""
    try:
        n = self.index.partialmatch(id)
        if n and self.hasnode(n):
            return n
        return None
    except RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        # fall through to slow path that filters hidden revisions
        pass
    except (AttributeError, ValueError):
        # we are pure python, or key was too short to search radix tree
        pass

    if id in self._pcache:
        return self._pcache[id]

    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[:l * 2])
            # scan the full index for nodes with a matching binary prefix,
            # then refine by the exact (possibly odd-length) hex prefix
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [n for n in nl if hex(n).startswith(id) and
                  self.hasnode(n)]
            if len(nl) > 0:
                if len(nl) == 1:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
895
895
def lookup(self, id):
    """Locate a node from a revision number, str(revision number),
    nodeid, or unique prefix of a hex nodeid.

    Raises LookupError when nothing matches."""
    # exact interpretations first, then unique-prefix matching
    node = self._match(id)
    if node is not None:
        return node
    node = self._partialmatch(id)
    if node:
        return node
    raise LookupError(id, self.indexfile, _('no match found'))
909
909
def cmp(self, node, text):
    """Compare 'text' with the stored file revision 'node'.

    Returns True when 'text' differs from what is stored, i.e. when
    hashing the text with the stored parents does not reproduce
    'node'."""
    parent1, parent2 = self.parents(node)
    # note: 'hash' is the revlog module's node hash, not the builtin
    return node != hash(text, parent1, parent2)
917
917
def _addchunk(self, offset, data):
    """Fold freshly read data into the single-window chunk cache."""
    cacheoff, cached = self._chunkcache
    if (cacheoff + len(cached) == offset
        and len(cached) + len(data) < _chunksize):
        # the new data directly extends the cached window: grow it
        self._chunkcache = cacheoff, cached + data
    else:
        # not contiguous (or would grow too large): restart the cache
        self._chunkcache = offset, data
925
925
def _loadchunk(self, offset, length):
    """Read 'length' bytes at 'offset' from disk, filling the cache.

    Reads from the index file when the revlog is inline, from the
    data file otherwise. A fixed-size window around the request is
    cached both before and after it, which speeds up operations that
    read the revlog backwards."""
    if self._inline:
        fh = self.opener(self.indexfile)
    else:
        fh = self.opener(self.datafile)

    # round the request out to cache-size-aligned boundaries
    cachesize = self._chunkcachesize
    realoffset = offset & ~(cachesize - 1)
    reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                  - realoffset)
    fh.seek(realoffset)
    d = fh.read(reallength)
    fh.close()
    self._addchunk(realoffset, d)
    if offset != realoffset or reallength != length:
        # hand back only the slice that was actually requested
        return util.buffer(d, offset - realoffset, length)
    return d
946
946
947 def _getchunk(self, offset, length):
947 def _getchunk(self, offset, length):
948 o, d = self._chunkcache
948 o, d = self._chunkcache
949 l = len(d)
949 l = len(d)
950
950
951 # is it in the cache?
951 # is it in the cache?
952 cachestart = offset - o
952 cachestart = offset - o
953 cacheend = cachestart + length
953 cacheend = cachestart + length
954 if cachestart >= 0 and cacheend <= l:
954 if cachestart >= 0 and cacheend <= l:
955 if cachestart == 0 and cacheend == l:
955 if cachestart == 0 and cacheend == l:
956 return d # avoid a copy
956 return d # avoid a copy
957 return util.buffer(d, cachestart, cacheend - cachestart)
957 return util.buffer(d, cachestart, cacheend - cachestart)
958
958
959 return self._loadchunk(offset, length)
959 return self._loadchunk(offset, length)
960
960
961 def _chunkraw(self, startrev, endrev):
961 def _chunkraw(self, startrev, endrev):
962 start = self.start(startrev)
962 start = self.start(startrev)
963 end = self.end(endrev)
963 end = self.end(endrev)
964 if self._inline:
964 if self._inline:
965 start += (startrev + 1) * self._io.size
965 start += (startrev + 1) * self._io.size
966 end += (endrev + 1) * self._io.size
966 end += (endrev + 1) * self._io.size
967 length = end - start
967 length = end - start
968 return self._getchunk(start, length)
968 return self._getchunk(start, length)
969
969
970 def _chunk(self, rev):
970 def _chunk(self, rev):
971 return decompress(self._chunkraw(rev, rev))
971 return decompress(self._chunkraw(rev, rev))
972
972
973 def _chunks(self, revs):
973 def _chunks(self, revs):
974 '''faster version of [self._chunk(rev) for rev in revs]
974 '''faster version of [self._chunk(rev) for rev in revs]
975
975
976 Assumes that revs is in ascending order.'''
976 Assumes that revs is in ascending order.'''
977 if not revs:
977 if not revs:
978 return []
978 return []
979 start = self.start
979 start = self.start
980 length = self.length
980 length = self.length
981 inline = self._inline
981 inline = self._inline
982 iosize = self._io.size
982 iosize = self._io.size
983 buffer = util.buffer
983 buffer = util.buffer
984
984
985 l = []
985 l = []
986 ladd = l.append
986 ladd = l.append
987
987
988 # preload the cache
988 # preload the cache
989 try:
989 try:
990 while True:
990 while True:
991 # ensure that the cache doesn't change out from under us
991 # ensure that the cache doesn't change out from under us
992 _cache = self._chunkcache
992 _cache = self._chunkcache
993 self._chunkraw(revs[0], revs[-1])
993 self._chunkraw(revs[0], revs[-1])
994 if _cache == self._chunkcache:
994 if _cache == self._chunkcache:
995 break
995 break
996 offset, data = _cache
996 offset, data = _cache
997 except OverflowError:
997 except OverflowError:
998 # issue4215 - we can't cache a run of chunks greater than
998 # issue4215 - we can't cache a run of chunks greater than
999 # 2G on Windows
999 # 2G on Windows
1000 return [self._chunk(rev) for rev in revs]
1000 return [self._chunk(rev) for rev in revs]
1001
1001
1002 for rev in revs:
1002 for rev in revs:
1003 chunkstart = start(rev)
1003 chunkstart = start(rev)
1004 if inline:
1004 if inline:
1005 chunkstart += (rev + 1) * iosize
1005 chunkstart += (rev + 1) * iosize
1006 chunklength = length(rev)
1006 chunklength = length(rev)
1007 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1007 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1008
1008
1009 return l
1009 return l
1010
1010
1011 def _chunkclear(self):
1011 def _chunkclear(self):
1012 self._chunkcache = (0, '')
1012 self._chunkcache = (0, '')
1013
1013
1014 def deltaparent(self, rev):
1014 def deltaparent(self, rev):
1015 """return deltaparent of the given revision"""
1015 """return deltaparent of the given revision"""
1016 base = self.index[rev][3]
1016 base = self.index[rev][3]
1017 if base == rev:
1017 if base == rev:
1018 return nullrev
1018 return nullrev
1019 elif self._generaldelta:
1019 elif self._generaldelta:
1020 return base
1020 return base
1021 else:
1021 else:
1022 return rev - 1
1022 return rev - 1
1023
1023
1024 def revdiff(self, rev1, rev2):
1024 def revdiff(self, rev1, rev2):
1025 """return or calculate a delta between two revisions"""
1025 """return or calculate a delta between two revisions"""
1026 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1026 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1027 return str(self._chunk(rev2))
1027 return str(self._chunk(rev2))
1028
1028
1029 return mdiff.textdiff(self.revision(rev1),
1029 return mdiff.textdiff(self.revision(rev1),
1030 self.revision(rev2))
1030 self.revision(rev2))
1031
1031
1032 def revision(self, nodeorrev):
1032 def revision(self, nodeorrev):
1033 """return an uncompressed revision of a given node or revision
1033 """return an uncompressed revision of a given node or revision
1034 number.
1034 number.
1035 """
1035 """
1036 if isinstance(nodeorrev, int):
1036 if isinstance(nodeorrev, int):
1037 rev = nodeorrev
1037 rev = nodeorrev
1038 node = self.node(rev)
1038 node = self.node(rev)
1039 else:
1039 else:
1040 node = nodeorrev
1040 node = nodeorrev
1041 rev = None
1041 rev = None
1042
1042
1043 _cache = self._cache # grab local copy of cache to avoid thread race
1043 _cache = self._cache # grab local copy of cache to avoid thread race
1044 cachedrev = None
1044 cachedrev = None
1045 if node == nullid:
1045 if node == nullid:
1046 return ""
1046 return ""
1047 if _cache:
1047 if _cache:
1048 if _cache[0] == node:
1048 if _cache[0] == node:
1049 return _cache[2]
1049 return _cache[2]
1050 cachedrev = _cache[1]
1050 cachedrev = _cache[1]
1051
1051
1052 # look up what we need to read
1052 # look up what we need to read
1053 text = None
1053 text = None
1054 if rev is None:
1054 if rev is None:
1055 rev = self.rev(node)
1055 rev = self.rev(node)
1056
1056
1057 # check rev flags
1057 # check rev flags
1058 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1058 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1059 raise RevlogError(_('incompatible revision flag %x') %
1059 raise RevlogError(_('incompatible revision flag %x') %
1060 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1060 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1061
1061
1062 # build delta chain
1062 # build delta chain
1063 chain = []
1063 chain = []
1064 index = self.index # for performance
1064 index = self.index # for performance
1065 generaldelta = self._generaldelta
1065 generaldelta = self._generaldelta
1066 iterrev = rev
1066 iterrev = rev
1067 e = index[iterrev]
1067 e = index[iterrev]
1068 while iterrev != e[3] and iterrev != cachedrev:
1068 while iterrev != e[3] and iterrev != cachedrev:
1069 chain.append(iterrev)
1069 chain.append(iterrev)
1070 if generaldelta:
1070 if generaldelta:
1071 iterrev = e[3]
1071 iterrev = e[3]
1072 else:
1072 else:
1073 iterrev -= 1
1073 iterrev -= 1
1074 e = index[iterrev]
1074 e = index[iterrev]
1075
1075
1076 if iterrev == cachedrev:
1076 if iterrev == cachedrev:
1077 # cache hit
1077 # cache hit
1078 text = _cache[2]
1078 text = _cache[2]
1079 else:
1079 else:
1080 chain.append(iterrev)
1080 chain.append(iterrev)
1081 chain.reverse()
1081 chain.reverse()
1082
1082
1083 # drop cache to save memory
1083 # drop cache to save memory
1084 self._cache = None
1084 self._cache = None
1085
1085
1086 bins = self._chunks(chain)
1086 bins = self._chunks(chain)
1087 if text is None:
1087 if text is None:
1088 text = str(bins[0])
1088 text = str(bins[0])
1089 bins = bins[1:]
1089 bins = bins[1:]
1090
1090
1091 text = mdiff.patches(text, bins)
1091 text = mdiff.patches(text, bins)
1092
1092
1093 text = self._checkhash(text, node, rev)
1093 text = self._checkhash(text, node, rev)
1094
1094
1095 self._cache = (node, rev, text)
1095 self._cache = (node, rev, text)
1096 return text
1096 return text
1097
1097
1098 def hash(self, text, p1, p2):
1098 def hash(self, text, p1, p2):
1099 """Compute a node hash.
1099 """Compute a node hash.
1100
1100
1101 Available as a function so that subclasses can replace the hash
1101 Available as a function so that subclasses can replace the hash
1102 as needed.
1102 as needed.
1103 """
1103 """
1104 return hash(text, p1, p2)
1104 return hash(text, p1, p2)
1105
1105
1106 def _checkhash(self, text, node, rev):
1106 def _checkhash(self, text, node, rev):
1107 p1, p2 = self.parents(node)
1107 p1, p2 = self.parents(node)
1108 self.checkhash(text, p1, p2, node, rev)
1108 self.checkhash(text, p1, p2, node, rev)
1109 return text
1109 return text
1110
1110
1111 def checkhash(self, text, p1, p2, node, rev=None):
1111 def checkhash(self, text, p1, p2, node, rev=None):
1112 if node != self.hash(text, p1, p2):
1112 if node != self.hash(text, p1, p2):
1113 revornode = rev
1113 revornode = rev
1114 if revornode is None:
1114 if revornode is None:
1115 revornode = templatefilters.short(hex(node))
1115 revornode = templatefilters.short(hex(node))
1116 raise RevlogError(_("integrity check failed on %s:%s")
1116 raise RevlogError(_("integrity check failed on %s:%s")
1117 % (self.indexfile, revornode))
1117 % (self.indexfile, revornode))
1118
1118
1119 def checkinlinesize(self, tr, fp=None):
1119 def checkinlinesize(self, tr, fp=None):
1120 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1120 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1121 return
1121 return
1122
1122
1123 trinfo = tr.find(self.indexfile)
1123 trinfo = tr.find(self.indexfile)
1124 if trinfo is None:
1124 if trinfo is None:
1125 raise RevlogError(_("%s not found in the transaction")
1125 raise RevlogError(_("%s not found in the transaction")
1126 % self.indexfile)
1126 % self.indexfile)
1127
1127
1128 trindex = trinfo[2]
1128 trindex = trinfo[2]
1129 dataoff = self.start(trindex)
1129 dataoff = self.start(trindex)
1130
1130
1131 tr.add(self.datafile, dataoff)
1131 tr.add(self.datafile, dataoff)
1132
1132
1133 if fp:
1133 if fp:
1134 fp.flush()
1134 fp.flush()
1135 fp.close()
1135 fp.close()
1136
1136
1137 df = self.opener(self.datafile, 'w')
1137 df = self.opener(self.datafile, 'w')
1138 try:
1138 try:
1139 for r in self:
1139 for r in self:
1140 df.write(self._chunkraw(r, r))
1140 df.write(self._chunkraw(r, r))
1141 finally:
1141 finally:
1142 df.close()
1142 df.close()
1143
1143
1144 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1144 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1145 self.version &= ~(REVLOGNGINLINEDATA)
1145 self.version &= ~(REVLOGNGINLINEDATA)
1146 self._inline = False
1146 self._inline = False
1147 for i in self:
1147 for i in self:
1148 e = self._io.packentry(self.index[i], self.node, self.version, i)
1148 e = self._io.packentry(self.index[i], self.node, self.version, i)
1149 fp.write(e)
1149 fp.write(e)
1150
1150
1151 # if we don't call close, the temp file will never replace the
1151 # if we don't call close, the temp file will never replace the
1152 # real index
1152 # real index
1153 fp.close()
1153 fp.close()
1154
1154
1155 tr.replace(self.indexfile, trindex * self._io.size)
1155 tr.replace(self.indexfile, trindex * self._io.size)
1156 self._chunkclear()
1156 self._chunkclear()
1157
1157
1158 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1158 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1159 node=None):
1159 node=None):
1160 """add a revision to the log
1160 """add a revision to the log
1161
1161
1162 text - the revision data to add
1162 text - the revision data to add
1163 transaction - the transaction object used for rollback
1163 transaction - the transaction object used for rollback
1164 link - the linkrev data to add
1164 link - the linkrev data to add
1165 p1, p2 - the parent nodeids of the revision
1165 p1, p2 - the parent nodeids of the revision
1166 cachedelta - an optional precomputed delta
1166 cachedelta - an optional precomputed delta
1167 node - nodeid of revision; typically node is not specified, and it is
1167 node - nodeid of revision; typically node is not specified, and it is
1168 computed by default as hash(text, p1, p2), however subclasses might
1168 computed by default as hash(text, p1, p2), however subclasses might
1169 use different hashing method (and override checkhash() in such case)
1169 use different hashing method (and override checkhash() in such case)
1170 """
1170 """
1171 if link == nullrev:
1171 if link == nullrev:
1172 raise RevlogError(_("attempted to add linkrev -1 to %s")
1172 raise RevlogError(_("attempted to add linkrev -1 to %s")
1173 % self.indexfile)
1173 % self.indexfile)
1174 node = node or self.hash(text, p1, p2)
1174 node = node or self.hash(text, p1, p2)
1175 if node in self.nodemap:
1175 if node in self.nodemap:
1176 return node
1176 return node
1177
1177
1178 dfh = None
1178 dfh = None
1179 if not self._inline:
1179 if not self._inline:
1180 dfh = self.opener(self.datafile, "a")
1180 dfh = self.opener(self.datafile, "a")
1181 ifh = self.opener(self.indexfile, "a+")
1181 ifh = self.opener(self.indexfile, "a+")
1182 try:
1182 try:
1183 return self._addrevision(node, text, transaction, link, p1, p2,
1183 return self._addrevision(node, text, transaction, link, p1, p2,
1184 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1184 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1185 finally:
1185 finally:
1186 if dfh:
1186 if dfh:
1187 dfh.close()
1187 dfh.close()
1188 ifh.close()
1188 ifh.close()
1189
1189
1190 def compress(self, text):
1190 def compress(self, text):
1191 """ generate a possibly-compressed representation of text """
1191 """ generate a possibly-compressed representation of text """
1192 if not text:
1192 if not text:
1193 return ("", text)
1193 return ("", text)
1194 l = len(text)
1194 l = len(text)
1195 bin = None
1195 bin = None
1196 if l < 44:
1196 if l < 44:
1197 pass
1197 pass
1198 elif l > 1000000:
1198 elif l > 1000000:
1199 # zlib makes an internal copy, thus doubling memory usage for
1199 # zlib makes an internal copy, thus doubling memory usage for
1200 # large files, so lets do this in pieces
1200 # large files, so lets do this in pieces
1201 z = zlib.compressobj()
1201 z = zlib.compressobj()
1202 p = []
1202 p = []
1203 pos = 0
1203 pos = 0
1204 while pos < l:
1204 while pos < l:
1205 pos2 = pos + 2**20
1205 pos2 = pos + 2**20
1206 p.append(z.compress(text[pos:pos2]))
1206 p.append(z.compress(text[pos:pos2]))
1207 pos = pos2
1207 pos = pos2
1208 p.append(z.flush())
1208 p.append(z.flush())
1209 if sum(map(len, p)) < l:
1209 if sum(map(len, p)) < l:
1210 bin = "".join(p)
1210 bin = "".join(p)
1211 else:
1211 else:
1212 bin = _compress(text)
1212 bin = _compress(text)
1213 if bin is None or len(bin) > l:
1213 if bin is None or len(bin) > l:
1214 if text[0] == '\0':
1214 if text[0] == '\0':
1215 return ("", text)
1215 return ("", text)
1216 return ('u', text)
1216 return ('u', text)
1217 return ("", bin)
1217 return ("", bin)
1218
1218
1219 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1219 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1220 cachedelta, ifh, dfh):
1220 cachedelta, ifh, dfh):
1221 """internal function to add revisions to the log
1221 """internal function to add revisions to the log
1222
1222
1223 see addrevision for argument descriptions.
1223 see addrevision for argument descriptions.
1224 invariants:
1224 invariants:
1225 - text is optional (can be None); if not set, cachedelta must be set.
1225 - text is optional (can be None); if not set, cachedelta must be set.
1226 if both are set, they must correspond to each other.
1226 if both are set, they must correspond to each other.
1227 """
1227 """
1228 btext = [text]
1228 btext = [text]
1229 def buildtext():
1229 def buildtext():
1230 if btext[0] is not None:
1230 if btext[0] is not None:
1231 return btext[0]
1231 return btext[0]
1232 # flush any pending writes here so we can read it in revision
1232 # flush any pending writes here so we can read it in revision
1233 if dfh:
1233 if dfh:
1234 dfh.flush()
1234 dfh.flush()
1235 ifh.flush()
1235 ifh.flush()
1236 basetext = self.revision(self.node(cachedelta[0]))
1236 basetext = self.revision(self.node(cachedelta[0]))
1237 btext[0] = mdiff.patch(basetext, cachedelta[1])
1237 btext[0] = mdiff.patch(basetext, cachedelta[1])
1238 try:
1238 try:
1239 self.checkhash(btext[0], p1, p2, node)
1239 self.checkhash(btext[0], p1, p2, node)
1240 if flags & REVIDX_ISCENSORED:
1240 if flags & REVIDX_ISCENSORED:
1241 raise RevlogError(_('node %s is not censored') % node)
1241 raise RevlogError(_('node %s is not censored') % node)
1242 except CensoredNodeError:
1242 except CensoredNodeError:
1243 # must pass the censored index flag to add censored revisions
1243 # must pass the censored index flag to add censored revisions
1244 if not flags & REVIDX_ISCENSORED:
1244 if not flags & REVIDX_ISCENSORED:
1245 raise
1245 raise
1246 return btext[0]
1246 return btext[0]
1247
1247
1248 def builddelta(rev):
1248 def builddelta(rev):
1249 # can we use the cached delta?
1249 # can we use the cached delta?
1250 if cachedelta and cachedelta[0] == rev:
1250 if cachedelta and cachedelta[0] == rev:
1251 delta = cachedelta[1]
1251 delta = cachedelta[1]
1252 else:
1252 else:
1253 t = buildtext()
1253 t = buildtext()
1254 ptext = self.revision(self.node(rev))
1254 ptext = self.revision(self.node(rev))
1255 delta = mdiff.textdiff(ptext, t)
1255 delta = mdiff.textdiff(ptext, t)
1256 data = self.compress(delta)
1256 data = self.compress(delta)
1257 l = len(data[1]) + len(data[0])
1257 l = len(data[1]) + len(data[0])
1258 if basecache[0] == rev:
1258 if basecache[0] == rev:
1259 chainbase = basecache[1]
1259 chainbase = basecache[1]
1260 else:
1260 else:
1261 chainbase = self.chainbase(rev)
1261 chainbase = self.chainbase(rev)
1262 dist = l + offset - self.start(chainbase)
1262 dist = l + offset - self.start(chainbase)
1263 if self._generaldelta:
1263 if self._generaldelta:
1264 base = rev
1264 base = rev
1265 else:
1265 else:
1266 base = chainbase
1266 base = chainbase
1267 chainlen, compresseddeltalen = self._chaininfo(rev)
1267 chainlen, compresseddeltalen = self._chaininfo(rev)
1268 chainlen += 1
1268 chainlen += 1
1269 compresseddeltalen += l
1269 compresseddeltalen += l
1270 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1270 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1271
1271
1272 curr = len(self)
1272 curr = len(self)
1273 prev = curr - 1
1273 prev = curr - 1
1274 base = chainbase = curr
1274 base = chainbase = curr
1275 chainlen = None
1275 chainlen = None
1276 offset = self.end(prev)
1276 offset = self.end(prev)
1277 d = None
1277 d = None
1278 if self._basecache is None:
1278 if self._basecache is None:
1279 self._basecache = (prev, self.chainbase(prev))
1279 self._basecache = (prev, self.chainbase(prev))
1280 basecache = self._basecache
1280 basecache = self._basecache
1281 p1r, p2r = self.rev(p1), self.rev(p2)
1281 p1r, p2r = self.rev(p1), self.rev(p2)
1282
1282
1283 # should we try to build a delta?
1283 # should we try to build a delta?
1284 if prev != nullrev:
1284 if prev != nullrev:
1285 if self._generaldelta:
1285 if self._generaldelta:
1286 if p1r >= basecache[1]:
1286 if p1r >= basecache[1]:
1287 d = builddelta(p1r)
1287 d = builddelta(p1r)
1288 elif p2r >= basecache[1]:
1288 elif p2r >= basecache[1]:
1289 d = builddelta(p2r)
1289 d = builddelta(p2r)
1290 else:
1290 else:
1291 d = builddelta(prev)
1291 d = builddelta(prev)
1292 else:
1292 else:
1293 d = builddelta(prev)
1293 d = builddelta(prev)
1294 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1294 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1295
1295
1296 # full versions are inserted when the needed deltas
1296 # full versions are inserted when the needed deltas
1297 # become comparable to the uncompressed text
1297 # become comparable to the uncompressed text
1298 if text is None:
1298 if text is None:
1299 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1299 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1300 cachedelta[1])
1300 cachedelta[1])
1301 else:
1301 else:
1302 textlen = len(text)
1302 textlen = len(text)
1303
1303
1304 # - 'dist' is the distance from the base revision -- bounding it limits
1304 # - 'dist' is the distance from the base revision -- bounding it limits
1305 # the amount of I/O we need to do.
1305 # the amount of I/O we need to do.
1306 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1306 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1307 # to apply -- bounding it limits the amount of CPU we consume.
1307 # to apply -- bounding it limits the amount of CPU we consume.
1308 if (d is None or dist > textlen * 4 or l > textlen or
1308 if (d is None or dist > textlen * 4 or l > textlen or
1309 compresseddeltalen > textlen * 2 or
1309 compresseddeltalen > textlen * 2 or
1310 (self._maxchainlen and chainlen > self._maxchainlen)):
1310 (self._maxchainlen and chainlen > self._maxchainlen)):
1311 text = buildtext()
1311 text = buildtext()
1312 data = self.compress(text)
1312 data = self.compress(text)
1313 l = len(data[1]) + len(data[0])
1313 l = len(data[1]) + len(data[0])
1314 base = chainbase = curr
1314 base = chainbase = curr
1315
1315
1316 e = (offset_type(offset, flags), l, textlen,
1316 e = (offset_type(offset, flags), l, textlen,
1317 base, link, p1r, p2r, node)
1317 base, link, p1r, p2r, node)
1318 self.index.insert(-1, e)
1318 self.index.insert(-1, e)
1319 self.nodemap[node] = curr
1319 self.nodemap[node] = curr
1320
1320
1321 entry = self._io.packentry(e, self.node, self.version, curr)
1321 entry = self._io.packentry(e, self.node, self.version, curr)
1322 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1322 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1323
1323
1324 if type(text) == str: # only accept immutable objects
1324 if type(text) == str: # only accept immutable objects
1325 self._cache = (node, curr, text)
1325 self._cache = (node, curr, text)
1326 self._basecache = (curr, chainbase)
1326 self._basecache = (curr, chainbase)
1327 return node
1327 return node
1328
1328
1329 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1329 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1330 curr = len(self) - 1
1330 curr = len(self) - 1
1331 if not self._inline:
1331 if not self._inline:
1332 transaction.add(self.datafile, offset)
1332 transaction.add(self.datafile, offset)
1333 transaction.add(self.indexfile, curr * len(entry))
1333 transaction.add(self.indexfile, curr * len(entry))
1334 if data[0]:
1334 if data[0]:
1335 dfh.write(data[0])
1335 dfh.write(data[0])
1336 dfh.write(data[1])
1336 dfh.write(data[1])
1337 dfh.flush()
1337 dfh.flush()
1338 ifh.write(entry)
1338 ifh.write(entry)
1339 else:
1339 else:
1340 offset += curr * self._io.size
1340 offset += curr * self._io.size
1341 transaction.add(self.indexfile, offset, curr)
1341 transaction.add(self.indexfile, offset, curr)
1342 ifh.write(entry)
1342 ifh.write(entry)
1343 ifh.write(data[0])
1343 ifh.write(data[0])
1344 ifh.write(data[1])
1344 ifh.write(data[1])
1345 self.checkinlinesize(transaction, ifh)
1345 self.checkinlinesize(transaction, ifh)
1346
1346
1347 def addgroup(self, bundle, linkmapper, transaction):
1347 def addgroup(self, bundle, linkmapper, transaction):
1348 """
1348 """
1349 add a delta group
1349 add a delta group
1350
1350
1351 given a set of deltas, add them to the revision log. the
1351 given a set of deltas, add them to the revision log. the
1352 first delta is against its parent, which should be in our
1352 first delta is against its parent, which should be in our
1353 log, the rest are against the previous delta.
1353 log, the rest are against the previous delta.
1354 """
1354 """
1355
1355
1356 # track the base of the current delta log
1356 # track the base of the current delta log
1357 content = []
1357 content = []
1358 node = None
1358 node = None
1359
1359
1360 r = len(self)
1360 r = len(self)
1361 end = 0
1361 end = 0
1362 if r:
1362 if r:
1363 end = self.end(r - 1)
1363 end = self.end(r - 1)
1364 ifh = self.opener(self.indexfile, "a+")
1364 ifh = self.opener(self.indexfile, "a+")
1365 isize = r * self._io.size
1365 isize = r * self._io.size
1366 if self._inline:
1366 if self._inline:
1367 transaction.add(self.indexfile, end + isize, r)
1367 transaction.add(self.indexfile, end + isize, r)
1368 dfh = None
1368 dfh = None
1369 else:
1369 else:
1370 transaction.add(self.indexfile, isize, r)
1370 transaction.add(self.indexfile, isize, r)
1371 transaction.add(self.datafile, end)
1371 transaction.add(self.datafile, end)
1372 dfh = self.opener(self.datafile, "a")
1372 dfh = self.opener(self.datafile, "a")
1373
1373
1374 try:
1374 try:
1375 # loop through our set of deltas
1375 # loop through our set of deltas
1376 chain = None
1376 chain = None
1377 while True:
1377 while True:
1378 chunkdata = bundle.deltachunk(chain)
1378 chunkdata = bundle.deltachunk(chain)
1379 if not chunkdata:
1379 if not chunkdata:
1380 break
1380 break
1381 node = chunkdata['node']
1381 node = chunkdata['node']
1382 p1 = chunkdata['p1']
1382 p1 = chunkdata['p1']
1383 p2 = chunkdata['p2']
1383 p2 = chunkdata['p2']
1384 cs = chunkdata['cs']
1384 cs = chunkdata['cs']
1385 deltabase = chunkdata['deltabase']
1385 deltabase = chunkdata['deltabase']
1386 delta = chunkdata['delta']
1386 delta = chunkdata['delta']
1387
1387
1388 content.append(node)
1388 content.append(node)
1389
1389
1390 link = linkmapper(cs)
1390 link = linkmapper(cs)
1391 if node in self.nodemap:
1391 if node in self.nodemap:
1392 # this can happen if two branches make the same change
1392 # this can happen if two branches make the same change
1393 chain = node
1393 chain = node
1394 continue
1394 continue
1395
1395
1396 for p in (p1, p2):
1396 for p in (p1, p2):
1397 if p not in self.nodemap:
1397 if p not in self.nodemap:
1398 raise LookupError(p, self.indexfile,
1398 raise LookupError(p, self.indexfile,
1399 _('unknown parent'))
1399 _('unknown parent'))
1400
1400
1401 if deltabase not in self.nodemap:
1401 if deltabase not in self.nodemap:
1402 raise LookupError(deltabase, self.indexfile,
1402 raise LookupError(deltabase, self.indexfile,
1403 _('unknown delta base'))
1403 _('unknown delta base'))
1404
1404
1405 baserev = self.rev(deltabase)
1405 baserev = self.rev(deltabase)
1406 chain = self._addrevision(node, None, transaction, link,
1406 chain = self._addrevision(node, None, transaction, link,
1407 p1, p2, REVIDX_DEFAULT_FLAGS,
1407 p1, p2, REVIDX_DEFAULT_FLAGS,
1408 (baserev, delta), ifh, dfh)
1408 (baserev, delta), ifh, dfh)
1409 if not dfh and not self._inline:
1409 if not dfh and not self._inline:
1410 # addrevision switched from inline to conventional
1410 # addrevision switched from inline to conventional
1411 # reopen the index
1411 # reopen the index
1412 ifh.close()
1412 ifh.close()
1413 dfh = self.opener(self.datafile, "a")
1413 dfh = self.opener(self.datafile, "a")
1414 ifh = self.opener(self.indexfile, "a")
1414 ifh = self.opener(self.indexfile, "a")
1415 finally:
1415 finally:
1416 if dfh:
1416 if dfh:
1417 dfh.close()
1417 dfh.close()
1418 ifh.close()
1418 ifh.close()
1419
1419
1420 return content
1420 return content
1421
1421
1422 def iscensored(self, rev):
1423 """Check if a file revision is censored."""
1424 return False
1425
1422 def getstrippoint(self, minlink):
1426 def getstrippoint(self, minlink):
1423 """find the minimum rev that must be stripped to strip the linkrev
1427 """find the minimum rev that must be stripped to strip the linkrev
1424
1428
1425 Returns a tuple containing the minimum rev and a set of all revs that
1429 Returns a tuple containing the minimum rev and a set of all revs that
1426 have linkrevs that will be broken by this strip.
1430 have linkrevs that will be broken by this strip.
1427 """
1431 """
1428 brokenrevs = set()
1432 brokenrevs = set()
1429 strippoint = len(self)
1433 strippoint = len(self)
1430
1434
1431 heads = {}
1435 heads = {}
1432 futurelargelinkrevs = set()
1436 futurelargelinkrevs = set()
1433 for head in self.headrevs():
1437 for head in self.headrevs():
1434 headlinkrev = self.linkrev(head)
1438 headlinkrev = self.linkrev(head)
1435 heads[head] = headlinkrev
1439 heads[head] = headlinkrev
1436 if headlinkrev >= minlink:
1440 if headlinkrev >= minlink:
1437 futurelargelinkrevs.add(headlinkrev)
1441 futurelargelinkrevs.add(headlinkrev)
1438
1442
1439 # This algorithm involves walking down the rev graph, starting at the
1443 # This algorithm involves walking down the rev graph, starting at the
1440 # heads. Since the revs are topologically sorted according to linkrev,
1444 # heads. Since the revs are topologically sorted according to linkrev,
1441 # once all head linkrevs are below the minlink, we know there are
1445 # once all head linkrevs are below the minlink, we know there are
1442 # no more revs that could have a linkrev greater than minlink.
1446 # no more revs that could have a linkrev greater than minlink.
1443 # So we can stop walking.
1447 # So we can stop walking.
1444 while futurelargelinkrevs:
1448 while futurelargelinkrevs:
1445 strippoint -= 1
1449 strippoint -= 1
1446 linkrev = heads.pop(strippoint)
1450 linkrev = heads.pop(strippoint)
1447
1451
1448 if linkrev < minlink:
1452 if linkrev < minlink:
1449 brokenrevs.add(strippoint)
1453 brokenrevs.add(strippoint)
1450 else:
1454 else:
1451 futurelargelinkrevs.remove(linkrev)
1455 futurelargelinkrevs.remove(linkrev)
1452
1456
1453 for p in self.parentrevs(strippoint):
1457 for p in self.parentrevs(strippoint):
1454 if p != nullrev:
1458 if p != nullrev:
1455 plinkrev = self.linkrev(p)
1459 plinkrev = self.linkrev(p)
1456 heads[p] = plinkrev
1460 heads[p] = plinkrev
1457 if plinkrev >= minlink:
1461 if plinkrev >= minlink:
1458 futurelargelinkrevs.add(plinkrev)
1462 futurelargelinkrevs.add(plinkrev)
1459
1463
1460 return strippoint, brokenrevs
1464 return strippoint, brokenrevs
1461
1465
1462 def strip(self, minlink, transaction):
1466 def strip(self, minlink, transaction):
1463 """truncate the revlog on the first revision with a linkrev >= minlink
1467 """truncate the revlog on the first revision with a linkrev >= minlink
1464
1468
1465 This function is called when we're stripping revision minlink and
1469 This function is called when we're stripping revision minlink and
1466 its descendants from the repository.
1470 its descendants from the repository.
1467
1471
1468 We have to remove all revisions with linkrev >= minlink, because
1472 We have to remove all revisions with linkrev >= minlink, because
1469 the equivalent changelog revisions will be renumbered after the
1473 the equivalent changelog revisions will be renumbered after the
1470 strip.
1474 strip.
1471
1475
1472 So we truncate the revlog on the first of these revisions, and
1476 So we truncate the revlog on the first of these revisions, and
1473 trust that the caller has saved the revisions that shouldn't be
1477 trust that the caller has saved the revisions that shouldn't be
1474 removed and that it'll re-add them after this truncation.
1478 removed and that it'll re-add them after this truncation.
1475 """
1479 """
1476 if len(self) == 0:
1480 if len(self) == 0:
1477 return
1481 return
1478
1482
1479 rev, _ = self.getstrippoint(minlink)
1483 rev, _ = self.getstrippoint(minlink)
1480 if rev == len(self):
1484 if rev == len(self):
1481 return
1485 return
1482
1486
1483 # first truncate the files on disk
1487 # first truncate the files on disk
1484 end = self.start(rev)
1488 end = self.start(rev)
1485 if not self._inline:
1489 if not self._inline:
1486 transaction.add(self.datafile, end)
1490 transaction.add(self.datafile, end)
1487 end = rev * self._io.size
1491 end = rev * self._io.size
1488 else:
1492 else:
1489 end += rev * self._io.size
1493 end += rev * self._io.size
1490
1494
1491 transaction.add(self.indexfile, end)
1495 transaction.add(self.indexfile, end)
1492
1496
1493 # then reset internal state in memory to forget those revisions
1497 # then reset internal state in memory to forget those revisions
1494 self._cache = None
1498 self._cache = None
1495 self._chaininfocache = {}
1499 self._chaininfocache = {}
1496 self._chunkclear()
1500 self._chunkclear()
1497 for x in xrange(rev, len(self)):
1501 for x in xrange(rev, len(self)):
1498 del self.nodemap[self.node(x)]
1502 del self.nodemap[self.node(x)]
1499
1503
1500 del self.index[rev:-1]
1504 del self.index[rev:-1]
1501
1505
1502 def checksize(self):
1506 def checksize(self):
1503 expected = 0
1507 expected = 0
1504 if len(self):
1508 if len(self):
1505 expected = max(0, self.end(len(self) - 1))
1509 expected = max(0, self.end(len(self) - 1))
1506
1510
1507 try:
1511 try:
1508 f = self.opener(self.datafile)
1512 f = self.opener(self.datafile)
1509 f.seek(0, 2)
1513 f.seek(0, 2)
1510 actual = f.tell()
1514 actual = f.tell()
1511 f.close()
1515 f.close()
1512 dd = actual - expected
1516 dd = actual - expected
1513 except IOError, inst:
1517 except IOError, inst:
1514 if inst.errno != errno.ENOENT:
1518 if inst.errno != errno.ENOENT:
1515 raise
1519 raise
1516 dd = 0
1520 dd = 0
1517
1521
1518 try:
1522 try:
1519 f = self.opener(self.indexfile)
1523 f = self.opener(self.indexfile)
1520 f.seek(0, 2)
1524 f.seek(0, 2)
1521 actual = f.tell()
1525 actual = f.tell()
1522 f.close()
1526 f.close()
1523 s = self._io.size
1527 s = self._io.size
1524 i = max(0, actual // s)
1528 i = max(0, actual // s)
1525 di = actual - (i * s)
1529 di = actual - (i * s)
1526 if self._inline:
1530 if self._inline:
1527 databytes = 0
1531 databytes = 0
1528 for r in self:
1532 for r in self:
1529 databytes += max(0, self.length(r))
1533 databytes += max(0, self.length(r))
1530 dd = 0
1534 dd = 0
1531 di = actual - len(self) * s - databytes
1535 di = actual - len(self) * s - databytes
1532 except IOError, inst:
1536 except IOError, inst:
1533 if inst.errno != errno.ENOENT:
1537 if inst.errno != errno.ENOENT:
1534 raise
1538 raise
1535 di = 0
1539 di = 0
1536
1540
1537 return (dd, di)
1541 return (dd, di)
1538
1542
1539 def files(self):
1543 def files(self):
1540 res = [self.indexfile]
1544 res = [self.indexfile]
1541 if not self._inline:
1545 if not self._inline:
1542 res.append(self.datafile)
1546 res.append(self.datafile)
1543 return res
1547 return res
@@ -1,236 +1,242 b''
1 # unionrepo.py - repository class for viewing union of repository changesets
1 # unionrepo.py - repository class for viewing union of repository changesets
2 #
2 #
3 # Derived from bundlerepo.py
3 # Derived from bundlerepo.py
4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Repository class for "in-memory pull" of one local repository to another,
10 """Repository class for "in-memory pull" of one local repository to another,
11 allowing operations like diff and log with revsets.
11 allowing operations like diff and log with revsets.
12 """
12 """
13
13
14 from node import nullid
14 from node import nullid
15 from i18n import _
15 from i18n import _
16 import os
16 import os
17 import util, mdiff, cmdutil, scmutil
17 import util, mdiff, cmdutil, scmutil
18 import localrepo, changelog, manifest, filelog, revlog
18 import localrepo, changelog, manifest, filelog, revlog
19
19
class unionrevlog(revlog.revlog):
    """Read-only revlog presenting the union of two revlogs.

    Revision numbers <= self.repotiprev live in the local (base) revlog;
    higher revision numbers are resolved by node id in self.revlog2.
    All mutating operations raise NotImplementedError.
    """

    def __init__(self, opener, indexfile, revlog2, linkmapper):
        # How it works:
        # To retrieve a revision, we just need to know the node id so we can
        # look it up in revlog2.
        #
        # To differentiate a rev in the second revlog from a rev in the revlog,
        # we check revision against repotiprev.
        opener = scmutil.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.revlog2 = revlog2

        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for rev2 in self.revlog2:
            rev = self.revlog2.index[rev2]
            # rev numbers - in revlog2, very different from self.rev
            _start, _csize, _rsize, _base, linkrev, p1rev, p2rev, node = rev

            if linkmapper is None: # link is to same revlog
                assert linkrev == rev2 # we never link back
                link = n
            else: # rev must be mapped from repo2 cl to unified cl by linkmapper
                link = linkmapper(linkrev)

            if node in self.nodemap:
                # this happens for the common revlog revisions
                self.bundlerevs.add(self.nodemap[node])
                continue

            p1node = self.revlog2.node(p1rev)
            p2node = self.revlog2.node(p2rev)

            # offsets/sizes are None because the data lives in revlog2
            e = (None, None, None, None,
                 link, self.rev(p1node), self.rev(p2node), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev):
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        # NOTE(review): this passes a node where revlog2._chunk's parameter
        # is named 'rev' -- looks suspicious, confirm against revlog._chunk
        return self.revlog2._chunk(self.node(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # both revs live in revlog2: map to its numbering via nodes
            return self.revlog2.revdiff(
                self.revlog2.rev(self.node(rev1)),
                self.revlog2.rev(self.node(rev2)))
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return self.baserevdiff(rev1, rev2)

        # one rev in each revlog: fall back to a full-text diff
        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, nodeorrev):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        if rev > self.repotiprev:
            text = self.revlog2.revision(node)
            self._cache = (node, rev, text)
        else:
            text = self.baserevision(rev)
            # already cached
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use unionrevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        # Exists for the same purpose as baserevision.
        return revlog.revlog.revdiff(self, rev1, rev2)

    # the union view is read-only: every mutator is disabled
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError
118
118
class unionchangelog(unionrevlog, changelog.changelog):
    """Union view over two changelogs.

    No linkmapper is needed: a changelog links to itself, so revs from
    the second changelog keep their identity mapping.
    """

    def __init__(self, opener, opener2):
        changelog.changelog.__init__(self, opener)
        linkmapper = None
        changelog2 = changelog.changelog(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, changelog2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.
        return changelog.changelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return changelog.changelog.revdiff(self, rev1, rev2)
135
135
class unionmanifest(unionrevlog, manifest.manifest):
    """Union view over two manifest revlogs.

    linkmapper translates second-repo changelog revs into revs of the
    unified changelog.
    """

    def __init__(self, opener, opener2, linkmapper):
        manifest.manifest.__init__(self, opener)
        manifest2 = manifest.manifest(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, manifest2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        # see unionchangelog.baserevision for why this indirection exists
        return manifest.manifest.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return manifest.manifest.revdiff(self, rev1, rev2)
148
148
class unionfilelog(unionrevlog, filelog.filelog):
    """Union view over two filelogs for the same tracked path."""

    def __init__(self, opener, path, opener2, linkmapper, repo):
        filelog.filelog.__init__(self, opener, path)
        filelog2 = filelog.filelog(opener2, path)
        unionrevlog.__init__(self, opener, self.indexfile, filelog2,
                             linkmapper)
        self._repo = repo

    def baserevision(self, nodeorrev):
        # see unionchangelog.baserevision for why this indirection exists
        return filelog.filelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return filelog.filelog.revdiff(self, rev1, rev2)

    def iscensored(self, rev):
        """Check if a revision is censored."""
        if rev <= self.repotiprev:
            return filelog.filelog.iscensored(self, rev)
        # 'rev' numbers revisions in the unified view, not in revlog2;
        # translate through the node id, as revdiff does above (the
        # original passed the unified rev straight to revlog2)
        return self.revlog2.iscensored(self.revlog2.rev(self.node(rev)))
class unionpeer(localrepo.localpeer):
    """Peer for a union repository; the union view is never pushable."""

    def canpush(self):
        return False
166
172
class unionrepository(localrepo.localrepository):
    """Repository presenting the union of a local repository (path) and a
    second local repository (path2), as if path2 had been pulled into path.
    """

    def __init__(self, ui, path, path2):
        localrepo.localrepository.__init__(self, ui, path)
        # the union view must never publish drafts from either side
        self.ui.setconfig('phases', 'publish', False, 'unionrepo')

        self._url = 'union:%s+%s' % (util.expandpath(path),
                                     util.expandpath(path2))
        self.repo2 = localrepo.localrepository(ui, path2)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        return unionchangelog(self.svfs, self.repo2.svfs)

    def _clrev(self, rev2):
        """map from repo2 changelog rev to temporary rev in self.changelog"""
        node = self.repo2.changelog.node(rev2)
        return self.changelog.rev(node)

    @localrepo.unfilteredpropertycache
    def manifest(self):
        return unionmanifest(self.svfs, self.repo2.svfs,
                             self._clrev)

    def url(self):
        return self._url

    def file(self, f):
        return unionfilelog(self.svfs, f, self.repo2.svfs,
                            self._clrev, self)

    def close(self):
        self.repo2.close()

    def cancopy(self):
        return False

    def peer(self):
        return unionpeer(self)

    def getcwd(self):
        return os.getcwd() # always outside the repo
208
214
def instance(ui, path, create):
    """repository factory for 'union:repo1+repo2' (and plain) paths.

    With a single-repo 'union:repo2' form, the first repository defaults
    to the enclosing working-directory repo. 'create' is rejected: union
    repositories are read-only views.
    """
    if create:
        raise util.Abort(_('cannot create new union repository'))
    parentpath = ui.config("bundle", "mainreporoot", "")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(os.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = os.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = os.path.join(cwd, '')
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    if path.startswith('union:'):
        s = path.split(":", 1)[1].split("+", 1)
        if len(s) == 1:
            # only one repo given: union it with the enclosing repo
            repopath, repopath2 = parentpath, s[0]
        else:
            repopath, repopath2 = s
    else:
        repopath, repopath2 = parentpath, path
    return unionrepository(ui, repopath, repopath2)
General Comments 0
You need to be logged in to leave comments. Login now