revlog: use absolute_import

Gregory Szorc
r27361:29f50344 default
@@ -1,1731 +1,1748 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

-# import stuff from node for others to import from revlog
-import collections
-from node import bin, hex, nullid, nullrev
-from i18n import _
-import ancestor, mdiff, parsers, error, util, templatefilters
-import struct, zlib, errno
+from __future__ import absolute_import
+
+import collections
+import errno
+import struct
+import zlib
+
+# import stuff from node for others to import from revlog
+from .node import (
+    bin,
+    hex,
+    nullid,
+    nullrev,
+)
+from .i18n import _
+from . import (
+    ancestor,
+    error,
+    mdiff,
+    parsers,
+    templatefilters,
+    util,
+)

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

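# Example (sketch): the version header stored in the first 4 bytes of the
# index ORs the format number with its feature flags; __init__ below splits
# it back apart with the same masks.
#
#   v = REVLOG_DEFAULT_VERSION           # REVLOGNG | REVLOGNGINLINEDATA
#   fmt, flags = v & 0xFFFF, v & ~0xFFFF
#   assert fmt == REVLOGNG and flags == REVLOGNGINLINEDATA
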
# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_DEFAULT_FLAGS = 0
REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)

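# Example (sketch): offset_type() packs a byte offset and 16-bit flags into
# a single index field; getoffset()/gettype() recover the two halves.
#
#   q = offset_type(4096, REVIDX_ISCENSORED)
#   assert getoffset(q) == 4096
#   assert gettype(q) == REVIDX_ISCENSORED
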
_nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()

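# Example (sketch): a root revision (both parents null) takes the fast path
# above; either way the result is a 20-byte SHA-1 digest that fills the
# nodeid columns of the index formats below.
#
#   node = hash('file content', nullid, nullid)
#   assert len(node) == 20
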
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        try:
            return _decompress(bin)
        except zlib.error as e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

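# Example (sketch of the inverse operation; the real compress() lives on the
# revlog class, not shown in this hunk): chunks are tagged so decompress()
# can dispatch on the first byte -- zlib streams already begin with 'x',
# literal text gets a 'u' prefix, and an empty chunk is stored as-is.
#
#   def _encodechunk(text):
#       if not text:
#           return text
#       c = _compress(text)
#       if len(c) < len(text):
#           return c            # 'x'-tagged by zlib itself
#       return 'u' + text       # stored uncompressed
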
# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = ">4l20s20s20s"

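# Example (sketch): a v0 record is four big-endian signed 32-bit ints plus
# three 20-byte nodeids, 76 bytes in all.
#
#   assert struct.calcsize(indexformatv0) == 76
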
class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
versionformat = ">I"

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

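# Example (sketch): an ng record is 64 bytes -- the 8-byte offset/flags
# field produced by offset_type() above, six 4-byte ints, a 20-byte nodeid,
# and 12 bytes of padding (the nodeid column is reserved at 32 bytes).
#
#   assert struct.calcsize(indexformatng) == 64
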
class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

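# Example (sketch): for rev 0 the first 4 bytes of the offset/flags field
# double as the version header, which packentry() splices in.
#
#   io = revlogio()
#   entry = (0, 0, 0, 0, 0, nullrev, nullrev, nullid)  # hypothetical entry
#   p = io.packentry(entry, None, REVLOG_DEFAULT_VERSION, 0)
#   assert p[:4] == _pack(versionformat, REVLOG_DEFAULT_VERSION)
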
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # 2-tuple of (rev, baserev) defining the base revision the delta chain
        # begins at for a revision.
        self._basecache = None
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._aggressivemergedeltas = False
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'aggressivemergedeltas' in opts:
                self._aggressivemergedeltas = opts['aggressivemergedeltas']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            indexdata = f.read()
            f.close()
            if len(indexdata) > 0:
                v = struct.unpack(versionformat, indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}

    def tip(self):
        return self.node(len(self.index) - 2)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

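    # Example (sketch): with generaldelta, the chain follows the base-rev
    # links in e[3]; without it, the chain is simply rev, rev - 1, ... down
    # to the base. For a chain 2 <- 3 <- 5, _chaininfo(5) returns
    # (2, length(5) + length(3) + length(2)): the chain length plus the
    # total compressed data that must be read to reconstruct the text.
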
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

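    # Example (sketch): on a linear history 0-1-2-3, descendants([1])
    # yields 2 then 3, while descendants([nullrev]) yields every revision.
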
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered revs so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

791 def children(self, node):
808 def children(self, node):
792 """find the children of a given node"""
809 """find the children of a given node"""
793 c = []
810 c = []
794 p = self.rev(node)
811 p = self.rev(node)
795 for r in self.revs(start=p + 1):
812 for r in self.revs(start=p + 1):
796 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
813 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
797 if prevs:
814 if prevs:
798 for pr in prevs:
815 for pr in prevs:
799 if pr == p:
816 if pr == p:
800 c.append(self.node(r))
817 c.append(self.node(r))
801 elif p == nullrev:
818 elif p == nullrev:
802 c.append(self.node(r))
819 c.append(self.node(r))
803 return c
820 return c
804
821
805 def descendant(self, start, end):
822 def descendant(self, start, end):
806 if start == nullrev:
823 if start == nullrev:
807 return True
824 return True
808 for i in self.descendants([start]):
825 for i in self.descendants([start]):
809 if i == end:
826 if i == end:
810 return True
827 return True
811 elif i > end:
828 elif i > end:
812 break
829 break
813 return False
830 return False
814
831
815 def commonancestorsheads(self, a, b):
832 def commonancestorsheads(self, a, b):
816 """calculate all the heads of the common ancestors of nodes a and b"""
833 """calculate all the heads of the common ancestors of nodes a and b"""
817 a, b = self.rev(a), self.rev(b)
834 a, b = self.rev(a), self.rev(b)
818 try:
835 try:
819 ancs = self.index.commonancestorsheads(a, b)
836 ancs = self.index.commonancestorsheads(a, b)
820 except (AttributeError, OverflowError): # C implementation failed
837 except (AttributeError, OverflowError): # C implementation failed
821 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
838 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
822 return map(self.node, ancs)
839 return map(self.node, ancs)
823
840
824 def isancestor(self, a, b):
841 def isancestor(self, a, b):
825 """return True if node a is an ancestor of node b
842 """return True if node a is an ancestor of node b
826
843
827 The implementation of this is trivial but the use of
844 The implementation of this is trivial but the use of
828 commonancestorsheads is not."""
845 commonancestorsheads is not."""
829 return a in self.commonancestorsheads(a, b)
846 return a in self.commonancestorsheads(a, b)
830
847
831 def ancestor(self, a, b):
848 def ancestor(self, a, b):
832 """calculate the "best" common ancestor of nodes a and b"""
849 """calculate the "best" common ancestor of nodes a and b"""
833
850
834 a, b = self.rev(a), self.rev(b)
851 a, b = self.rev(a), self.rev(b)
835 try:
852 try:
836 ancs = self.index.ancestors(a, b)
853 ancs = self.index.ancestors(a, b)
837 except (AttributeError, OverflowError):
854 except (AttributeError, OverflowError):
838 ancs = ancestor.ancestors(self.parentrevs, a, b)
855 ancs = ancestor.ancestors(self.parentrevs, a, b)
839 if ancs:
856 if ancs:
840 # choose a consistent winner when there's a tie
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        try:
            n = self.index.partialmatch(id)
            if n and self.hasnode(n):
                return n
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fall through to slow path that filters hidden revisions
            pass
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

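    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # identifier forms lookup() accepts, assuming a revlog ``rl`` whose
    # revision 0 has the hex node shown. All four calls resolve to the same
    # binary node; unknown or ambiguous identifiers raise LookupError:
    #
    #   rl.lookup(0)                             # revision number
    #   rl.lookup('0')                           # str(revision number)
    #   rl.lookup('1c6e95f9f55d')                # unique hex prefix
    #   rl.lookup('1c6e95f9f55d' + 28 * '0')     # full 40-digit hex nodeid
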
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._addchunk(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d

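    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # window arithmetic in _loadchunk above, assuming the usual power-of-two
    # chunk cache size of 64KB. Masking with ~(cachesize - 1) rounds the
    # requested span out to aligned boundaries, so nearby reads hit the cache:
    #
    #   >>> cachesize = 65536
    #   >>> offset, length = 70000, 100
    #   >>> realoffset = offset & ~(cachesize - 1)
    #   >>> realoffset
    #   65536
    #   >>> ((offset + length + cachesize) & ~(cachesize - 1)) - realoffset
    #   65536
    #
    # The 100 bytes at offset 70000 are thus read as one aligned 64KB window
    # starting at 65536.
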
    def _getchunk(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._loadchunk(offset, length, df=df)

    def _chunkraw(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a str or a buffer instance of raw byte data. Callers will
        need to call ``self.start(rev)`` and ``self.length(rev)`` to
        determine where each revision's data begins and ends.
        """
        start = self.start(startrev)
        end = self.end(endrev)
        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start
        return self._getchunk(start, length, df=df)

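    # Illustrative sketch (hypothetical; not part of the revlog API): why
    # _chunkraw shifts its offsets for inline revlogs. In an inline revlog
    # the index entries and data chunks are interleaved in one file, so the
    # data for revision r sits after r + 1 index entries. Assuming the
    # 64-byte RevlogNG index entry size:
    #
    #   >>> iosize = 64                    # self._io.size for RevlogNG
    #   >>> rev = 2
    #   >>> datastart = 120                # self.start(rev): data-file offset
    #   >>> datastart + (rev + 1) * iosize # physical offset in the .i file
    #   312
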
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return decompress(self._chunkraw(rev, rev, df=df))

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        # preload the cache
        try:
            while True:
                # ensure that the cache doesn't change out from under us
                _cache = self._chunkcache
                self._chunkraw(revs[0], revs[-1], df=df)
                if _cache == self._chunkcache:
                    break
            offset, data = _cache
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revs]

        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decompress(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

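    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # deltaparent rule above, restated as a standalone doctest. ``base`` is
    # index entry field 3, the stored base revision:
    #
    #   >>> def deltaparent(rev, base, generaldelta, nullrev=-1):
    #   ...     if base == rev:
    #   ...         return nullrev        # full snapshot, no delta parent
    #   ...     return base if generaldelta else rev - 1
    #   >>> deltaparent(2, 0, True)       # delta against its recorded base
    #   0
    #   >>> deltaparent(2, 0, False)      # classic chains against rev - 1
    #   1
    #   >>> deltaparent(2, 2, True)       # base == rev means full text
    #   -1
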
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return str(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1),
                              self.revision(rev2))

    def revision(self, nodeorrev, _df=None):
        """return an uncompressed revision of a given node or revision
        number.

        _df is an existing file handle to read from. It is meant to only be
        used internally.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        if rev is None:
            rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]
        else:
            chain.append(iterrev)
        chain.reverse()

        # drop cache to save memory
        self._cache = None

        bins = self._chunks(chain, df=_df)
        if text is None:
            text = str(bins[0])
            bins = bins[1:]

        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

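    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # shape of the delta chain walk in revision() above. The walk stops at a
    # revision that is its own base (a full snapshot), the chain is
    # reversed, and the fulltext is rebuilt by patching forward:
    #
    #   >>> bases = {0: 0, 1: 0, 2: 1, 3: 2}   # hypothetical rev -> base
    #   >>> chain, iterrev = [], 3
    #   >>> while iterrev != bases[iterrev]:
    #   ...     chain.append(iterrev)
    #   ...     iterrev = bases[iterrev]        # generaldelta step
    #   >>> chain.append(iterrev)
    #   >>> chain.reverse()
    #   >>> chain                               # snapshot first, then deltas
    #   [0, 1, 2, 3]
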
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        self.checkhash(text, p1, p2, node, rev)
        return text

    def checkhash(self, text, p1, p2, node, rev=None):
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                              % (self.indexfile, revornode))

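    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # node identity being verified above. A Mercurial node is the SHA-1 of
    # the two parent nodes (in sorted order) followed by the revision text,
    # so recomputing it and comparing against the stored node catches any
    # corruption of text or parentage:
    #
    #   >>> import hashlib
    #   >>> p1, p2 = '\0' * 20, '\0' * 20     # a root revision
    #   >>> node = hashlib.sha1(min(p1, p2) + max(p1, p2) + 'hello\n').digest()
    #   >>> len(node)
    #   20
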
    def checkinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses
            might use a different hashing method (and override checkhash()
            in that case)
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if len(text) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(text)))

        node = node or self.hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a+")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, text):
        """ generate a possibly-compressed representation of text """
        if not text:
            return ("", text)
        l = len(text)
        bin = None
        if l < 44:
            pass
        elif l > 1000000:
            # zlib makes an internal copy, thus doubling memory usage for
            # large files, so let's do this in pieces
            z = zlib.compressobj()
            p = []
            pos = 0
            while pos < l:
                pos2 = pos + 2**20
                p.append(z.compress(text[pos:pos2]))
                pos = pos2
            p.append(z.flush())
            if sum(map(len, p)) < l:
                bin = "".join(p)
        else:
            bin = _compress(text)
        if bin is None or len(bin) > l:
            if text[0] == '\0':
                return ("", text)
            return ('u', text)
        return ("", bin)

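    # Illustrative sketch (hypothetical; not part of the revlog API): how
    # compress() pairs with chunk decoding. A zlib stream begins with 'x',
    # so the first stored byte disambiguates the three cases with no extra
    # framing; this is a hedged sketch of that dispatch:
    #
    #   >>> import zlib
    #   >>> def decode(chunk):
    #   ...     t = chunk[0]
    #   ...     if t == '\0':                # NUL-leading text stored raw
    #   ...         return chunk
    #   ...     if t == 'x':                 # zlib stream
    #   ...         return zlib.decompress(chunk)
    #   ...     if t == 'u':                 # explicit "uncompressed" marker
    #   ...         return chunk[1:]
    #   >>> decode(zlib.compress('a' * 100)) == 'a' * 100
    #   True
    #   >>> decode('u' + 'tiny text')        # too small to be worth zlib
    #   'tiny text'
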
    def _isgooddelta(self, d, textlen):
        """Returns True if the given delta is good. Good means that it is
        within the disk span, disk size, and chain length bounds that we
        know to be performant."""
        if d is None:
            return False

        # - 'dist' is the distance from the base revision -- bounding it limits
        #   the amount of I/O we need to do.
        # - 'compresseddeltalen' is the sum of the total size of deltas we need
        #   to apply -- bounding it limits the amount of CPU we consume.
        dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
        if (dist > textlen * 4 or l > textlen or
            compresseddeltalen > textlen * 2 or
            (self._maxchainlen and chainlen > self._maxchainlen)):
            return False

        return True

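    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # bounds above, worked through for a hypothetical 1000-byte fulltext. A
    # candidate delta is rejected as soon as reading or applying it would
    # cost more than a fresh snapshot is worth:
    #
    #   >>> textlen = 1000
    #   >>> dist, l, compresseddeltalen, chainlen = 3500, 200, 1800, 10
    #   >>> maxchainlen = None                 # unlimited chain length
    #   >>> bool(dist > textlen * 4 or l > textlen or
    #   ...      compresseddeltalen > textlen * 2 or
    #   ...      (maxchainlen and chainlen > maxchainlen))
    #   False
    #   >>> dist = 4001                        # > 4x fulltext: too much I/O
    #   >>> dist > textlen * 4
    #   True
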
    def _addrevision(self, node, text, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            baserev = cachedelta[0]
            delta = cachedelta[1]
            # special case deltas which replace entire base; no need to decode
            # base revision. this neatly avoids censored bases, which throw when
            # they're decoded.
            hlen = struct.calcsize(">lll")
            if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
                                                       len(delta) - hlen):
                btext[0] = delta[hlen:]
            else:
                if self._inline:
                    fh = ifh
                else:
                    fh = dfh
                basetext = self.revision(self.node(baserev), _df=fh)
                btext[0] = mdiff.patch(basetext, delta)
            try:
                self.checkhash(btext[0], p1, p2, node)
                if flags & REVIDX_ISCENSORED:
                    raise RevlogError(_('node %s is not censored') % node)
            except CensoredNodeError:
                # must pass the censored index flag to add censored revisions
                if not flags & REVIDX_ISCENSORED:
                    raise
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                if self.iscensored(rev):
                    # deltas based on a censored revision must replace the
                    # full content in one patch, so delta works everywhere
                    header = mdiff.replacediffheader(self.rawsize(rev), len(t))
                    delta = header + t
                else:
                    if self._inline:
                        fh = ifh
                    else:
                        fh = dfh
                    ptext = self.revision(self.node(rev), _df=fh)
                    delta = mdiff.textdiff(ptext, t)
            data = self.compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            chainlen, compresseddeltalen = self._chaininfo(rev)
            chainlen += 1
            compresseddeltalen += l
            return dist, l, data, base, chainbase, chainlen, compresseddeltalen

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        delta = None
        if self._basecache is None:
            self._basecache = (prev, self.chainbase(prev))
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)

        # should we try to build a delta?
        if prev != nullrev:
            tested = set()
            if cachedelta and self._generaldelta and self._lazydeltabase:
                # Assume what we received from the server is a good choice;
                # builddelta will reuse the cache
                candidatedelta = builddelta(cachedelta[0])
                tested.add(cachedelta[0])
                if self._isgooddelta(candidatedelta, textlen):
                    delta = candidatedelta
            if delta is None and self._generaldelta:
                # exclude already lazy tested base if any
                parents = [p for p in (p1r, p2r)
                           if p != nullrev and p not in tested]
                if parents and not self._aggressivemergedeltas:
                    # Pick whichever parent is closer to us (to minimize the
                    # chance of having to build a fulltext).
                    parents = [max(parents)]
                tested.update(parents)
                pdeltas = []
                for p in parents:
                    pd = builddelta(p)
                    if self._isgooddelta(pd, textlen):
                        pdeltas.append(pd)
                if pdeltas:
                    delta = min(pdeltas, key=lambda x: x[1])
            if delta is None and prev not in tested:
                # other approaches failed; try against prev to hopefully save
                # us a fulltext.
                candidatedelta = builddelta(prev)
                if self._isgooddelta(candidatedelta, textlen):
                    delta = candidatedelta
        if delta is not None:
            dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
        else:
            text = buildtext()
            data = self.compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)

        if alwayscache and text is None:
            text = buildtext()

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node

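    # Illustrative sketch (hypothetical; not part of the revlog API): a
    # simplified restatement of the candidate order the delta search above
    # walks through. Each candidate base is tried at most once and the first
    # "good" delta wins; if none qualifies, a full snapshot is stored:
    #
    #   >>> def candidates(cachedbase, p1r, p2r, prev, lazydeltabase=True,
    #   ...                aggressive=False, nullrev=-1):
    #   ...     seen, order = set(), []
    #   ...     def add(r):
    #   ...         if r != nullrev and r not in seen:
    #   ...             seen.add(r)
    #   ...             order.append(r)
    #   ...     if cachedbase is not None and lazydeltabase:
    #   ...         add(cachedbase)            # trust the server's base first
    #   ...     parents = [p for p in (p1r, p2r) if p != nullrev]
    #   ...     if parents and not aggressive:
    #   ...         parents = [max(parents)]   # the closer parent only
    #   ...     for p in parents:
    #   ...         add(p)
    #   ...     add(prev)                      # last resort before a fulltext
    #   ...     return order
    #   >>> candidates(cachedbase=7, p1r=7, p2r=5, prev=9)
    #   [7, 9]
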
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

    def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        # track the base of the current delta log
        content = []
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            # loop through our set of deltas
            chain = None
            while True:
                chunkdata = cg.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                content.append(node)

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                flags = REVIDX_DEFAULT_FLAGS
                if self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, flags, (baserev, delta),
                                          ifh, dfh,
                                          alwayscache=bool(addrevisioncb))

                if addrevisioncb:
                    addrevisioncb(self, chain)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a+")
                    ifh = self.opener(self.indexfile, "a+")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return content

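    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # per-revision dict addgroup consumes from ``cg.deltachunk(chain)``,
    # shown with placeholder values. ``deltabase`` names the node the delta
    # applies against; the first chunk's base should already be in this
    # revlog, and later chunks usually delta against the previous node:
    #
    #   >>> chunkdata = {
    #   ...     'node': 'N' * 20,        # node being added
    #   ...     'p1': 'P' * 20,          # first parent node
    #   ...     'p2': '\0' * 20,         # nullid: no second parent
    #   ...     'cs': 'C' * 20,          # changeset node, for linkmapper()
    #   ...     'deltabase': 'P' * 20,   # node this delta applies to
    #   ...     'delta': '...binary patch...',
    #   ... }
    #   >>> sorted(chunkdata)
    #   ['cs', 'delta', 'deltabase', 'node', 'p1', 'p2']
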
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        return False

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        return False

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

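    # Illustrative sketch (hypothetical; not part of the revlog API): the
    # result getstrippoint computes, checked against a simple linear revlog
    # whose linkrevs happen to equal its revs. Stripping from minlink = 3
    # must truncate at rev 3, and no earlier rev has a linkrev >= 3, so the
    # broken set is empty:
    #
    #   >>> linkrevs = [0, 1, 2, 3, 4]        # linkrev of each rev
    #   >>> minlink = 3
    #   >>> strippoint = min(r for r, lr in enumerate(linkrevs)
    #   ...                  if lr >= minlink)
    #   >>> strippoint
    #   3
    #   >>> [r for r in range(strippoint) if linkrevs[r] >= minlink]
    #   []
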
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

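    # Illustrative sketch (hypothetical; not part of the revlog API): what
    # checksize() reports. ``dd`` is surplus bytes in the data file beyond
    # what the index accounts for; ``di`` is trailing bytes in the index
    # that do not form whole entries. A clean revlog yields (0, 0); with
    # 64-byte index entries:
    #
    #   >>> s = 64                 # index entry size
    #   >>> actual = 3 * s + 10    # index file with 10 hypothetical junk bytes
    #   >>> i = max(0, actual // s)
    #   >>> actual - (i * s)       # di: a partial trailing entry is detected
    #   10
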
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

@@ -1,208 +1,207 @@

#require test-repo

  $ cd "$TESTDIR"/..

  $ hg files 'set:(**.py)' | xargs python contrib/check-py3-compat.py
  contrib/casesmash.py not using absolute_import
  contrib/check-code.py not using absolute_import
  contrib/check-code.py requires print_function
  contrib/check-config.py not using absolute_import
  contrib/check-config.py requires print_function
  contrib/debugcmdserver.py not using absolute_import
  contrib/debugcmdserver.py requires print_function
  contrib/debugshell.py not using absolute_import
  contrib/fixpax.py not using absolute_import
  contrib/fixpax.py requires print_function
  contrib/hgclient.py not using absolute_import
  contrib/hgclient.py requires print_function
  contrib/hgfixes/fix_bytes.py not using absolute_import
  contrib/hgfixes/fix_bytesmod.py not using absolute_import
  contrib/hgfixes/fix_leftover_imports.py not using absolute_import
  contrib/import-checker.py not using absolute_import
  contrib/import-checker.py requires print_function
  contrib/memory.py not using absolute_import
  contrib/perf.py not using absolute_import
  contrib/python-hook-examples.py not using absolute_import
  contrib/revsetbenchmarks.py not using absolute_import
  contrib/revsetbenchmarks.py requires print_function
  contrib/showstack.py not using absolute_import
  contrib/synthrepo.py not using absolute_import
  contrib/win32/hgwebdir_wsgi.py not using absolute_import
  doc/check-seclevel.py not using absolute_import
  doc/gendoc.py not using absolute_import
  doc/hgmanpage.py not using absolute_import
  hgext/__init__.py not using absolute_import
  hgext/acl.py not using absolute_import
  hgext/blackbox.py not using absolute_import
  hgext/bugzilla.py not using absolute_import
  hgext/censor.py not using absolute_import
  hgext/children.py not using absolute_import
  hgext/churn.py not using absolute_import
  hgext/clonebundles.py not using absolute_import
  hgext/color.py not using absolute_import
  hgext/convert/__init__.py not using absolute_import
  hgext/convert/bzr.py not using absolute_import
  hgext/convert/common.py not using absolute_import
  hgext/convert/convcmd.py not using absolute_import
  hgext/convert/cvs.py not using absolute_import
  hgext/convert/cvsps.py not using absolute_import
  hgext/convert/darcs.py not using absolute_import
  hgext/convert/filemap.py not using absolute_import
  hgext/convert/git.py not using absolute_import
  hgext/convert/gnuarch.py not using absolute_import
  hgext/convert/hg.py not using absolute_import
  hgext/convert/monotone.py not using absolute_import
  hgext/convert/p4.py not using absolute_import
  hgext/convert/subversion.py not using absolute_import
  hgext/convert/transport.py not using absolute_import
  hgext/eol.py not using absolute_import
  hgext/extdiff.py not using absolute_import
  hgext/factotum.py not using absolute_import
  hgext/fetch.py not using absolute_import
  hgext/gpg.py not using absolute_import
  hgext/graphlog.py not using absolute_import
  hgext/hgcia.py not using absolute_import
  hgext/hgk.py not using absolute_import
  hgext/highlight/__init__.py not using absolute_import
  hgext/highlight/highlight.py not using absolute_import
  hgext/histedit.py not using absolute_import
  hgext/keyword.py not using absolute_import
  hgext/largefiles/__init__.py not using absolute_import
  hgext/largefiles/basestore.py not using absolute_import
  hgext/largefiles/lfcommands.py not using absolute_import
  hgext/largefiles/lfutil.py not using absolute_import
  hgext/largefiles/localstore.py not using absolute_import
  hgext/largefiles/overrides.py not using absolute_import
  hgext/largefiles/proto.py not using absolute_import
  hgext/largefiles/remotestore.py not using absolute_import
  hgext/largefiles/reposetup.py not using absolute_import
  hgext/largefiles/uisetup.py not using absolute_import
  hgext/largefiles/wirestore.py not using absolute_import
  hgext/mq.py not using absolute_import
  hgext/notify.py not using absolute_import
  hgext/pager.py not using absolute_import
  hgext/patchbomb.py not using absolute_import
  hgext/purge.py not using absolute_import
  hgext/rebase.py not using absolute_import
  hgext/record.py not using absolute_import
  hgext/relink.py not using absolute_import
  hgext/schemes.py not using absolute_import
  hgext/share.py not using absolute_import
  hgext/shelve.py not using absolute_import
  hgext/strip.py not using absolute_import
  hgext/transplant.py not using absolute_import
  hgext/win32mbcs.py not using absolute_import
  hgext/win32text.py not using absolute_import
  hgext/zeroconf/Zeroconf.py not using absolute_import
  hgext/zeroconf/Zeroconf.py requires print_function
  hgext/zeroconf/__init__.py not using absolute_import
  i18n/check-translation.py not using absolute_import
  i18n/polib.py not using absolute_import
  mercurial/byterange.py not using absolute_import
  mercurial/cmdutil.py not using absolute_import
  mercurial/commands.py not using absolute_import
  mercurial/context.py not using absolute_import
105 mercurial/dirstate.py not using absolute_import
105 mercurial/dirstate.py not using absolute_import
106 mercurial/dispatch.py requires print_function
106 mercurial/dispatch.py requires print_function
107 mercurial/exchange.py not using absolute_import
107 mercurial/exchange.py not using absolute_import
108 mercurial/help.py not using absolute_import
108 mercurial/help.py not using absolute_import
109 mercurial/httpclient/__init__.py not using absolute_import
109 mercurial/httpclient/__init__.py not using absolute_import
110 mercurial/httpclient/_readers.py not using absolute_import
110 mercurial/httpclient/_readers.py not using absolute_import
111 mercurial/httpclient/socketutil.py not using absolute_import
111 mercurial/httpclient/socketutil.py not using absolute_import
112 mercurial/httpconnection.py not using absolute_import
112 mercurial/httpconnection.py not using absolute_import
113 mercurial/keepalive.py not using absolute_import
113 mercurial/keepalive.py not using absolute_import
114 mercurial/keepalive.py requires print_function
114 mercurial/keepalive.py requires print_function
115 mercurial/localrepo.py not using absolute_import
115 mercurial/localrepo.py not using absolute_import
116 mercurial/lsprof.py requires print_function
116 mercurial/lsprof.py requires print_function
117 mercurial/lsprofcalltree.py not using absolute_import
117 mercurial/lsprofcalltree.py not using absolute_import
118 mercurial/lsprofcalltree.py requires print_function
118 mercurial/lsprofcalltree.py requires print_function
119 mercurial/mail.py requires print_function
119 mercurial/mail.py requires print_function
120 mercurial/manifest.py not using absolute_import
120 mercurial/manifest.py not using absolute_import
121 mercurial/mdiff.py not using absolute_import
121 mercurial/mdiff.py not using absolute_import
122 mercurial/patch.py not using absolute_import
122 mercurial/patch.py not using absolute_import
123 mercurial/pvec.py not using absolute_import
123 mercurial/pvec.py not using absolute_import
124 mercurial/py3kcompat.py not using absolute_import
124 mercurial/py3kcompat.py not using absolute_import
125 mercurial/revlog.py not using absolute_import
126 mercurial/scmposix.py not using absolute_import
125 mercurial/scmposix.py not using absolute_import
127 mercurial/scmutil.py not using absolute_import
126 mercurial/scmutil.py not using absolute_import
128 mercurial/scmwindows.py not using absolute_import
127 mercurial/scmwindows.py not using absolute_import
129 mercurial/store.py not using absolute_import
128 mercurial/store.py not using absolute_import
130 setup.py not using absolute_import
129 setup.py not using absolute_import
131 tests/filterpyflakes.py requires print_function
130 tests/filterpyflakes.py requires print_function
132 tests/generate-working-copy-states.py requires print_function
131 tests/generate-working-copy-states.py requires print_function
133 tests/get-with-headers.py requires print_function
132 tests/get-with-headers.py requires print_function
134 tests/heredoctest.py requires print_function
133 tests/heredoctest.py requires print_function
135 tests/hypothesishelpers.py not using absolute_import
134 tests/hypothesishelpers.py not using absolute_import
136 tests/hypothesishelpers.py requires print_function
135 tests/hypothesishelpers.py requires print_function
137 tests/killdaemons.py not using absolute_import
136 tests/killdaemons.py not using absolute_import
138 tests/md5sum.py not using absolute_import
137 tests/md5sum.py not using absolute_import
139 tests/mockblackbox.py not using absolute_import
138 tests/mockblackbox.py not using absolute_import
140 tests/printenv.py not using absolute_import
139 tests/printenv.py not using absolute_import
141 tests/readlink.py not using absolute_import
140 tests/readlink.py not using absolute_import
142 tests/readlink.py requires print_function
141 tests/readlink.py requires print_function
143 tests/revlog-formatv0.py not using absolute_import
142 tests/revlog-formatv0.py not using absolute_import
144 tests/run-tests.py not using absolute_import
143 tests/run-tests.py not using absolute_import
145 tests/seq.py not using absolute_import
144 tests/seq.py not using absolute_import
146 tests/seq.py requires print_function
145 tests/seq.py requires print_function
147 tests/silenttestrunner.py not using absolute_import
146 tests/silenttestrunner.py not using absolute_import
148 tests/silenttestrunner.py requires print_function
147 tests/silenttestrunner.py requires print_function
149 tests/sitecustomize.py not using absolute_import
148 tests/sitecustomize.py not using absolute_import
150 tests/svn-safe-append.py not using absolute_import
149 tests/svn-safe-append.py not using absolute_import
151 tests/svnxml.py not using absolute_import
150 tests/svnxml.py not using absolute_import
152 tests/test-ancestor.py requires print_function
151 tests/test-ancestor.py requires print_function
153 tests/test-atomictempfile.py not using absolute_import
152 tests/test-atomictempfile.py not using absolute_import
154 tests/test-batching.py not using absolute_import
153 tests/test-batching.py not using absolute_import
155 tests/test-batching.py requires print_function
154 tests/test-batching.py requires print_function
156 tests/test-bdiff.py not using absolute_import
155 tests/test-bdiff.py not using absolute_import
157 tests/test-bdiff.py requires print_function
156 tests/test-bdiff.py requires print_function
158 tests/test-context.py not using absolute_import
157 tests/test-context.py not using absolute_import
159 tests/test-context.py requires print_function
158 tests/test-context.py requires print_function
160 tests/test-demandimport.py not using absolute_import
159 tests/test-demandimport.py not using absolute_import
161 tests/test-demandimport.py requires print_function
160 tests/test-demandimport.py requires print_function
162 tests/test-dispatch.py not using absolute_import
161 tests/test-dispatch.py not using absolute_import
163 tests/test-dispatch.py requires print_function
162 tests/test-dispatch.py requires print_function
164 tests/test-doctest.py not using absolute_import
163 tests/test-doctest.py not using absolute_import
165 tests/test-duplicateoptions.py not using absolute_import
164 tests/test-duplicateoptions.py not using absolute_import
166 tests/test-duplicateoptions.py requires print_function
165 tests/test-duplicateoptions.py requires print_function
167 tests/test-filecache.py not using absolute_import
166 tests/test-filecache.py not using absolute_import
168 tests/test-filecache.py requires print_function
167 tests/test-filecache.py requires print_function
169 tests/test-filelog.py not using absolute_import
168 tests/test-filelog.py not using absolute_import
170 tests/test-filelog.py requires print_function
169 tests/test-filelog.py requires print_function
171 tests/test-hg-parseurl.py not using absolute_import
170 tests/test-hg-parseurl.py not using absolute_import
172 tests/test-hg-parseurl.py requires print_function
171 tests/test-hg-parseurl.py requires print_function
173 tests/test-hgweb-auth.py not using absolute_import
172 tests/test-hgweb-auth.py not using absolute_import
174 tests/test-hgweb-auth.py requires print_function
173 tests/test-hgweb-auth.py requires print_function
175 tests/test-hgwebdir-paths.py not using absolute_import
174 tests/test-hgwebdir-paths.py not using absolute_import
176 tests/test-hybridencode.py not using absolute_import
175 tests/test-hybridencode.py not using absolute_import
177 tests/test-hybridencode.py requires print_function
176 tests/test-hybridencode.py requires print_function
178 tests/test-lrucachedict.py not using absolute_import
177 tests/test-lrucachedict.py not using absolute_import
179 tests/test-lrucachedict.py requires print_function
178 tests/test-lrucachedict.py requires print_function
180 tests/test-manifest.py not using absolute_import
179 tests/test-manifest.py not using absolute_import
181 tests/test-minirst.py not using absolute_import
180 tests/test-minirst.py not using absolute_import
182 tests/test-minirst.py requires print_function
181 tests/test-minirst.py requires print_function
183 tests/test-parseindex2.py not using absolute_import
182 tests/test-parseindex2.py not using absolute_import
184 tests/test-parseindex2.py requires print_function
183 tests/test-parseindex2.py requires print_function
185 tests/test-pathencode.py not using absolute_import
184 tests/test-pathencode.py not using absolute_import
186 tests/test-pathencode.py requires print_function
185 tests/test-pathencode.py requires print_function
187 tests/test-propertycache.py not using absolute_import
186 tests/test-propertycache.py not using absolute_import
188 tests/test-propertycache.py requires print_function
187 tests/test-propertycache.py requires print_function
189 tests/test-revlog-ancestry.py not using absolute_import
188 tests/test-revlog-ancestry.py not using absolute_import
190 tests/test-revlog-ancestry.py requires print_function
189 tests/test-revlog-ancestry.py requires print_function
191 tests/test-run-tests.py not using absolute_import
190 tests/test-run-tests.py not using absolute_import
192 tests/test-simplemerge.py not using absolute_import
191 tests/test-simplemerge.py not using absolute_import
193 tests/test-status-inprocess.py not using absolute_import
192 tests/test-status-inprocess.py not using absolute_import
194 tests/test-status-inprocess.py requires print_function
193 tests/test-status-inprocess.py requires print_function
195 tests/test-symlink-os-yes-fs-no.py not using absolute_import
194 tests/test-symlink-os-yes-fs-no.py not using absolute_import
196 tests/test-trusted.py not using absolute_import
195 tests/test-trusted.py not using absolute_import
197 tests/test-trusted.py requires print_function
196 tests/test-trusted.py requires print_function
198 tests/test-ui-color.py not using absolute_import
197 tests/test-ui-color.py not using absolute_import
199 tests/test-ui-color.py requires print_function
198 tests/test-ui-color.py requires print_function
200 tests/test-ui-config.py not using absolute_import
199 tests/test-ui-config.py not using absolute_import
201 tests/test-ui-config.py requires print_function
200 tests/test-ui-config.py requires print_function
202 tests/test-ui-verbosity.py not using absolute_import
201 tests/test-ui-verbosity.py not using absolute_import
203 tests/test-ui-verbosity.py requires print_function
202 tests/test-ui-verbosity.py requires print_function
204 tests/test-url.py not using absolute_import
203 tests/test-url.py not using absolute_import
205 tests/test-url.py requires print_function
204 tests/test-url.py requires print_function
206 tests/test-walkrepo.py requires print_function
205 tests/test-walkrepo.py requires print_function
207 tests/test-wireproto.py requires print_function
206 tests/test-wireproto.py requires print_function
208 tests/tinyproxy.py requires print_function
207 tests/tinyproxy.py requires print_function
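
The list above is the expected output of Mercurial's Python 3 compatibility check (apparently tests/test-check-py3-compat.t, an assumption; the test file name is not visible in this hunk). Each entry names a file that either lacks the `from __future__ import absolute_import` declaration or still uses `print` as a statement. Since this changeset converts mercurial/revlog.py, its entry (old line 125) appears only on the left-hand column of the diff, and every later entry is renumbered down by one. Below is a minimal sketch of the module-header style the check enforces; it is illustrative only, not a file from the Mercurial tree, and the package names mentioned in the comments are hypothetical.

# Illustrative sketch of the header convention the check enforces;
# not part of the Mercurial source tree.
from __future__ import absolute_import, print_function

import os   # stdlib imports remain plain absolute imports
import sys

# Inside a package, sibling modules must now be imported explicitly,
# e.g. "from . import util" or "from mypkg import util" (hypothetical
# names); a bare "import util" no longer resolves against the package
# directory on Python 2 once absolute_import is in effect.

def main():
    # print_function makes print a function on Python 2 as well, which
    # is what the "requires print_function" entries above are flagging.
    print('running under Python', sys.version_info[0], 'in', os.getcwd())

if __name__ == '__main__':
    main()

Once a file gains this header (as revlog.py does in this changeset), its line is removed from the expected output above and the test passes again.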