##// END OF EJS Templates
revlog: don't include trailing nullrev in revlog.revs(stop=len(revlog))...
Martin von Zweigbergk -
r38800:83a505b5 default
parent child Browse files
Show More
@@ -1,2960 +1,2963 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import hashlib
19 import hashlib
20 import heapq
20 import heapq
21 import os
21 import os
22 import re
22 import re
23 import struct
23 import struct
24 import zlib
24 import zlib
25
25
26 # import stuff from node for others to import from revlog
26 # import stuff from node for others to import from revlog
27 from .node import (
27 from .node import (
28 bin,
28 bin,
29 hex,
29 hex,
30 nullid,
30 nullid,
31 nullrev,
31 nullrev,
32 wdirfilenodeids,
32 wdirfilenodeids,
33 wdirhex,
33 wdirhex,
34 wdirid,
34 wdirid,
35 wdirrev,
35 wdirrev,
36 )
36 )
37 from .i18n import _
37 from .i18n import _
38 from .thirdparty import (
38 from .thirdparty import (
39 attr,
39 attr,
40 )
40 )
41 from . import (
41 from . import (
42 ancestor,
42 ancestor,
43 error,
43 error,
44 mdiff,
44 mdiff,
45 policy,
45 policy,
46 pycompat,
46 pycompat,
47 templatefilters,
47 templatefilters,
48 util,
48 util,
49 )
49 )
50 from .utils import (
50 from .utils import (
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 parsers = policy.importmod(r'parsers')
54 parsers = policy.importmod(r'parsers')
55
55
56 # Aliased for performance.
56 # Aliased for performance.
57 _zlibdecompress = zlib.decompress
57 _zlibdecompress = zlib.decompress
58
58
59 # revlog header flags
59 # revlog header flags
60 REVLOGV0 = 0
60 REVLOGV0 = 0
61 REVLOGV1 = 1
61 REVLOGV1 = 1
62 # Dummy value until file format is finalized.
62 # Dummy value until file format is finalized.
63 # Reminder: change the bounds check in revlog.__init__ when this is changed.
63 # Reminder: change the bounds check in revlog.__init__ when this is changed.
64 REVLOGV2 = 0xDEAD
64 REVLOGV2 = 0xDEAD
65 FLAG_INLINE_DATA = (1 << 16)
65 FLAG_INLINE_DATA = (1 << 16)
66 FLAG_GENERALDELTA = (1 << 17)
66 FLAG_GENERALDELTA = (1 << 17)
67 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
67 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
68 REVLOG_DEFAULT_FORMAT = REVLOGV1
68 REVLOG_DEFAULT_FORMAT = REVLOGV1
69 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
69 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
70 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
70 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
71 REVLOGV2_FLAGS = REVLOGV1_FLAGS
71 REVLOGV2_FLAGS = REVLOGV1_FLAGS
72
72
73 # revlog index flags
73 # revlog index flags
74 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
74 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
75 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
75 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
76 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
76 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
77 REVIDX_DEFAULT_FLAGS = 0
77 REVIDX_DEFAULT_FLAGS = 0
78 # stable order in which flags need to be processed and their processors applied
78 # stable order in which flags need to be processed and their processors applied
79 REVIDX_FLAGS_ORDER = [
79 REVIDX_FLAGS_ORDER = [
80 REVIDX_ISCENSORED,
80 REVIDX_ISCENSORED,
81 REVIDX_ELLIPSIS,
81 REVIDX_ELLIPSIS,
82 REVIDX_EXTSTORED,
82 REVIDX_EXTSTORED,
83 ]
83 ]
84 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
84 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
85 # bitmark for flags that could cause rawdata content change
85 # bitmark for flags that could cause rawdata content change
86 REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
86 REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
87
87
88 # max size of revlog with inline data
88 # max size of revlog with inline data
89 _maxinline = 131072
89 _maxinline = 131072
90 _chunksize = 1048576
90 _chunksize = 1048576
91
91
92 RevlogError = error.RevlogError
92 RevlogError = error.RevlogError
93 LookupError = error.LookupError
93 LookupError = error.LookupError
94 CensoredNodeError = error.CensoredNodeError
94 CensoredNodeError = error.CensoredNodeError
95 ProgrammingError = error.ProgrammingError
95 ProgrammingError = error.ProgrammingError
96
96
97 # Store flag processors (cf. 'addflagprocessor()' to register)
97 # Store flag processors (cf. 'addflagprocessor()' to register)
98 _flagprocessors = {
98 _flagprocessors = {
99 REVIDX_ISCENSORED: None,
99 REVIDX_ISCENSORED: None,
100 }
100 }
101
101
102 _mdre = re.compile('\1\n')
102 _mdre = re.compile('\1\n')
103 def parsemeta(text):
103 def parsemeta(text):
104 """return (metadatadict, metadatasize)"""
104 """return (metadatadict, metadatasize)"""
105 # text can be buffer, so we can't use .startswith or .index
105 # text can be buffer, so we can't use .startswith or .index
106 if text[:2] != '\1\n':
106 if text[:2] != '\1\n':
107 return None, None
107 return None, None
108 s = _mdre.search(text, 2).start()
108 s = _mdre.search(text, 2).start()
109 mtext = text[2:s]
109 mtext = text[2:s]
110 meta = {}
110 meta = {}
111 for l in mtext.splitlines():
111 for l in mtext.splitlines():
112 k, v = l.split(": ", 1)
112 k, v = l.split(": ", 1)
113 meta[k] = v
113 meta[k] = v
114 return meta, (s + 2)
114 return meta, (s + 2)
115
115
116 def packmeta(meta, text):
116 def packmeta(meta, text):
117 keys = sorted(meta)
117 keys = sorted(meta)
118 metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
118 metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
119 return "\1\n%s\1\n%s" % (metatext, text)
119 return "\1\n%s\1\n%s" % (metatext, text)
120
120
121 def _censoredtext(text):
121 def _censoredtext(text):
122 m, offs = parsemeta(text)
122 m, offs = parsemeta(text)
123 return m and "censored" in m
123 return m and "censored" in m
124
124
125 def addflagprocessor(flag, processor):
125 def addflagprocessor(flag, processor):
126 """Register a flag processor on a revision data flag.
126 """Register a flag processor on a revision data flag.
127
127
128 Invariant:
128 Invariant:
129 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
129 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
130 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
130 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
131 - Only one flag processor can be registered on a specific flag.
131 - Only one flag processor can be registered on a specific flag.
132 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
132 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
133 following signatures:
133 following signatures:
134 - (read) f(self, rawtext) -> text, bool
134 - (read) f(self, rawtext) -> text, bool
135 - (write) f(self, text) -> rawtext, bool
135 - (write) f(self, text) -> rawtext, bool
136 - (raw) f(self, rawtext) -> bool
136 - (raw) f(self, rawtext) -> bool
137 "text" is presented to the user. "rawtext" is stored in revlog data, not
137 "text" is presented to the user. "rawtext" is stored in revlog data, not
138 directly visible to the user.
138 directly visible to the user.
139 The boolean returned by these transforms is used to determine whether
139 The boolean returned by these transforms is used to determine whether
140 the returned text can be used for hash integrity checking. For example,
140 the returned text can be used for hash integrity checking. For example,
141 if "write" returns False, then "text" is used to generate hash. If
141 if "write" returns False, then "text" is used to generate hash. If
142 "write" returns True, that basically means "rawtext" returned by "write"
142 "write" returns True, that basically means "rawtext" returned by "write"
143 should be used to generate hash. Usually, "write" and "read" return
143 should be used to generate hash. Usually, "write" and "read" return
144 different booleans. And "raw" returns a same boolean as "write".
144 different booleans. And "raw" returns a same boolean as "write".
145
145
146 Note: The 'raw' transform is used for changegroup generation and in some
146 Note: The 'raw' transform is used for changegroup generation and in some
147 debug commands. In this case the transform only indicates whether the
147 debug commands. In this case the transform only indicates whether the
148 contents can be used for hash integrity checks.
148 contents can be used for hash integrity checks.
149 """
149 """
150 if not flag & REVIDX_KNOWN_FLAGS:
150 if not flag & REVIDX_KNOWN_FLAGS:
151 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
151 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
152 raise ProgrammingError(msg)
152 raise ProgrammingError(msg)
153 if flag not in REVIDX_FLAGS_ORDER:
153 if flag not in REVIDX_FLAGS_ORDER:
154 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
154 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
155 raise ProgrammingError(msg)
155 raise ProgrammingError(msg)
156 if flag in _flagprocessors:
156 if flag in _flagprocessors:
157 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
157 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
158 raise error.Abort(msg)
158 raise error.Abort(msg)
159 _flagprocessors[flag] = processor
159 _flagprocessors[flag] = processor
160
160
161 def getoffset(q):
161 def getoffset(q):
162 return int(q >> 16)
162 return int(q >> 16)
163
163
164 def gettype(q):
164 def gettype(q):
165 return int(q & 0xFFFF)
165 return int(q & 0xFFFF)
166
166
167 def offset_type(offset, type):
167 def offset_type(offset, type):
168 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
168 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
169 raise ValueError('unknown revlog index flags')
169 raise ValueError('unknown revlog index flags')
170 return int(int(offset) << 16 | type)
170 return int(int(offset) << 16 | type)
171
171
172 _nullhash = hashlib.sha1(nullid)
172 _nullhash = hashlib.sha1(nullid)
173
173
174 def hash(text, p1, p2):
174 def hash(text, p1, p2):
175 """generate a hash from the given text and its parent hashes
175 """generate a hash from the given text and its parent hashes
176
176
177 This hash combines both the current file contents and its history
177 This hash combines both the current file contents and its history
178 in a manner that makes it easy to distinguish nodes with the same
178 in a manner that makes it easy to distinguish nodes with the same
179 content in the revision graph.
179 content in the revision graph.
180 """
180 """
181 # As of now, if one of the parent node is null, p2 is null
181 # As of now, if one of the parent node is null, p2 is null
182 if p2 == nullid:
182 if p2 == nullid:
183 # deep copy of a hash is faster than creating one
183 # deep copy of a hash is faster than creating one
184 s = _nullhash.copy()
184 s = _nullhash.copy()
185 s.update(p1)
185 s.update(p1)
186 else:
186 else:
187 # none of the parent nodes are nullid
187 # none of the parent nodes are nullid
188 if p1 < p2:
188 if p1 < p2:
189 a = p1
189 a = p1
190 b = p2
190 b = p2
191 else:
191 else:
192 a = p2
192 a = p2
193 b = p1
193 b = p1
194 s = hashlib.sha1(a)
194 s = hashlib.sha1(a)
195 s.update(b)
195 s.update(b)
196 s.update(text)
196 s.update(text)
197 return s.digest()
197 return s.digest()
198
198
199 class _testrevlog(object):
199 class _testrevlog(object):
200 """minimalist fake revlog to use in doctests"""
200 """minimalist fake revlog to use in doctests"""
201
201
202 def __init__(self, data, density=0.5, mingap=0):
202 def __init__(self, data, density=0.5, mingap=0):
203 """data is an list of revision payload boundaries"""
203 """data is an list of revision payload boundaries"""
204 self._data = data
204 self._data = data
205 self._srdensitythreshold = density
205 self._srdensitythreshold = density
206 self._srmingapsize = mingap
206 self._srmingapsize = mingap
207
207
208 def start(self, rev):
208 def start(self, rev):
209 if rev == 0:
209 if rev == 0:
210 return 0
210 return 0
211 return self._data[rev - 1]
211 return self._data[rev - 1]
212
212
213 def end(self, rev):
213 def end(self, rev):
214 return self._data[rev]
214 return self._data[rev]
215
215
216 def length(self, rev):
216 def length(self, rev):
217 return self.end(rev) - self.start(rev)
217 return self.end(rev) - self.start(rev)
218
218
219 def __len__(self):
219 def __len__(self):
220 return len(self._data)
220 return len(self._data)
221
221
222 def _trimchunk(revlog, revs, startidx, endidx=None):
222 def _trimchunk(revlog, revs, startidx, endidx=None):
223 """returns revs[startidx:endidx] without empty trailing revs
223 """returns revs[startidx:endidx] without empty trailing revs
224
224
225 Doctest Setup
225 Doctest Setup
226 >>> revlog = _testrevlog([
226 >>> revlog = _testrevlog([
227 ... 5, #0
227 ... 5, #0
228 ... 10, #1
228 ... 10, #1
229 ... 12, #2
229 ... 12, #2
230 ... 12, #3 (empty)
230 ... 12, #3 (empty)
231 ... 17, #4
231 ... 17, #4
232 ... 21, #5
232 ... 21, #5
233 ... 21, #6 (empty)
233 ... 21, #6 (empty)
234 ... ])
234 ... ])
235
235
236 Contiguous cases:
236 Contiguous cases:
237 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
237 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
238 [0, 1, 2, 3, 4, 5]
238 [0, 1, 2, 3, 4, 5]
239 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
239 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
240 [0, 1, 2, 3, 4]
240 [0, 1, 2, 3, 4]
241 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
241 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
242 [0, 1, 2]
242 [0, 1, 2]
243 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
243 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
244 [2]
244 [2]
245 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
245 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
246 [3, 4, 5]
246 [3, 4, 5]
247 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
247 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
248 [3, 4]
248 [3, 4]
249
249
250 Discontiguous cases:
250 Discontiguous cases:
251 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
251 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
252 [1, 3, 5]
252 [1, 3, 5]
253 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
253 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
254 [1]
254 [1]
255 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
255 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
256 [3, 5]
256 [3, 5]
257 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
257 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
258 [3, 5]
258 [3, 5]
259 """
259 """
260 length = revlog.length
260 length = revlog.length
261
261
262 if endidx is None:
262 if endidx is None:
263 endidx = len(revs)
263 endidx = len(revs)
264
264
265 # Trim empty revs at the end, but never the very first revision of a chain
265 # Trim empty revs at the end, but never the very first revision of a chain
266 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
266 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
267 endidx -= 1
267 endidx -= 1
268
268
269 return revs[startidx:endidx]
269 return revs[startidx:endidx]
270
270
271 def _segmentspan(revlog, revs):
271 def _segmentspan(revlog, revs):
272 """Get the byte span of a segment of revisions
272 """Get the byte span of a segment of revisions
273
273
274 revs is a sorted array of revision numbers
274 revs is a sorted array of revision numbers
275
275
276 >>> revlog = _testrevlog([
276 >>> revlog = _testrevlog([
277 ... 5, #0
277 ... 5, #0
278 ... 10, #1
278 ... 10, #1
279 ... 12, #2
279 ... 12, #2
280 ... 12, #3 (empty)
280 ... 12, #3 (empty)
281 ... 17, #4
281 ... 17, #4
282 ... ])
282 ... ])
283
283
284 >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
284 >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
285 17
285 17
286 >>> _segmentspan(revlog, [0, 4])
286 >>> _segmentspan(revlog, [0, 4])
287 17
287 17
288 >>> _segmentspan(revlog, [3, 4])
288 >>> _segmentspan(revlog, [3, 4])
289 5
289 5
290 >>> _segmentspan(revlog, [1, 2, 3,])
290 >>> _segmentspan(revlog, [1, 2, 3,])
291 7
291 7
292 >>> _segmentspan(revlog, [1, 3])
292 >>> _segmentspan(revlog, [1, 3])
293 7
293 7
294 """
294 """
295 if not revs:
295 if not revs:
296 return 0
296 return 0
297 return revlog.end(revs[-1]) - revlog.start(revs[0])
297 return revlog.end(revs[-1]) - revlog.start(revs[0])
298
298
299 def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
299 def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
300 """slice revs to reduce the amount of unrelated data to be read from disk.
300 """slice revs to reduce the amount of unrelated data to be read from disk.
301
301
302 ``revs`` is sliced into groups that should be read in one time.
302 ``revs`` is sliced into groups that should be read in one time.
303 Assume that revs are sorted.
303 Assume that revs are sorted.
304
304
305 The initial chunk is sliced until the overall density (payload/chunks-span
305 The initial chunk is sliced until the overall density (payload/chunks-span
306 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
306 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
307 `revlog._srmingapsize` is skipped.
307 `revlog._srmingapsize` is skipped.
308
308
309 If `targetsize` is set, no chunk larger than `targetsize` will be yield.
309 If `targetsize` is set, no chunk larger than `targetsize` will be yield.
310 For consistency with other slicing choice, this limit won't go lower than
310 For consistency with other slicing choice, this limit won't go lower than
311 `revlog._srmingapsize`.
311 `revlog._srmingapsize`.
312
312
313 If individual revisions chunk are larger than this limit, they will still
313 If individual revisions chunk are larger than this limit, they will still
314 be raised individually.
314 be raised individually.
315
315
316 >>> revlog = _testrevlog([
316 >>> revlog = _testrevlog([
317 ... 5, #00 (5)
317 ... 5, #00 (5)
318 ... 10, #01 (5)
318 ... 10, #01 (5)
319 ... 12, #02 (2)
319 ... 12, #02 (2)
320 ... 12, #03 (empty)
320 ... 12, #03 (empty)
321 ... 27, #04 (15)
321 ... 27, #04 (15)
322 ... 31, #05 (4)
322 ... 31, #05 (4)
323 ... 31, #06 (empty)
323 ... 31, #06 (empty)
324 ... 42, #07 (11)
324 ... 42, #07 (11)
325 ... 47, #08 (5)
325 ... 47, #08 (5)
326 ... 47, #09 (empty)
326 ... 47, #09 (empty)
327 ... 48, #10 (1)
327 ... 48, #10 (1)
328 ... 51, #11 (3)
328 ... 51, #11 (3)
329 ... 74, #12 (23)
329 ... 74, #12 (23)
330 ... 85, #13 (11)
330 ... 85, #13 (11)
331 ... 86, #14 (1)
331 ... 86, #14 (1)
332 ... 91, #15 (5)
332 ... 91, #15 (5)
333 ... ])
333 ... ])
334
334
335 >>> list(_slicechunk(revlog, list(range(16))))
335 >>> list(_slicechunk(revlog, list(range(16))))
336 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
336 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
337 >>> list(_slicechunk(revlog, [0, 15]))
337 >>> list(_slicechunk(revlog, [0, 15]))
338 [[0], [15]]
338 [[0], [15]]
339 >>> list(_slicechunk(revlog, [0, 11, 15]))
339 >>> list(_slicechunk(revlog, [0, 11, 15]))
340 [[0], [11], [15]]
340 [[0], [11], [15]]
341 >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
341 >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
342 [[0], [11, 13, 15]]
342 [[0], [11, 13, 15]]
343 >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
343 >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
344 [[1, 2], [5, 8, 10, 11], [14]]
344 [[1, 2], [5, 8, 10, 11], [14]]
345
345
346 Slicing with a maximum chunk size
346 Slicing with a maximum chunk size
347 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
347 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
348 [[0], [11], [13], [15]]
348 [[0], [11], [13], [15]]
349 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
349 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
350 [[0], [11], [13, 15]]
350 [[0], [11], [13, 15]]
351 """
351 """
352 if targetsize is not None:
352 if targetsize is not None:
353 targetsize = max(targetsize, revlog._srmingapsize)
353 targetsize = max(targetsize, revlog._srmingapsize)
354 # targetsize should not be specified when evaluating delta candidates:
354 # targetsize should not be specified when evaluating delta candidates:
355 # * targetsize is used to ensure we stay within specification when reading,
355 # * targetsize is used to ensure we stay within specification when reading,
356 # * deltainfo is used to pick are good delta chain when writing.
356 # * deltainfo is used to pick are good delta chain when writing.
357 if not (deltainfo is None or targetsize is None):
357 if not (deltainfo is None or targetsize is None):
358 msg = 'cannot use `targetsize` with a `deltainfo`'
358 msg = 'cannot use `targetsize` with a `deltainfo`'
359 raise error.ProgrammingError(msg)
359 raise error.ProgrammingError(msg)
360 for chunk in _slicechunktodensity(revlog, revs,
360 for chunk in _slicechunktodensity(revlog, revs,
361 deltainfo,
361 deltainfo,
362 revlog._srdensitythreshold,
362 revlog._srdensitythreshold,
363 revlog._srmingapsize):
363 revlog._srmingapsize):
364 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
364 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
365 yield subchunk
365 yield subchunk
366
366
367 def _slicechunktosize(revlog, revs, targetsize=None):
367 def _slicechunktosize(revlog, revs, targetsize=None):
368 """slice revs to match the target size
368 """slice revs to match the target size
369
369
370 This is intended to be used on chunk that density slicing selected by that
370 This is intended to be used on chunk that density slicing selected by that
371 are still too large compared to the read garantee of revlog. This might
371 are still too large compared to the read garantee of revlog. This might
372 happens when "minimal gap size" interrupted the slicing or when chain are
372 happens when "minimal gap size" interrupted the slicing or when chain are
373 built in a way that create large blocks next to each other.
373 built in a way that create large blocks next to each other.
374
374
375 >>> revlog = _testrevlog([
375 >>> revlog = _testrevlog([
376 ... 3, #0 (3)
376 ... 3, #0 (3)
377 ... 5, #1 (2)
377 ... 5, #1 (2)
378 ... 6, #2 (1)
378 ... 6, #2 (1)
379 ... 8, #3 (2)
379 ... 8, #3 (2)
380 ... 8, #4 (empty)
380 ... 8, #4 (empty)
381 ... 11, #5 (3)
381 ... 11, #5 (3)
382 ... 12, #6 (1)
382 ... 12, #6 (1)
383 ... 13, #7 (1)
383 ... 13, #7 (1)
384 ... 14, #8 (1)
384 ... 14, #8 (1)
385 ... ])
385 ... ])
386
386
387 Cases where chunk is already small enough
387 Cases where chunk is already small enough
388 >>> list(_slicechunktosize(revlog, [0], 3))
388 >>> list(_slicechunktosize(revlog, [0], 3))
389 [[0]]
389 [[0]]
390 >>> list(_slicechunktosize(revlog, [6, 7], 3))
390 >>> list(_slicechunktosize(revlog, [6, 7], 3))
391 [[6, 7]]
391 [[6, 7]]
392 >>> list(_slicechunktosize(revlog, [0], None))
392 >>> list(_slicechunktosize(revlog, [0], None))
393 [[0]]
393 [[0]]
394 >>> list(_slicechunktosize(revlog, [6, 7], None))
394 >>> list(_slicechunktosize(revlog, [6, 7], None))
395 [[6, 7]]
395 [[6, 7]]
396
396
397 cases where we need actual slicing
397 cases where we need actual slicing
398 >>> list(_slicechunktosize(revlog, [0, 1], 3))
398 >>> list(_slicechunktosize(revlog, [0, 1], 3))
399 [[0], [1]]
399 [[0], [1]]
400 >>> list(_slicechunktosize(revlog, [1, 3], 3))
400 >>> list(_slicechunktosize(revlog, [1, 3], 3))
401 [[1], [3]]
401 [[1], [3]]
402 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
402 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
403 [[1, 2], [3]]
403 [[1, 2], [3]]
404 >>> list(_slicechunktosize(revlog, [3, 5], 3))
404 >>> list(_slicechunktosize(revlog, [3, 5], 3))
405 [[3], [5]]
405 [[3], [5]]
406 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
406 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
407 [[3], [5]]
407 [[3], [5]]
408 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
408 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
409 [[5], [6, 7, 8]]
409 [[5], [6, 7, 8]]
410 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
410 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
411 [[0], [1, 2], [3], [5], [6, 7, 8]]
411 [[0], [1, 2], [3], [5], [6, 7, 8]]
412
412
413 Case with too large individual chunk (must return valid chunk)
413 Case with too large individual chunk (must return valid chunk)
414 >>> list(_slicechunktosize(revlog, [0, 1], 2))
414 >>> list(_slicechunktosize(revlog, [0, 1], 2))
415 [[0], [1]]
415 [[0], [1]]
416 >>> list(_slicechunktosize(revlog, [1, 3], 1))
416 >>> list(_slicechunktosize(revlog, [1, 3], 1))
417 [[1], [3]]
417 [[1], [3]]
418 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
418 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
419 [[3], [5]]
419 [[3], [5]]
420 """
420 """
421 assert targetsize is None or 0 <= targetsize
421 assert targetsize is None or 0 <= targetsize
422 if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
422 if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
423 yield revs
423 yield revs
424 return
424 return
425
425
426 startrevidx = 0
426 startrevidx = 0
427 startdata = revlog.start(revs[0])
427 startdata = revlog.start(revs[0])
428 endrevidx = 0
428 endrevidx = 0
429 iterrevs = enumerate(revs)
429 iterrevs = enumerate(revs)
430 next(iterrevs) # skip first rev.
430 next(iterrevs) # skip first rev.
431 for idx, r in iterrevs:
431 for idx, r in iterrevs:
432 span = revlog.end(r) - startdata
432 span = revlog.end(r) - startdata
433 if span <= targetsize:
433 if span <= targetsize:
434 endrevidx = idx
434 endrevidx = idx
435 else:
435 else:
436 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
436 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
437 if chunk:
437 if chunk:
438 yield chunk
438 yield chunk
439 startrevidx = idx
439 startrevidx = idx
440 startdata = revlog.start(r)
440 startdata = revlog.start(r)
441 endrevidx = idx
441 endrevidx = idx
442 yield _trimchunk(revlog, revs, startrevidx)
442 yield _trimchunk(revlog, revs, startrevidx)
443
443
444 def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
444 def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
445 mingapsize=0):
445 mingapsize=0):
446 """slice revs to reduce the amount of unrelated data to be read from disk.
446 """slice revs to reduce the amount of unrelated data to be read from disk.
447
447
448 ``revs`` is sliced into groups that should be read in one time.
448 ``revs`` is sliced into groups that should be read in one time.
449 Assume that revs are sorted.
449 Assume that revs are sorted.
450
450
451 ``deltainfo`` is a _deltainfo instance of a revision that we would append
451 ``deltainfo`` is a _deltainfo instance of a revision that we would append
452 to the top of the revlog.
452 to the top of the revlog.
453
453
454 The initial chunk is sliced until the overall density (payload/chunks-span
454 The initial chunk is sliced until the overall density (payload/chunks-span
455 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
455 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
456 skipped.
456 skipped.
457
457
458 >>> revlog = _testrevlog([
458 >>> revlog = _testrevlog([
459 ... 5, #00 (5)
459 ... 5, #00 (5)
460 ... 10, #01 (5)
460 ... 10, #01 (5)
461 ... 12, #02 (2)
461 ... 12, #02 (2)
462 ... 12, #03 (empty)
462 ... 12, #03 (empty)
463 ... 27, #04 (15)
463 ... 27, #04 (15)
464 ... 31, #05 (4)
464 ... 31, #05 (4)
465 ... 31, #06 (empty)
465 ... 31, #06 (empty)
466 ... 42, #07 (11)
466 ... 42, #07 (11)
467 ... 47, #08 (5)
467 ... 47, #08 (5)
468 ... 47, #09 (empty)
468 ... 47, #09 (empty)
469 ... 48, #10 (1)
469 ... 48, #10 (1)
470 ... 51, #11 (3)
470 ... 51, #11 (3)
471 ... 74, #12 (23)
471 ... 74, #12 (23)
472 ... 85, #13 (11)
472 ... 85, #13 (11)
473 ... 86, #14 (1)
473 ... 86, #14 (1)
474 ... 91, #15 (5)
474 ... 91, #15 (5)
475 ... ])
475 ... ])
476
476
477 >>> list(_slicechunktodensity(revlog, list(range(16))))
477 >>> list(_slicechunktodensity(revlog, list(range(16))))
478 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
478 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
479 >>> list(_slicechunktodensity(revlog, [0, 15]))
479 >>> list(_slicechunktodensity(revlog, [0, 15]))
480 [[0], [15]]
480 [[0], [15]]
481 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
481 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
482 [[0], [11], [15]]
482 [[0], [11], [15]]
483 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
483 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
484 [[0], [11, 13, 15]]
484 [[0], [11, 13, 15]]
485 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
485 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
486 [[1, 2], [5, 8, 10, 11], [14]]
486 [[1, 2], [5, 8, 10, 11], [14]]
487 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
487 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
488 ... mingapsize=20))
488 ... mingapsize=20))
489 [[1, 2, 3, 5, 8, 10, 11], [14]]
489 [[1, 2, 3, 5, 8, 10, 11], [14]]
490 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
490 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
491 ... targetdensity=0.95))
491 ... targetdensity=0.95))
492 [[1, 2], [5], [8, 10, 11], [14]]
492 [[1, 2], [5], [8, 10, 11], [14]]
493 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
493 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
494 ... targetdensity=0.95, mingapsize=12))
494 ... targetdensity=0.95, mingapsize=12))
495 [[1, 2], [5, 8, 10, 11], [14]]
495 [[1, 2], [5, 8, 10, 11], [14]]
496 """
496 """
497 start = revlog.start
497 start = revlog.start
498 length = revlog.length
498 length = revlog.length
499
499
500 if len(revs) <= 1:
500 if len(revs) <= 1:
501 yield revs
501 yield revs
502 return
502 return
503
503
504 nextrev = len(revlog)
504 nextrev = len(revlog)
505 nextoffset = revlog.end(nextrev - 1)
505 nextoffset = revlog.end(nextrev - 1)
506
506
507 if deltainfo is None:
507 if deltainfo is None:
508 deltachainspan = _segmentspan(revlog, revs)
508 deltachainspan = _segmentspan(revlog, revs)
509 chainpayload = sum(length(r) for r in revs)
509 chainpayload = sum(length(r) for r in revs)
510 else:
510 else:
511 deltachainspan = deltainfo.distance
511 deltachainspan = deltainfo.distance
512 chainpayload = deltainfo.compresseddeltalen
512 chainpayload = deltainfo.compresseddeltalen
513
513
514 if deltachainspan < mingapsize:
514 if deltachainspan < mingapsize:
515 yield revs
515 yield revs
516 return
516 return
517
517
518 readdata = deltachainspan
518 readdata = deltachainspan
519
519
520 if deltachainspan:
520 if deltachainspan:
521 density = chainpayload / float(deltachainspan)
521 density = chainpayload / float(deltachainspan)
522 else:
522 else:
523 density = 1.0
523 density = 1.0
524
524
525 if density >= targetdensity:
525 if density >= targetdensity:
526 yield revs
526 yield revs
527 return
527 return
528
528
529 if deltainfo is not None:
529 if deltainfo is not None:
530 revs = list(revs)
530 revs = list(revs)
531 revs.append(nextrev)
531 revs.append(nextrev)
532
532
533 # Store the gaps in a heap to have them sorted by decreasing size
533 # Store the gaps in a heap to have them sorted by decreasing size
534 gapsheap = []
534 gapsheap = []
535 heapq.heapify(gapsheap)
535 heapq.heapify(gapsheap)
536 prevend = None
536 prevend = None
537 for i, rev in enumerate(revs):
537 for i, rev in enumerate(revs):
538 if rev < nextrev:
538 if rev < nextrev:
539 revstart = start(rev)
539 revstart = start(rev)
540 revlen = length(rev)
540 revlen = length(rev)
541 else:
541 else:
542 revstart = nextoffset
542 revstart = nextoffset
543 revlen = deltainfo.deltalen
543 revlen = deltainfo.deltalen
544
544
545 # Skip empty revisions to form larger holes
545 # Skip empty revisions to form larger holes
546 if revlen == 0:
546 if revlen == 0:
547 continue
547 continue
548
548
549 if prevend is not None:
549 if prevend is not None:
550 gapsize = revstart - prevend
550 gapsize = revstart - prevend
551 # only consider holes that are large enough
551 # only consider holes that are large enough
552 if gapsize > mingapsize:
552 if gapsize > mingapsize:
553 heapq.heappush(gapsheap, (-gapsize, i))
553 heapq.heappush(gapsheap, (-gapsize, i))
554
554
555 prevend = revstart + revlen
555 prevend = revstart + revlen
556
556
557 # Collect the indices of the largest holes until the density is acceptable
557 # Collect the indices of the largest holes until the density is acceptable
558 indicesheap = []
558 indicesheap = []
559 heapq.heapify(indicesheap)
559 heapq.heapify(indicesheap)
560 while gapsheap and density < targetdensity:
560 while gapsheap and density < targetdensity:
561 oppgapsize, gapidx = heapq.heappop(gapsheap)
561 oppgapsize, gapidx = heapq.heappop(gapsheap)
562
562
563 heapq.heappush(indicesheap, gapidx)
563 heapq.heappush(indicesheap, gapidx)
564
564
565 # the gap sizes are stored as negatives to be sorted decreasingly
565 # the gap sizes are stored as negatives to be sorted decreasingly
566 # by the heap
566 # by the heap
567 readdata -= (-oppgapsize)
567 readdata -= (-oppgapsize)
568 if readdata > 0:
568 if readdata > 0:
569 density = chainpayload / float(readdata)
569 density = chainpayload / float(readdata)
570 else:
570 else:
571 density = 1.0
571 density = 1.0
572
572
573 # Cut the revs at collected indices
573 # Cut the revs at collected indices
574 previdx = 0
574 previdx = 0
575 while indicesheap:
575 while indicesheap:
576 idx = heapq.heappop(indicesheap)
576 idx = heapq.heappop(indicesheap)
577
577
578 chunk = _trimchunk(revlog, revs, previdx, idx)
578 chunk = _trimchunk(revlog, revs, previdx, idx)
579 if chunk:
579 if chunk:
580 yield chunk
580 yield chunk
581
581
582 previdx = idx
582 previdx = idx
583
583
584 chunk = _trimchunk(revlog, revs, previdx)
584 chunk = _trimchunk(revlog, revs, previdx)
585 if chunk:
585 if chunk:
586 yield chunk
586 yield chunk
587
587
588 @attr.s(slots=True, frozen=True)
588 @attr.s(slots=True, frozen=True)
589 class _deltainfo(object):
589 class _deltainfo(object):
590 distance = attr.ib()
590 distance = attr.ib()
591 deltalen = attr.ib()
591 deltalen = attr.ib()
592 data = attr.ib()
592 data = attr.ib()
593 base = attr.ib()
593 base = attr.ib()
594 chainbase = attr.ib()
594 chainbase = attr.ib()
595 chainlen = attr.ib()
595 chainlen = attr.ib()
596 compresseddeltalen = attr.ib()
596 compresseddeltalen = attr.ib()
597
597
598 class _deltacomputer(object):
598 class _deltacomputer(object):
599 def __init__(self, revlog):
599 def __init__(self, revlog):
600 self.revlog = revlog
600 self.revlog = revlog
601
601
602 def _getcandidaterevs(self, p1, p2, cachedelta):
602 def _getcandidaterevs(self, p1, p2, cachedelta):
603 """
603 """
604 Provides revisions that present an interest to be diffed against,
604 Provides revisions that present an interest to be diffed against,
605 grouped by level of easiness.
605 grouped by level of easiness.
606 """
606 """
607 revlog = self.revlog
607 revlog = self.revlog
608 gdelta = revlog._generaldelta
608 gdelta = revlog._generaldelta
609 curr = len(revlog)
609 curr = len(revlog)
610 prev = curr - 1
610 prev = curr - 1
611 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
611 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
612
612
613 # should we try to build a delta?
613 # should we try to build a delta?
614 if prev != nullrev and revlog.storedeltachains:
614 if prev != nullrev and revlog.storedeltachains:
615 tested = set()
615 tested = set()
616 # This condition is true most of the time when processing
616 # This condition is true most of the time when processing
617 # changegroup data into a generaldelta repo. The only time it
617 # changegroup data into a generaldelta repo. The only time it
618 # isn't true is if this is the first revision in a delta chain
618 # isn't true is if this is the first revision in a delta chain
619 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
619 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
620 if cachedelta and gdelta and revlog._lazydeltabase:
620 if cachedelta and gdelta and revlog._lazydeltabase:
621 # Assume what we received from the server is a good choice
621 # Assume what we received from the server is a good choice
622 # build delta will reuse the cache
622 # build delta will reuse the cache
623 yield (cachedelta[0],)
623 yield (cachedelta[0],)
624 tested.add(cachedelta[0])
624 tested.add(cachedelta[0])
625
625
626 if gdelta:
626 if gdelta:
627 # exclude already lazy tested base if any
627 # exclude already lazy tested base if any
628 parents = [p for p in (p1r, p2r)
628 parents = [p for p in (p1r, p2r)
629 if p != nullrev and p not in tested]
629 if p != nullrev and p not in tested]
630
630
631 if not revlog._deltabothparents and len(parents) == 2:
631 if not revlog._deltabothparents and len(parents) == 2:
632 parents.sort()
632 parents.sort()
633 # To minimize the chance of having to build a fulltext,
633 # To minimize the chance of having to build a fulltext,
634 # pick first whichever parent is closest to us (max rev)
634 # pick first whichever parent is closest to us (max rev)
635 yield (parents[1],)
635 yield (parents[1],)
636 # then the other one (min rev) if the first did not fit
636 # then the other one (min rev) if the first did not fit
637 yield (parents[0],)
637 yield (parents[0],)
638 tested.update(parents)
638 tested.update(parents)
639 elif len(parents) > 0:
639 elif len(parents) > 0:
640 # Test all parents (1 or 2), and keep the best candidate
640 # Test all parents (1 or 2), and keep the best candidate
641 yield parents
641 yield parents
642 tested.update(parents)
642 tested.update(parents)
643
643
644 if prev not in tested:
644 if prev not in tested:
645 # other approach failed try against prev to hopefully save us a
645 # other approach failed try against prev to hopefully save us a
646 # fulltext.
646 # fulltext.
647 yield (prev,)
647 yield (prev,)
648 tested.add(prev)
648 tested.add(prev)
649
649
650 def buildtext(self, revinfo, fh):
650 def buildtext(self, revinfo, fh):
651 """Builds a fulltext version of a revision
651 """Builds a fulltext version of a revision
652
652
653 revinfo: _revisioninfo instance that contains all needed info
653 revinfo: _revisioninfo instance that contains all needed info
654 fh: file handle to either the .i or the .d revlog file,
654 fh: file handle to either the .i or the .d revlog file,
655 depending on whether it is inlined or not
655 depending on whether it is inlined or not
656 """
656 """
657 btext = revinfo.btext
657 btext = revinfo.btext
658 if btext[0] is not None:
658 if btext[0] is not None:
659 return btext[0]
659 return btext[0]
660
660
661 revlog = self.revlog
661 revlog = self.revlog
662 cachedelta = revinfo.cachedelta
662 cachedelta = revinfo.cachedelta
663 flags = revinfo.flags
663 flags = revinfo.flags
664 node = revinfo.node
664 node = revinfo.node
665
665
666 baserev = cachedelta[0]
666 baserev = cachedelta[0]
667 delta = cachedelta[1]
667 delta = cachedelta[1]
668 # special case deltas which replace entire base; no need to decode
668 # special case deltas which replace entire base; no need to decode
669 # base revision. this neatly avoids censored bases, which throw when
669 # base revision. this neatly avoids censored bases, which throw when
670 # they're decoded.
670 # they're decoded.
671 hlen = struct.calcsize(">lll")
671 hlen = struct.calcsize(">lll")
672 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
672 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
673 len(delta) - hlen):
673 len(delta) - hlen):
674 btext[0] = delta[hlen:]
674 btext[0] = delta[hlen:]
675 else:
675 else:
676 # deltabase is rawtext before changed by flag processors, which is
676 # deltabase is rawtext before changed by flag processors, which is
677 # equivalent to non-raw text
677 # equivalent to non-raw text
678 basetext = revlog.revision(baserev, _df=fh, raw=False)
678 basetext = revlog.revision(baserev, _df=fh, raw=False)
679 btext[0] = mdiff.patch(basetext, delta)
679 btext[0] = mdiff.patch(basetext, delta)
680
680
681 try:
681 try:
682 res = revlog._processflags(btext[0], flags, 'read', raw=True)
682 res = revlog._processflags(btext[0], flags, 'read', raw=True)
683 btext[0], validatehash = res
683 btext[0], validatehash = res
684 if validatehash:
684 if validatehash:
685 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
685 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
686 if flags & REVIDX_ISCENSORED:
686 if flags & REVIDX_ISCENSORED:
687 raise RevlogError(_('node %s is not censored') % node)
687 raise RevlogError(_('node %s is not censored') % node)
688 except CensoredNodeError:
688 except CensoredNodeError:
689 # must pass the censored index flag to add censored revisions
689 # must pass the censored index flag to add censored revisions
690 if not flags & REVIDX_ISCENSORED:
690 if not flags & REVIDX_ISCENSORED:
691 raise
691 raise
692 return btext[0]
692 return btext[0]
693
693
694 def _builddeltadiff(self, base, revinfo, fh):
694 def _builddeltadiff(self, base, revinfo, fh):
695 revlog = self.revlog
695 revlog = self.revlog
696 t = self.buildtext(revinfo, fh)
696 t = self.buildtext(revinfo, fh)
697 if revlog.iscensored(base):
697 if revlog.iscensored(base):
698 # deltas based on a censored revision must replace the
698 # deltas based on a censored revision must replace the
699 # full content in one patch, so delta works everywhere
699 # full content in one patch, so delta works everywhere
700 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
700 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
701 delta = header + t
701 delta = header + t
702 else:
702 else:
703 ptext = revlog.revision(base, _df=fh, raw=True)
703 ptext = revlog.revision(base, _df=fh, raw=True)
704 delta = mdiff.textdiff(ptext, t)
704 delta = mdiff.textdiff(ptext, t)
705
705
706 return delta
706 return delta
707
707
708 def _builddeltainfo(self, revinfo, base, fh):
708 def _builddeltainfo(self, revinfo, base, fh):
709 # can we use the cached delta?
709 # can we use the cached delta?
710 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
710 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
711 delta = revinfo.cachedelta[1]
711 delta = revinfo.cachedelta[1]
712 else:
712 else:
713 delta = self._builddeltadiff(base, revinfo, fh)
713 delta = self._builddeltadiff(base, revinfo, fh)
714 revlog = self.revlog
714 revlog = self.revlog
715 header, data = revlog.compress(delta)
715 header, data = revlog.compress(delta)
716 deltalen = len(header) + len(data)
716 deltalen = len(header) + len(data)
717 chainbase = revlog.chainbase(base)
717 chainbase = revlog.chainbase(base)
718 offset = revlog.end(len(revlog) - 1)
718 offset = revlog.end(len(revlog) - 1)
719 dist = deltalen + offset - revlog.start(chainbase)
719 dist = deltalen + offset - revlog.start(chainbase)
720 if revlog._generaldelta:
720 if revlog._generaldelta:
721 deltabase = base
721 deltabase = base
722 else:
722 else:
723 deltabase = chainbase
723 deltabase = chainbase
724 chainlen, compresseddeltalen = revlog._chaininfo(base)
724 chainlen, compresseddeltalen = revlog._chaininfo(base)
725 chainlen += 1
725 chainlen += 1
726 compresseddeltalen += deltalen
726 compresseddeltalen += deltalen
727 return _deltainfo(dist, deltalen, (header, data), deltabase,
727 return _deltainfo(dist, deltalen, (header, data), deltabase,
728 chainbase, chainlen, compresseddeltalen)
728 chainbase, chainlen, compresseddeltalen)
729
729
730 def finddeltainfo(self, revinfo, fh):
730 def finddeltainfo(self, revinfo, fh):
731 """Find an acceptable delta against a candidate revision
731 """Find an acceptable delta against a candidate revision
732
732
733 revinfo: information about the revision (instance of _revisioninfo)
733 revinfo: information about the revision (instance of _revisioninfo)
734 fh: file handle to either the .i or the .d revlog file,
734 fh: file handle to either the .i or the .d revlog file,
735 depending on whether it is inlined or not
735 depending on whether it is inlined or not
736
736
737 Returns the first acceptable candidate revision, as ordered by
737 Returns the first acceptable candidate revision, as ordered by
738 _getcandidaterevs
738 _getcandidaterevs
739 """
739 """
740 cachedelta = revinfo.cachedelta
740 cachedelta = revinfo.cachedelta
741 p1 = revinfo.p1
741 p1 = revinfo.p1
742 p2 = revinfo.p2
742 p2 = revinfo.p2
743 revlog = self.revlog
743 revlog = self.revlog
744
744
745 deltainfo = None
745 deltainfo = None
746 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
746 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
747 nominateddeltas = []
747 nominateddeltas = []
748 for candidaterev in candidaterevs:
748 for candidaterev in candidaterevs:
749 # no delta for rawtext-changing revs (see "candelta" for why)
749 # no delta for rawtext-changing revs (see "candelta" for why)
750 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
750 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
751 continue
751 continue
752 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
752 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
753 if revlog._isgooddeltainfo(candidatedelta, revinfo):
753 if revlog._isgooddeltainfo(candidatedelta, revinfo):
754 nominateddeltas.append(candidatedelta)
754 nominateddeltas.append(candidatedelta)
755 if nominateddeltas:
755 if nominateddeltas:
756 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
756 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
757 break
757 break
758
758
759 return deltainfo
759 return deltainfo
760
760
761 @attr.s(slots=True, frozen=True)
761 @attr.s(slots=True, frozen=True)
762 class _revisioninfo(object):
762 class _revisioninfo(object):
763 """Information about a revision that allows building its fulltext
763 """Information about a revision that allows building its fulltext
764 node: expected hash of the revision
764 node: expected hash of the revision
765 p1, p2: parent revs of the revision
765 p1, p2: parent revs of the revision
766 btext: built text cache consisting of a one-element list
766 btext: built text cache consisting of a one-element list
767 cachedelta: (baserev, uncompressed_delta) or None
767 cachedelta: (baserev, uncompressed_delta) or None
768 flags: flags associated to the revision storage
768 flags: flags associated to the revision storage
769
769
770 One of btext[0] or cachedelta must be set.
770 One of btext[0] or cachedelta must be set.
771 """
771 """
772 node = attr.ib()
772 node = attr.ib()
773 p1 = attr.ib()
773 p1 = attr.ib()
774 p2 = attr.ib()
774 p2 = attr.ib()
775 btext = attr.ib()
775 btext = attr.ib()
776 textlen = attr.ib()
776 textlen = attr.ib()
777 cachedelta = attr.ib()
777 cachedelta = attr.ib()
778 flags = attr.ib()
778 flags = attr.ib()
779
779
780 # index v0:
780 # index v0:
781 # 4 bytes: offset
781 # 4 bytes: offset
782 # 4 bytes: compressed length
782 # 4 bytes: compressed length
783 # 4 bytes: base rev
783 # 4 bytes: base rev
784 # 4 bytes: link rev
784 # 4 bytes: link rev
785 # 20 bytes: parent 1 nodeid
785 # 20 bytes: parent 1 nodeid
786 # 20 bytes: parent 2 nodeid
786 # 20 bytes: parent 2 nodeid
787 # 20 bytes: nodeid
787 # 20 bytes: nodeid
788 indexformatv0 = struct.Struct(">4l20s20s20s")
788 indexformatv0 = struct.Struct(">4l20s20s20s")
789 indexformatv0_pack = indexformatv0.pack
789 indexformatv0_pack = indexformatv0.pack
790 indexformatv0_unpack = indexformatv0.unpack
790 indexformatv0_unpack = indexformatv0.unpack
791
791
792 class revlogoldio(object):
792 class revlogoldio(object):
793 def __init__(self):
793 def __init__(self):
794 self.size = indexformatv0.size
794 self.size = indexformatv0.size
795
795
796 def parseindex(self, data, inline):
796 def parseindex(self, data, inline):
797 s = self.size
797 s = self.size
798 index = []
798 index = []
799 nodemap = {nullid: nullrev}
799 nodemap = {nullid: nullrev}
800 n = off = 0
800 n = off = 0
801 l = len(data)
801 l = len(data)
802 while off + s <= l:
802 while off + s <= l:
803 cur = data[off:off + s]
803 cur = data[off:off + s]
804 off += s
804 off += s
805 e = indexformatv0_unpack(cur)
805 e = indexformatv0_unpack(cur)
806 # transform to revlogv1 format
806 # transform to revlogv1 format
807 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
807 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
808 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
808 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
809 index.append(e2)
809 index.append(e2)
810 nodemap[e[6]] = n
810 nodemap[e[6]] = n
811 n += 1
811 n += 1
812
812
813 # add the magic null revision at -1
813 # add the magic null revision at -1
814 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
814 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
815
815
816 return index, nodemap, None
816 return index, nodemap, None
817
817
818 def packentry(self, entry, node, version, rev):
818 def packentry(self, entry, node, version, rev):
819 if gettype(entry[0]):
819 if gettype(entry[0]):
820 raise RevlogError(_('index entry flags need revlog version 1'))
820 raise RevlogError(_('index entry flags need revlog version 1'))
821 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
821 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
822 node(entry[5]), node(entry[6]), entry[7])
822 node(entry[5]), node(entry[6]), entry[7])
823 return indexformatv0_pack(*e2)
823 return indexformatv0_pack(*e2)
824
824
825 # index ng:
825 # index ng:
826 # 6 bytes: offset
826 # 6 bytes: offset
827 # 2 bytes: flags
827 # 2 bytes: flags
828 # 4 bytes: compressed length
828 # 4 bytes: compressed length
829 # 4 bytes: uncompressed length
829 # 4 bytes: uncompressed length
830 # 4 bytes: base rev
830 # 4 bytes: base rev
831 # 4 bytes: link rev
831 # 4 bytes: link rev
832 # 4 bytes: parent 1 rev
832 # 4 bytes: parent 1 rev
833 # 4 bytes: parent 2 rev
833 # 4 bytes: parent 2 rev
834 # 32 bytes: nodeid
834 # 32 bytes: nodeid
835 indexformatng = struct.Struct(">Qiiiiii20s12x")
835 indexformatng = struct.Struct(">Qiiiiii20s12x")
836 indexformatng_pack = indexformatng.pack
836 indexformatng_pack = indexformatng.pack
837 versionformat = struct.Struct(">I")
837 versionformat = struct.Struct(">I")
838 versionformat_pack = versionformat.pack
838 versionformat_pack = versionformat.pack
839 versionformat_unpack = versionformat.unpack
839 versionformat_unpack = versionformat.unpack
840
840
841 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
841 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
842 # signed integer)
842 # signed integer)
843 _maxentrysize = 0x7fffffff
843 _maxentrysize = 0x7fffffff
844
844
845 class revlogio(object):
845 class revlogio(object):
846 def __init__(self):
846 def __init__(self):
847 self.size = indexformatng.size
847 self.size = indexformatng.size
848
848
849 def parseindex(self, data, inline):
849 def parseindex(self, data, inline):
850 # call the C implementation to parse the index data
850 # call the C implementation to parse the index data
851 index, cache = parsers.parse_index2(data, inline)
851 index, cache = parsers.parse_index2(data, inline)
852 return index, getattr(index, 'nodemap', None), cache
852 return index, getattr(index, 'nodemap', None), cache
853
853
854 def packentry(self, entry, node, version, rev):
854 def packentry(self, entry, node, version, rev):
855 p = indexformatng_pack(*entry)
855 p = indexformatng_pack(*entry)
856 if rev == 0:
856 if rev == 0:
857 p = versionformat_pack(version) + p[4:]
857 p = versionformat_pack(version) + p[4:]
858 return p
858 return p
859
859
860 class revlog(object):
860 class revlog(object):
861 """
861 """
862 the underlying revision storage object
862 the underlying revision storage object
863
863
864 A revlog consists of two parts, an index and the revision data.
864 A revlog consists of two parts, an index and the revision data.
865
865
866 The index is a file with a fixed record size containing
866 The index is a file with a fixed record size containing
867 information on each revision, including its nodeid (hash), the
867 information on each revision, including its nodeid (hash), the
868 nodeids of its parents, the position and offset of its data within
868 nodeids of its parents, the position and offset of its data within
869 the data file, and the revision it's based on. Finally, each entry
869 the data file, and the revision it's based on. Finally, each entry
870 contains a linkrev entry that can serve as a pointer to external
870 contains a linkrev entry that can serve as a pointer to external
871 data.
871 data.
872
872
873 The revision data itself is a linear collection of data chunks.
873 The revision data itself is a linear collection of data chunks.
874 Each chunk represents a revision and is usually represented as a
874 Each chunk represents a revision and is usually represented as a
875 delta against the previous chunk. To bound lookup time, runs of
875 delta against the previous chunk. To bound lookup time, runs of
876 deltas are limited to about 2 times the length of the original
876 deltas are limited to about 2 times the length of the original
877 version data. This makes retrieval of a version proportional to
877 version data. This makes retrieval of a version proportional to
878 its size, or O(1) relative to the number of revisions.
878 its size, or O(1) relative to the number of revisions.
879
879
880 Both pieces of the revlog are written to in an append-only
880 Both pieces of the revlog are written to in an append-only
881 fashion, which means we never need to rewrite a file to insert or
881 fashion, which means we never need to rewrite a file to insert or
882 remove data, and can use some simple techniques to avoid the need
882 remove data, and can use some simple techniques to avoid the need
883 for locking while reading.
883 for locking while reading.
884
884
885 If checkambig, indexfile is opened with checkambig=True at
885 If checkambig, indexfile is opened with checkambig=True at
886 writing, to avoid file stat ambiguity.
886 writing, to avoid file stat ambiguity.
887
887
888 If mmaplargeindex is True, and an mmapindexthreshold is set, the
888 If mmaplargeindex is True, and an mmapindexthreshold is set, the
889 index will be mmapped rather than read if it is larger than the
889 index will be mmapped rather than read if it is larger than the
890 configured threshold.
890 configured threshold.
891
891
892 If censorable is True, the revlog can have censored revisions.
892 If censorable is True, the revlog can have censored revisions.
893 """
893 """
894 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
894 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
895 mmaplargeindex=False, censorable=False):
895 mmaplargeindex=False, censorable=False):
896 """
896 """
897 create a revlog object
897 create a revlog object
898
898
899 opener is a function that abstracts the file opening operation
899 opener is a function that abstracts the file opening operation
900 and can be used to implement COW semantics or the like.
900 and can be used to implement COW semantics or the like.
901 """
901 """
902 self.indexfile = indexfile
902 self.indexfile = indexfile
903 self.datafile = datafile or (indexfile[:-2] + ".d")
903 self.datafile = datafile or (indexfile[:-2] + ".d")
904 self.opener = opener
904 self.opener = opener
905 # When True, indexfile is opened with checkambig=True at writing, to
905 # When True, indexfile is opened with checkambig=True at writing, to
906 # avoid file stat ambiguity.
906 # avoid file stat ambiguity.
907 self._checkambig = checkambig
907 self._checkambig = checkambig
908 self._censorable = censorable
908 self._censorable = censorable
909 # 3-tuple of (node, rev, text) for a raw revision.
909 # 3-tuple of (node, rev, text) for a raw revision.
910 self._cache = None
910 self._cache = None
911 # Maps rev to chain base rev.
911 # Maps rev to chain base rev.
912 self._chainbasecache = util.lrucachedict(100)
912 self._chainbasecache = util.lrucachedict(100)
913 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
913 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
914 self._chunkcache = (0, '')
914 self._chunkcache = (0, '')
915 # How much data to read and cache into the raw revlog data cache.
915 # How much data to read and cache into the raw revlog data cache.
916 self._chunkcachesize = 65536
916 self._chunkcachesize = 65536
917 self._maxchainlen = None
917 self._maxchainlen = None
918 self._deltabothparents = True
918 self._deltabothparents = True
919 self.index = []
919 self.index = []
920 # Mapping of partial identifiers to full nodes.
920 # Mapping of partial identifiers to full nodes.
921 self._pcache = {}
921 self._pcache = {}
922 # Mapping of revision integer to full node.
922 # Mapping of revision integer to full node.
923 self._nodecache = {nullid: nullrev}
923 self._nodecache = {nullid: nullrev}
924 self._nodepos = None
924 self._nodepos = None
925 self._compengine = 'zlib'
925 self._compengine = 'zlib'
926 self._maxdeltachainspan = -1
926 self._maxdeltachainspan = -1
927 self._withsparseread = False
927 self._withsparseread = False
928 self._sparserevlog = False
928 self._sparserevlog = False
929 self._srdensitythreshold = 0.50
929 self._srdensitythreshold = 0.50
930 self._srmingapsize = 262144
930 self._srmingapsize = 262144
931
931
932 mmapindexthreshold = None
932 mmapindexthreshold = None
933 v = REVLOG_DEFAULT_VERSION
933 v = REVLOG_DEFAULT_VERSION
934 opts = getattr(opener, 'options', None)
934 opts = getattr(opener, 'options', None)
935 if opts is not None:
935 if opts is not None:
936 if 'revlogv2' in opts:
936 if 'revlogv2' in opts:
937 # version 2 revlogs always use generaldelta.
937 # version 2 revlogs always use generaldelta.
938 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
938 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
939 elif 'revlogv1' in opts:
939 elif 'revlogv1' in opts:
940 if 'generaldelta' in opts:
940 if 'generaldelta' in opts:
941 v |= FLAG_GENERALDELTA
941 v |= FLAG_GENERALDELTA
942 else:
942 else:
943 v = 0
943 v = 0
944 if 'chunkcachesize' in opts:
944 if 'chunkcachesize' in opts:
945 self._chunkcachesize = opts['chunkcachesize']
945 self._chunkcachesize = opts['chunkcachesize']
946 if 'maxchainlen' in opts:
946 if 'maxchainlen' in opts:
947 self._maxchainlen = opts['maxchainlen']
947 self._maxchainlen = opts['maxchainlen']
948 if 'deltabothparents' in opts:
948 if 'deltabothparents' in opts:
949 self._deltabothparents = opts['deltabothparents']
949 self._deltabothparents = opts['deltabothparents']
950 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
950 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
951 if 'compengine' in opts:
951 if 'compengine' in opts:
952 self._compengine = opts['compengine']
952 self._compengine = opts['compengine']
953 if 'maxdeltachainspan' in opts:
953 if 'maxdeltachainspan' in opts:
954 self._maxdeltachainspan = opts['maxdeltachainspan']
954 self._maxdeltachainspan = opts['maxdeltachainspan']
955 if mmaplargeindex and 'mmapindexthreshold' in opts:
955 if mmaplargeindex and 'mmapindexthreshold' in opts:
956 mmapindexthreshold = opts['mmapindexthreshold']
956 mmapindexthreshold = opts['mmapindexthreshold']
957 self._sparserevlog = bool(opts.get('sparse-revlog', False))
957 self._sparserevlog = bool(opts.get('sparse-revlog', False))
958 withsparseread = bool(opts.get('with-sparse-read', False))
958 withsparseread = bool(opts.get('with-sparse-read', False))
959 # sparse-revlog forces sparse-read
959 # sparse-revlog forces sparse-read
960 self._withsparseread = self._sparserevlog or withsparseread
960 self._withsparseread = self._sparserevlog or withsparseread
961 if 'sparse-read-density-threshold' in opts:
961 if 'sparse-read-density-threshold' in opts:
962 self._srdensitythreshold = opts['sparse-read-density-threshold']
962 self._srdensitythreshold = opts['sparse-read-density-threshold']
963 if 'sparse-read-min-gap-size' in opts:
963 if 'sparse-read-min-gap-size' in opts:
964 self._srmingapsize = opts['sparse-read-min-gap-size']
964 self._srmingapsize = opts['sparse-read-min-gap-size']
965
965
966 if self._chunkcachesize <= 0:
966 if self._chunkcachesize <= 0:
967 raise RevlogError(_('revlog chunk cache size %r is not greater '
967 raise RevlogError(_('revlog chunk cache size %r is not greater '
968 'than 0') % self._chunkcachesize)
968 'than 0') % self._chunkcachesize)
969 elif self._chunkcachesize & (self._chunkcachesize - 1):
969 elif self._chunkcachesize & (self._chunkcachesize - 1):
970 raise RevlogError(_('revlog chunk cache size %r is not a power '
970 raise RevlogError(_('revlog chunk cache size %r is not a power '
971 'of 2') % self._chunkcachesize)
971 'of 2') % self._chunkcachesize)
972
972
973 indexdata = ''
973 indexdata = ''
974 self._initempty = True
974 self._initempty = True
975 try:
975 try:
976 with self._indexfp() as f:
976 with self._indexfp() as f:
977 if (mmapindexthreshold is not None and
977 if (mmapindexthreshold is not None and
978 self.opener.fstat(f).st_size >= mmapindexthreshold):
978 self.opener.fstat(f).st_size >= mmapindexthreshold):
979 indexdata = util.buffer(util.mmapread(f))
979 indexdata = util.buffer(util.mmapread(f))
980 else:
980 else:
981 indexdata = f.read()
981 indexdata = f.read()
982 if len(indexdata) > 0:
982 if len(indexdata) > 0:
983 v = versionformat_unpack(indexdata[:4])[0]
983 v = versionformat_unpack(indexdata[:4])[0]
984 self._initempty = False
984 self._initempty = False
985 except IOError as inst:
985 except IOError as inst:
986 if inst.errno != errno.ENOENT:
986 if inst.errno != errno.ENOENT:
987 raise
987 raise
988
988
989 self.version = v
989 self.version = v
990 self._inline = v & FLAG_INLINE_DATA
990 self._inline = v & FLAG_INLINE_DATA
991 self._generaldelta = v & FLAG_GENERALDELTA
991 self._generaldelta = v & FLAG_GENERALDELTA
992 flags = v & ~0xFFFF
992 flags = v & ~0xFFFF
993 fmt = v & 0xFFFF
993 fmt = v & 0xFFFF
994 if fmt == REVLOGV0:
994 if fmt == REVLOGV0:
995 if flags:
995 if flags:
996 raise RevlogError(_('unknown flags (%#04x) in version %d '
996 raise RevlogError(_('unknown flags (%#04x) in version %d '
997 'revlog %s') %
997 'revlog %s') %
998 (flags >> 16, fmt, self.indexfile))
998 (flags >> 16, fmt, self.indexfile))
999 elif fmt == REVLOGV1:
999 elif fmt == REVLOGV1:
1000 if flags & ~REVLOGV1_FLAGS:
1000 if flags & ~REVLOGV1_FLAGS:
1001 raise RevlogError(_('unknown flags (%#04x) in version %d '
1001 raise RevlogError(_('unknown flags (%#04x) in version %d '
1002 'revlog %s') %
1002 'revlog %s') %
1003 (flags >> 16, fmt, self.indexfile))
1003 (flags >> 16, fmt, self.indexfile))
1004 elif fmt == REVLOGV2:
1004 elif fmt == REVLOGV2:
1005 if flags & ~REVLOGV2_FLAGS:
1005 if flags & ~REVLOGV2_FLAGS:
1006 raise RevlogError(_('unknown flags (%#04x) in version %d '
1006 raise RevlogError(_('unknown flags (%#04x) in version %d '
1007 'revlog %s') %
1007 'revlog %s') %
1008 (flags >> 16, fmt, self.indexfile))
1008 (flags >> 16, fmt, self.indexfile))
1009 else:
1009 else:
1010 raise RevlogError(_('unknown version (%d) in revlog %s') %
1010 raise RevlogError(_('unknown version (%d) in revlog %s') %
1011 (fmt, self.indexfile))
1011 (fmt, self.indexfile))
1012
1012
1013 self.storedeltachains = True
1013 self.storedeltachains = True
1014
1014
1015 self._io = revlogio()
1015 self._io = revlogio()
1016 if self.version == REVLOGV0:
1016 if self.version == REVLOGV0:
1017 self._io = revlogoldio()
1017 self._io = revlogoldio()
1018 try:
1018 try:
1019 d = self._io.parseindex(indexdata, self._inline)
1019 d = self._io.parseindex(indexdata, self._inline)
1020 except (ValueError, IndexError):
1020 except (ValueError, IndexError):
1021 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
1021 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
1022 self.index, nodemap, self._chunkcache = d
1022 self.index, nodemap, self._chunkcache = d
1023 if nodemap is not None:
1023 if nodemap is not None:
1024 self.nodemap = self._nodecache = nodemap
1024 self.nodemap = self._nodecache = nodemap
1025 if not self._chunkcache:
1025 if not self._chunkcache:
1026 self._chunkclear()
1026 self._chunkclear()
1027 # revnum -> (chain-length, sum-delta-length)
1027 # revnum -> (chain-length, sum-delta-length)
1028 self._chaininfocache = {}
1028 self._chaininfocache = {}
1029 # revlog header -> revlog compressor
1029 # revlog header -> revlog compressor
1030 self._decompressors = {}
1030 self._decompressors = {}
1031
1031
1032 @util.propertycache
1032 @util.propertycache
1033 def _compressor(self):
1033 def _compressor(self):
1034 return util.compengines[self._compengine].revlogcompressor()
1034 return util.compengines[self._compengine].revlogcompressor()
1035
1035
1036 def _indexfp(self, mode='r'):
1036 def _indexfp(self, mode='r'):
1037 """file object for the revlog's index file"""
1037 """file object for the revlog's index file"""
1038 args = {r'mode': mode}
1038 args = {r'mode': mode}
1039 if mode != 'r':
1039 if mode != 'r':
1040 args[r'checkambig'] = self._checkambig
1040 args[r'checkambig'] = self._checkambig
1041 if mode == 'w':
1041 if mode == 'w':
1042 args[r'atomictemp'] = True
1042 args[r'atomictemp'] = True
1043 return self.opener(self.indexfile, **args)
1043 return self.opener(self.indexfile, **args)
1044
1044
1045 def _datafp(self, mode='r'):
1045 def _datafp(self, mode='r'):
1046 """file object for the revlog's data file"""
1046 """file object for the revlog's data file"""
1047 return self.opener(self.datafile, mode=mode)
1047 return self.opener(self.datafile, mode=mode)
1048
1048
1049 @contextlib.contextmanager
1049 @contextlib.contextmanager
1050 def _datareadfp(self, existingfp=None):
1050 def _datareadfp(self, existingfp=None):
1051 """file object suitable to read data"""
1051 """file object suitable to read data"""
1052 if existingfp is not None:
1052 if existingfp is not None:
1053 yield existingfp
1053 yield existingfp
1054 else:
1054 else:
1055 if self._inline:
1055 if self._inline:
1056 func = self._indexfp
1056 func = self._indexfp
1057 else:
1057 else:
1058 func = self._datafp
1058 func = self._datafp
1059 with func() as fp:
1059 with func() as fp:
1060 yield fp
1060 yield fp
1061
1061
1062 def tip(self):
1062 def tip(self):
1063 return self.node(len(self.index) - 2)
1063 return self.node(len(self.index) - 2)
1064 def __contains__(self, rev):
1064 def __contains__(self, rev):
1065 return 0 <= rev < len(self)
1065 return 0 <= rev < len(self)
1066 def __len__(self):
1066 def __len__(self):
1067 return len(self.index) - 1
1067 return len(self.index) - 1
1068 def __iter__(self):
1068 def __iter__(self):
1069 return iter(xrange(len(self)))
1069 return iter(xrange(len(self)))
1070 def revs(self, start=0, stop=None):
1070 def revs(self, start=0, stop=None):
1071 """iterate over all rev in this revlog (from start to stop)"""
1071 """iterate over all rev in this revlog (from start to stop)"""
1072 step = 1
1072 step = 1
1073 length = len(self)
1073 if stop is not None:
1074 if stop is not None:
1074 if start > stop:
1075 if start > stop:
1075 step = -1
1076 step = -1
1076 stop += step
1077 stop += step
1078 if stop > length:
1079 stop = length
1077 else:
1080 else:
1078 stop = len(self)
1081 stop = length
1079 return xrange(start, stop, step)
1082 return xrange(start, stop, step)
1080
1083
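# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# revs() above iterates revision numbers and, with the change in this diff,
# clamps an ascending ``stop`` to len(revlog) so the trailing nullrev slot
# of the index is never yielded. The same arithmetic as a standalone model:
def _revs(length, start=0, stop=None):
    step = 1
    if stop is not None:
        if start > stop:
            step = -1
        stop += step
        if stop > length:
            stop = length
    else:
        stop = length
    return range(start, stop, step)

# list(_revs(3, stop=3))          -> [0, 1, 2]  (no trailing nullrev)
# list(_revs(3, start=2, stop=0)) -> [2, 1, 0]  (descending iteration)
# --------------------------------------------------------------------------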
1081 @util.propertycache
1084 @util.propertycache
1082 def nodemap(self):
1085 def nodemap(self):
1083 self.rev(self.node(0))
1086 self.rev(self.node(0))
1084 return self._nodecache
1087 return self._nodecache
1085
1088
1086 def hasnode(self, node):
1089 def hasnode(self, node):
1087 try:
1090 try:
1088 self.rev(node)
1091 self.rev(node)
1089 return True
1092 return True
1090 except KeyError:
1093 except KeyError:
1091 return False
1094 return False
1092
1095
1093 def candelta(self, baserev, rev):
1096 def candelta(self, baserev, rev):
1094 """whether two revisions (baserev, rev) can be delta-ed or not"""
1097 """whether two revisions (baserev, rev) can be delta-ed or not"""
1095 # Disable delta if either rev requires a content-changing flag
1098 # Disable delta if either rev requires a content-changing flag
1096 # processor (ex. LFS). This is because such a flag processor can alter
1099 # processor (ex. LFS). This is because such a flag processor can alter
1097 # the rawtext content that the delta will be based on, and two clients
1100 # the rawtext content that the delta will be based on, and two clients
1098 # could have the same revlog node with different flags (i.e. different
1101 # could have the same revlog node with different flags (i.e. different
1099 # rawtext contents) and the delta could be incompatible.
1102 # rawtext contents) and the delta could be incompatible.
1100 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
1103 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
1101 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
1104 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
1102 return False
1105 return False
1103 return True
1106 return True
1104
1107
1105 def clearcaches(self):
1108 def clearcaches(self):
1106 self._cache = None
1109 self._cache = None
1107 self._chainbasecache.clear()
1110 self._chainbasecache.clear()
1108 self._chunkcache = (0, '')
1111 self._chunkcache = (0, '')
1109 self._pcache = {}
1112 self._pcache = {}
1110
1113
1111 try:
1114 try:
1112 self._nodecache.clearcaches()
1115 self._nodecache.clearcaches()
1113 except AttributeError:
1116 except AttributeError:
1114 self._nodecache = {nullid: nullrev}
1117 self._nodecache = {nullid: nullrev}
1115 self._nodepos = None
1118 self._nodepos = None
1116
1119
1117 def rev(self, node):
1120 def rev(self, node):
1118 try:
1121 try:
1119 return self._nodecache[node]
1122 return self._nodecache[node]
1120 except TypeError:
1123 except TypeError:
1121 raise
1124 raise
1122 except RevlogError:
1125 except RevlogError:
1123 # parsers.c radix tree lookup failed
1126 # parsers.c radix tree lookup failed
1124 if node == wdirid or node in wdirfilenodeids:
1127 if node == wdirid or node in wdirfilenodeids:
1125 raise error.WdirUnsupported
1128 raise error.WdirUnsupported
1126 raise LookupError(node, self.indexfile, _('no node'))
1129 raise LookupError(node, self.indexfile, _('no node'))
1127 except KeyError:
1130 except KeyError:
1128 # pure python cache lookup failed
1131 # pure python cache lookup failed
1129 n = self._nodecache
1132 n = self._nodecache
1130 i = self.index
1133 i = self.index
1131 p = self._nodepos
1134 p = self._nodepos
1132 if p is None:
1135 if p is None:
1133 p = len(i) - 2
1136 p = len(i) - 2
1134 else:
1137 else:
1135 assert p < len(i)
1138 assert p < len(i)
1136 for r in xrange(p, -1, -1):
1139 for r in xrange(p, -1, -1):
1137 v = i[r][7]
1140 v = i[r][7]
1138 n[v] = r
1141 n[v] = r
1139 if v == node:
1142 if v == node:
1140 self._nodepos = r - 1
1143 self._nodepos = r - 1
1141 return r
1144 return r
1142 if node == wdirid or node in wdirfilenodeids:
1145 if node == wdirid or node in wdirfilenodeids:
1143 raise error.WdirUnsupported
1146 raise error.WdirUnsupported
1144 raise LookupError(node, self.indexfile, _('no node'))
1147 raise LookupError(node, self.indexfile, _('no node'))
1145
1148
1146 # Accessors for index entries.
1149 # Accessors for index entries.
1147
1150
1148 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
1151 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
1149 # are flags.
1152 # are flags.
1150 def start(self, rev):
1153 def start(self, rev):
1151 return int(self.index[rev][0] >> 16)
1154 return int(self.index[rev][0] >> 16)
1152
1155
1153 def flags(self, rev):
1156 def flags(self, rev):
1154 return self.index[rev][0] & 0xFFFF
1157 return self.index[rev][0] & 0xFFFF
1155
1158
1156 def length(self, rev):
1159 def length(self, rev):
1157 return self.index[rev][1]
1160 return self.index[rev][1]
1158
1161
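# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# start() and flags() above unpack the first index field, which packs a
# 48-bit offset into the data file together with 16 bits of flags. Assumed
# helper names, just to show the round trip:
def _pack_offset_flags(offset, flags):
    return (offset << 16) | (flags & 0xFFFF)

def _unpack_offset_flags(field):
    return field >> 16, field & 0xFFFF

# _unpack_offset_flags(_pack_offset_flags(1024, 0x2)) -> (1024, 2)
# --------------------------------------------------------------------------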
1159 def rawsize(self, rev):
1162 def rawsize(self, rev):
1160 """return the length of the uncompressed text for a given revision"""
1163 """return the length of the uncompressed text for a given revision"""
1161 l = self.index[rev][2]
1164 l = self.index[rev][2]
1162 if l >= 0:
1165 if l >= 0:
1163 return l
1166 return l
1164
1167
1165 t = self.revision(rev, raw=True)
1168 t = self.revision(rev, raw=True)
1166 return len(t)
1169 return len(t)
1167
1170
1168 def size(self, rev):
1171 def size(self, rev):
1169 """length of non-raw text (processed by a "read" flag processor)"""
1172 """length of non-raw text (processed by a "read" flag processor)"""
1170 # fast path: if no "read" flag processor could change the content,
1173 # fast path: if no "read" flag processor could change the content,
1171 # size is rawsize. note: ELLIPSIS is known to not change the content.
1174 # size is rawsize. note: ELLIPSIS is known to not change the content.
1172 flags = self.flags(rev)
1175 flags = self.flags(rev)
1173 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1176 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1174 return self.rawsize(rev)
1177 return self.rawsize(rev)
1175
1178
1176 return len(self.revision(rev, raw=False))
1179 return len(self.revision(rev, raw=False))
1177
1180
1178 def chainbase(self, rev):
1181 def chainbase(self, rev):
1179 base = self._chainbasecache.get(rev)
1182 base = self._chainbasecache.get(rev)
1180 if base is not None:
1183 if base is not None:
1181 return base
1184 return base
1182
1185
1183 index = self.index
1186 index = self.index
1184 iterrev = rev
1187 iterrev = rev
1185 base = index[iterrev][3]
1188 base = index[iterrev][3]
1186 while base != iterrev:
1189 while base != iterrev:
1187 iterrev = base
1190 iterrev = base
1188 base = index[iterrev][3]
1191 base = index[iterrev][3]
1189
1192
1190 self._chainbasecache[rev] = base
1193 self._chainbasecache[rev] = base
1191 return base
1194 return base
1192
1195
1193 def linkrev(self, rev):
1196 def linkrev(self, rev):
1194 return self.index[rev][4]
1197 return self.index[rev][4]
1195
1198
1196 def parentrevs(self, rev):
1199 def parentrevs(self, rev):
1197 try:
1200 try:
1198 entry = self.index[rev]
1201 entry = self.index[rev]
1199 except IndexError:
1202 except IndexError:
1200 if rev == wdirrev:
1203 if rev == wdirrev:
1201 raise error.WdirUnsupported
1204 raise error.WdirUnsupported
1202 raise
1205 raise
1203
1206
1204 return entry[5], entry[6]
1207 return entry[5], entry[6]
1205
1208
1206 def node(self, rev):
1209 def node(self, rev):
1207 try:
1210 try:
1208 return self.index[rev][7]
1211 return self.index[rev][7]
1209 except IndexError:
1212 except IndexError:
1210 if rev == wdirrev:
1213 if rev == wdirrev:
1211 raise error.WdirUnsupported
1214 raise error.WdirUnsupported
1212 raise
1215 raise
1213
1216
1214 # Derived from index values.
1217 # Derived from index values.
1215
1218
1216 def end(self, rev):
1219 def end(self, rev):
1217 return self.start(rev) + self.length(rev)
1220 return self.start(rev) + self.length(rev)
1218
1221
1219 def parents(self, node):
1222 def parents(self, node):
1220 i = self.index
1223 i = self.index
1221 d = i[self.rev(node)]
1224 d = i[self.rev(node)]
1222 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
1225 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
1223
1226
1224 def chainlen(self, rev):
1227 def chainlen(self, rev):
1225 return self._chaininfo(rev)[0]
1228 return self._chaininfo(rev)[0]
1226
1229
1227 def _chaininfo(self, rev):
1230 def _chaininfo(self, rev):
1228 chaininfocache = self._chaininfocache
1231 chaininfocache = self._chaininfocache
1229 if rev in chaininfocache:
1232 if rev in chaininfocache:
1230 return chaininfocache[rev]
1233 return chaininfocache[rev]
1231 index = self.index
1234 index = self.index
1232 generaldelta = self._generaldelta
1235 generaldelta = self._generaldelta
1233 iterrev = rev
1236 iterrev = rev
1234 e = index[iterrev]
1237 e = index[iterrev]
1235 clen = 0
1238 clen = 0
1236 compresseddeltalen = 0
1239 compresseddeltalen = 0
1237 while iterrev != e[3]:
1240 while iterrev != e[3]:
1238 clen += 1
1241 clen += 1
1239 compresseddeltalen += e[1]
1242 compresseddeltalen += e[1]
1240 if generaldelta:
1243 if generaldelta:
1241 iterrev = e[3]
1244 iterrev = e[3]
1242 else:
1245 else:
1243 iterrev -= 1
1246 iterrev -= 1
1244 if iterrev in chaininfocache:
1247 if iterrev in chaininfocache:
1245 t = chaininfocache[iterrev]
1248 t = chaininfocache[iterrev]
1246 clen += t[0]
1249 clen += t[0]
1247 compresseddeltalen += t[1]
1250 compresseddeltalen += t[1]
1248 break
1251 break
1249 e = index[iterrev]
1252 e = index[iterrev]
1250 else:
1253 else:
1251 # Add text length of base since decompressing that also takes
1254 # Add text length of base since decompressing that also takes
1252 # work. For cache hits the length is already included.
1255 # work. For cache hits the length is already included.
1253 compresseddeltalen += e[1]
1256 compresseddeltalen += e[1]
1254 r = (clen, compresseddeltalen)
1257 r = (clen, compresseddeltalen)
1255 chaininfocache[rev] = r
1258 chaininfocache[rev] = r
1256 return r
1259 return r
1257
1260
1258 def _deltachain(self, rev, stoprev=None):
1261 def _deltachain(self, rev, stoprev=None):
1259 """Obtain the delta chain for a revision.
1262 """Obtain the delta chain for a revision.
1260
1263
1261 ``stoprev`` specifies a revision to stop at. If not specified, we
1264 ``stoprev`` specifies a revision to stop at. If not specified, we
1262 stop at the base of the chain.
1265 stop at the base of the chain.
1263
1266
1264 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1267 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1265 revs in ascending order and ``stopped`` is a bool indicating whether
1268 revs in ascending order and ``stopped`` is a bool indicating whether
1266 ``stoprev`` was hit.
1269 ``stoprev`` was hit.
1267 """
1270 """
1268 # Try C implementation.
1271 # Try C implementation.
1269 try:
1272 try:
1270 return self.index.deltachain(rev, stoprev, self._generaldelta)
1273 return self.index.deltachain(rev, stoprev, self._generaldelta)
1271 except AttributeError:
1274 except AttributeError:
1272 pass
1275 pass
1273
1276
1274 chain = []
1277 chain = []
1275
1278
1276 # Alias to prevent attribute lookup in tight loop.
1279 # Alias to prevent attribute lookup in tight loop.
1277 index = self.index
1280 index = self.index
1278 generaldelta = self._generaldelta
1281 generaldelta = self._generaldelta
1279
1282
1280 iterrev = rev
1283 iterrev = rev
1281 e = index[iterrev]
1284 e = index[iterrev]
1282 while iterrev != e[3] and iterrev != stoprev:
1285 while iterrev != e[3] and iterrev != stoprev:
1283 chain.append(iterrev)
1286 chain.append(iterrev)
1284 if generaldelta:
1287 if generaldelta:
1285 iterrev = e[3]
1288 iterrev = e[3]
1286 else:
1289 else:
1287 iterrev -= 1
1290 iterrev -= 1
1288 e = index[iterrev]
1291 e = index[iterrev]
1289
1292
1290 if iterrev == stoprev:
1293 if iterrev == stoprev:
1291 stopped = True
1294 stopped = True
1292 else:
1295 else:
1293 chain.append(iterrev)
1296 chain.append(iterrev)
1294 stopped = False
1297 stopped = False
1295
1298
1296 chain.reverse()
1299 chain.reverse()
1297 return chain, stopped
1300 return chain, stopped
1298
1301
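# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# _deltachain() above walks base pointers until it reaches a revision that
# is its own base (a full snapshot). A toy model over a plain list of base
# pointers, assuming generaldelta-style bases (names are assumptions):
def _toy_deltachain(bases, rev, stoprev=None):
    chain = []
    iterrev = rev
    while bases[iterrev] != iterrev and iterrev != stoprev:
        chain.append(iterrev)
        iterrev = bases[iterrev]
    stopped = iterrev == stoprev
    if not stopped:
        chain.append(iterrev)
    chain.reverse()
    return chain, stopped

# bases = [0, 0, 1, 3, 3]  # rev 2 deltas against 1, which deltas against 0
# _toy_deltachain(bases, 2) -> ([0, 1, 2], False)
# --------------------------------------------------------------------------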
1299 def ancestors(self, revs, stoprev=0, inclusive=False):
1302 def ancestors(self, revs, stoprev=0, inclusive=False):
1300 """Generate the ancestors of 'revs' in reverse topological order.
1303 """Generate the ancestors of 'revs' in reverse topological order.
1301 Does not generate revs lower than stoprev.
1304 Does not generate revs lower than stoprev.
1302
1305
1303 See the documentation for ancestor.lazyancestors for more details."""
1306 See the documentation for ancestor.lazyancestors for more details."""
1304
1307
1305 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
1308 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
1306 inclusive=inclusive)
1309 inclusive=inclusive)
1307
1310
1308 def descendants(self, revs):
1311 def descendants(self, revs):
1309 """Generate the descendants of 'revs' in revision order.
1312 """Generate the descendants of 'revs' in revision order.
1310
1313
1311 Yield a sequence of revision numbers starting with a child of
1314 Yield a sequence of revision numbers starting with a child of
1312 some rev in revs, i.e., each revision is *not* considered a
1315 some rev in revs, i.e., each revision is *not* considered a
1313 descendant of itself. Results are ordered by revision number (a
1316 descendant of itself. Results are ordered by revision number (a
1314 topological sort)."""
1317 topological sort)."""
1315 first = min(revs)
1318 first = min(revs)
1316 if first == nullrev:
1319 if first == nullrev:
1317 for i in self:
1320 for i in self:
1318 yield i
1321 yield i
1319 return
1322 return
1320
1323
1321 seen = set(revs)
1324 seen = set(revs)
1322 for i in self.revs(start=first + 1):
1325 for i in self.revs(start=first + 1):
1323 for x in self.parentrevs(i):
1326 for x in self.parentrevs(i):
1324 if x != nullrev and x in seen:
1327 if x != nullrev and x in seen:
1325 seen.add(i)
1328 seen.add(i)
1326 yield i
1329 yield i
1327 break
1330 break
1328
1331
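# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# descendants() above sweeps forward from min(revs) and marks any revision
# with an already-marked parent. The same idea over a toy parent table
# (tuples of parent revs, -1 meaning "no parent"; names are assumptions):
def _toy_descendants(parentrevs, revs):
    seen = set(revs)
    for i in range(min(revs) + 1, len(parentrevs)):
        if any(p in seen for p in parentrevs[i] if p != -1):
            seen.add(i)
            yield i

# parents = [(-1, -1), (0, -1), (1, -1), (0, -1)]
# list(_toy_descendants(parents, [1])) -> [2]
# --------------------------------------------------------------------------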
1329 def findcommonmissing(self, common=None, heads=None):
1332 def findcommonmissing(self, common=None, heads=None):
1330 """Return a tuple of the ancestors of common and the ancestors of heads
1333 """Return a tuple of the ancestors of common and the ancestors of heads
1331 that are not ancestors of common. In revset terminology, we return the
1334 that are not ancestors of common. In revset terminology, we return the
1332 tuple:
1335 tuple:
1333
1336
1334 ::common, (::heads) - (::common)
1337 ::common, (::heads) - (::common)
1335
1338
1336 The list is sorted by revision number, meaning it is
1339 The list is sorted by revision number, meaning it is
1337 topologically sorted.
1340 topologically sorted.
1338
1341
1339 'heads' and 'common' are both lists of node IDs. If heads is
1342 'heads' and 'common' are both lists of node IDs. If heads is
1340 not supplied, uses all of the revlog's heads. If common is not
1343 not supplied, uses all of the revlog's heads. If common is not
1341 supplied, uses nullid."""
1344 supplied, uses nullid."""
1342 if common is None:
1345 if common is None:
1343 common = [nullid]
1346 common = [nullid]
1344 if heads is None:
1347 if heads is None:
1345 heads = self.heads()
1348 heads = self.heads()
1346
1349
1347 common = [self.rev(n) for n in common]
1350 common = [self.rev(n) for n in common]
1348 heads = [self.rev(n) for n in heads]
1351 heads = [self.rev(n) for n in heads]
1349
1352
1350 # we want the ancestors, but inclusive
1353 # we want the ancestors, but inclusive
1351 class lazyset(object):
1354 class lazyset(object):
1352 def __init__(self, lazyvalues):
1355 def __init__(self, lazyvalues):
1353 self.addedvalues = set()
1356 self.addedvalues = set()
1354 self.lazyvalues = lazyvalues
1357 self.lazyvalues = lazyvalues
1355
1358
1356 def __contains__(self, value):
1359 def __contains__(self, value):
1357 return value in self.addedvalues or value in self.lazyvalues
1360 return value in self.addedvalues or value in self.lazyvalues
1358
1361
1359 def __iter__(self):
1362 def __iter__(self):
1360 added = self.addedvalues
1363 added = self.addedvalues
1361 for r in added:
1364 for r in added:
1362 yield r
1365 yield r
1363 for r in self.lazyvalues:
1366 for r in self.lazyvalues:
1364 if not r in added:
1367 if not r in added:
1365 yield r
1368 yield r
1366
1369
1367 def add(self, value):
1370 def add(self, value):
1368 self.addedvalues.add(value)
1371 self.addedvalues.add(value)
1369
1372
1370 def update(self, values):
1373 def update(self, values):
1371 self.addedvalues.update(values)
1374 self.addedvalues.update(values)
1372
1375
1373 has = lazyset(self.ancestors(common))
1376 has = lazyset(self.ancestors(common))
1374 has.add(nullrev)
1377 has.add(nullrev)
1375 has.update(common)
1378 has.update(common)
1376
1379
1377 # take all ancestors from heads that aren't in has
1380 # take all ancestors from heads that aren't in has
1378 missing = set()
1381 missing = set()
1379 visit = collections.deque(r for r in heads if r not in has)
1382 visit = collections.deque(r for r in heads if r not in has)
1380 while visit:
1383 while visit:
1381 r = visit.popleft()
1384 r = visit.popleft()
1382 if r in missing:
1385 if r in missing:
1383 continue
1386 continue
1384 else:
1387 else:
1385 missing.add(r)
1388 missing.add(r)
1386 for p in self.parentrevs(r):
1389 for p in self.parentrevs(r):
1387 if p not in has:
1390 if p not in has:
1388 visit.append(p)
1391 visit.append(p)
1389 missing = list(missing)
1392 missing = list(missing)
1390 missing.sort()
1393 missing.sort()
1391 return has, [self.node(miss) for miss in missing]
1394 return has, [self.node(miss) for miss in missing]
1392
1395
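# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# The loop above is a plain breadth-first walk over parent pointers that
# collects ancestors of ``heads`` not already known through ``common``. A
# standalone model over a toy parent table (names are assumptions):
import collections  # already imported at module top; repeated so the sketch stands alone

def _toy_missing(parentrevs, has, heads):
    missing = set()
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        missing.add(r)
        for p in parentrevs[r]:
            if p != -1 and p not in has:
                visit.append(p)
    return sorted(missing)

# parents = [(-1, -1), (0, -1), (1, -1)]
# _toy_missing(parents, has={0}, heads=[2]) -> [1, 2]
# --------------------------------------------------------------------------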
1393 def incrementalmissingrevs(self, common=None):
1396 def incrementalmissingrevs(self, common=None):
1394 """Return an object that can be used to incrementally compute the
1397 """Return an object that can be used to incrementally compute the
1395 revision numbers of the ancestors of arbitrary sets that are not
1398 revision numbers of the ancestors of arbitrary sets that are not
1396 ancestors of common. This is an ancestor.incrementalmissingancestors
1399 ancestors of common. This is an ancestor.incrementalmissingancestors
1397 object.
1400 object.
1398
1401
1399 'common' is a list of revision numbers. If common is not supplied, uses
1402 'common' is a list of revision numbers. If common is not supplied, uses
1400 nullrev.
1403 nullrev.
1401 """
1404 """
1402 if common is None:
1405 if common is None:
1403 common = [nullrev]
1406 common = [nullrev]
1404
1407
1405 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1408 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1406
1409
1407 def findmissingrevs(self, common=None, heads=None):
1410 def findmissingrevs(self, common=None, heads=None):
1408 """Return the revision numbers of the ancestors of heads that
1411 """Return the revision numbers of the ancestors of heads that
1409 are not ancestors of common.
1412 are not ancestors of common.
1410
1413
1411 More specifically, return a list of revision numbers corresponding to
1414 More specifically, return a list of revision numbers corresponding to
1412 nodes N such that every N satisfies the following constraints:
1415 nodes N such that every N satisfies the following constraints:
1413
1416
1414 1. N is an ancestor of some node in 'heads'
1417 1. N is an ancestor of some node in 'heads'
1415 2. N is not an ancestor of any node in 'common'
1418 2. N is not an ancestor of any node in 'common'
1416
1419
1417 The list is sorted by revision number, meaning it is
1420 The list is sorted by revision number, meaning it is
1418 topologically sorted.
1421 topologically sorted.
1419
1422
1420 'heads' and 'common' are both lists of revision numbers. If heads is
1423 'heads' and 'common' are both lists of revision numbers. If heads is
1421 not supplied, uses all of the revlog's heads. If common is not
1424 not supplied, uses all of the revlog's heads. If common is not
1422 supplied, uses nullrev."""
1425 supplied, uses nullrev."""
1423 if common is None:
1426 if common is None:
1424 common = [nullrev]
1427 common = [nullrev]
1425 if heads is None:
1428 if heads is None:
1426 heads = self.headrevs()
1429 heads = self.headrevs()
1427
1430
1428 inc = self.incrementalmissingrevs(common=common)
1431 inc = self.incrementalmissingrevs(common=common)
1429 return inc.missingancestors(heads)
1432 return inc.missingancestors(heads)
1430
1433
1431 def findmissing(self, common=None, heads=None):
1434 def findmissing(self, common=None, heads=None):
1432 """Return the ancestors of heads that are not ancestors of common.
1435 """Return the ancestors of heads that are not ancestors of common.
1433
1436
1434 More specifically, return a list of nodes N such that every N
1437 More specifically, return a list of nodes N such that every N
1435 satisfies the following constraints:
1438 satisfies the following constraints:
1436
1439
1437 1. N is an ancestor of some node in 'heads'
1440 1. N is an ancestor of some node in 'heads'
1438 2. N is not an ancestor of any node in 'common'
1441 2. N is not an ancestor of any node in 'common'
1439
1442
1440 The list is sorted by revision number, meaning it is
1443 The list is sorted by revision number, meaning it is
1441 topologically sorted.
1444 topologically sorted.
1442
1445
1443 'heads' and 'common' are both lists of node IDs. If heads is
1446 'heads' and 'common' are both lists of node IDs. If heads is
1444 not supplied, uses all of the revlog's heads. If common is not
1447 not supplied, uses all of the revlog's heads. If common is not
1445 supplied, uses nullid."""
1448 supplied, uses nullid."""
1446 if common is None:
1449 if common is None:
1447 common = [nullid]
1450 common = [nullid]
1448 if heads is None:
1451 if heads is None:
1449 heads = self.heads()
1452 heads = self.heads()
1450
1453
1451 common = [self.rev(n) for n in common]
1454 common = [self.rev(n) for n in common]
1452 heads = [self.rev(n) for n in heads]
1455 heads = [self.rev(n) for n in heads]
1453
1456
1454 inc = self.incrementalmissingrevs(common=common)
1457 inc = self.incrementalmissingrevs(common=common)
1455 return [self.node(r) for r in inc.missingancestors(heads)]
1458 return [self.node(r) for r in inc.missingancestors(heads)]
1456
1459
1457 def nodesbetween(self, roots=None, heads=None):
1460 def nodesbetween(self, roots=None, heads=None):
1458 """Return a topological path from 'roots' to 'heads'.
1461 """Return a topological path from 'roots' to 'heads'.
1459
1462
1460 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1463 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1461 topologically sorted list of all nodes N that satisfy both of
1464 topologically sorted list of all nodes N that satisfy both of
1462 these constraints:
1465 these constraints:
1463
1466
1464 1. N is a descendant of some node in 'roots'
1467 1. N is a descendant of some node in 'roots'
1465 2. N is an ancestor of some node in 'heads'
1468 2. N is an ancestor of some node in 'heads'
1466
1469
1467 Every node is considered to be both a descendant and an ancestor
1470 Every node is considered to be both a descendant and an ancestor
1468 of itself, so every reachable node in 'roots' and 'heads' will be
1471 of itself, so every reachable node in 'roots' and 'heads' will be
1469 included in 'nodes'.
1472 included in 'nodes'.
1470
1473
1471 'outroots' is the list of reachable nodes in 'roots', i.e., the
1474 'outroots' is the list of reachable nodes in 'roots', i.e., the
1472 subset of 'roots' that is returned in 'nodes'. Likewise,
1475 subset of 'roots' that is returned in 'nodes'. Likewise,
1473 'outheads' is the subset of 'heads' that is also in 'nodes'.
1476 'outheads' is the subset of 'heads' that is also in 'nodes'.
1474
1477
1475 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1478 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1476 unspecified, uses nullid as the only root. If 'heads' is
1479 unspecified, uses nullid as the only root. If 'heads' is
1477 unspecified, uses the list of all of the revlog's heads."""
1480 unspecified, uses the list of all of the revlog's heads."""
1478 nonodes = ([], [], [])
1481 nonodes = ([], [], [])
1479 if roots is not None:
1482 if roots is not None:
1480 roots = list(roots)
1483 roots = list(roots)
1481 if not roots:
1484 if not roots:
1482 return nonodes
1485 return nonodes
1483 lowestrev = min([self.rev(n) for n in roots])
1486 lowestrev = min([self.rev(n) for n in roots])
1484 else:
1487 else:
1485 roots = [nullid] # Everybody's a descendant of nullid
1488 roots = [nullid] # Everybody's a descendant of nullid
1486 lowestrev = nullrev
1489 lowestrev = nullrev
1487 if (lowestrev == nullrev) and (heads is None):
1490 if (lowestrev == nullrev) and (heads is None):
1488 # We want _all_ the nodes!
1491 # We want _all_ the nodes!
1489 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1492 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1490 if heads is None:
1493 if heads is None:
1491 # All nodes are ancestors, so the latest ancestor is the last
1494 # All nodes are ancestors, so the latest ancestor is the last
1492 # node.
1495 # node.
1493 highestrev = len(self) - 1
1496 highestrev = len(self) - 1
1494 # Set ancestors to None to signal that every node is an ancestor.
1497 # Set ancestors to None to signal that every node is an ancestor.
1495 ancestors = None
1498 ancestors = None
1496 # Set heads to an empty dictionary for later discovery of heads
1499 # Set heads to an empty dictionary for later discovery of heads
1497 heads = {}
1500 heads = {}
1498 else:
1501 else:
1499 heads = list(heads)
1502 heads = list(heads)
1500 if not heads:
1503 if not heads:
1501 return nonodes
1504 return nonodes
1502 ancestors = set()
1505 ancestors = set()
1503 # Turn heads into a dictionary so we can remove 'fake' heads.
1506 # Turn heads into a dictionary so we can remove 'fake' heads.
1504 # Also, later we will be using it to filter out the heads we can't
1507 # Also, later we will be using it to filter out the heads we can't
1505 # find from roots.
1508 # find from roots.
1506 heads = dict.fromkeys(heads, False)
1509 heads = dict.fromkeys(heads, False)
1507 # Start at the top and keep marking parents until we're done.
1510 # Start at the top and keep marking parents until we're done.
1508 nodestotag = set(heads)
1511 nodestotag = set(heads)
1509 # Remember where the top was so we can use it as a limit later.
1512 # Remember where the top was so we can use it as a limit later.
1510 highestrev = max([self.rev(n) for n in nodestotag])
1513 highestrev = max([self.rev(n) for n in nodestotag])
1511 while nodestotag:
1514 while nodestotag:
1512 # grab a node to tag
1515 # grab a node to tag
1513 n = nodestotag.pop()
1516 n = nodestotag.pop()
1514 # Never tag nullid
1517 # Never tag nullid
1515 if n == nullid:
1518 if n == nullid:
1516 continue
1519 continue
1517 # A node's revision number represents its place in a
1520 # A node's revision number represents its place in a
1518 # topologically sorted list of nodes.
1521 # topologically sorted list of nodes.
1519 r = self.rev(n)
1522 r = self.rev(n)
1520 if r >= lowestrev:
1523 if r >= lowestrev:
1521 if n not in ancestors:
1524 if n not in ancestors:
1522 # If we are possibly a descendant of one of the roots
1525 # If we are possibly a descendant of one of the roots
1523 # and we haven't already been marked as an ancestor
1526 # and we haven't already been marked as an ancestor
1524 ancestors.add(n) # Mark as ancestor
1527 ancestors.add(n) # Mark as ancestor
1525 # Add non-nullid parents to list of nodes to tag.
1528 # Add non-nullid parents to list of nodes to tag.
1526 nodestotag.update([p for p in self.parents(n) if
1529 nodestotag.update([p for p in self.parents(n) if
1527 p != nullid])
1530 p != nullid])
1528 elif n in heads: # We've seen it before, is it a fake head?
1531 elif n in heads: # We've seen it before, is it a fake head?
1529 # So it is, real heads should not be the ancestors of
1532 # So it is, real heads should not be the ancestors of
1530 # any other heads.
1533 # any other heads.
1531 heads.pop(n)
1534 heads.pop(n)
1532 if not ancestors:
1535 if not ancestors:
1533 return nonodes
1536 return nonodes
1534 # Now that we have our set of ancestors, we want to remove any
1537 # Now that we have our set of ancestors, we want to remove any
1535 # roots that are not ancestors.
1538 # roots that are not ancestors.
1536
1539
1537 # If one of the roots was nullid, everything is included anyway.
1540 # If one of the roots was nullid, everything is included anyway.
1538 if lowestrev > nullrev:
1541 if lowestrev > nullrev:
1539 # But, since we weren't, let's recompute the lowest rev to not
1542 # But, since we weren't, let's recompute the lowest rev to not
1540 # include roots that aren't ancestors.
1543 # include roots that aren't ancestors.
1541
1544
1542 # Filter out roots that aren't ancestors of heads
1545 # Filter out roots that aren't ancestors of heads
1543 roots = [root for root in roots if root in ancestors]
1546 roots = [root for root in roots if root in ancestors]
1544 # Recompute the lowest revision
1547 # Recompute the lowest revision
1545 if roots:
1548 if roots:
1546 lowestrev = min([self.rev(root) for root in roots])
1549 lowestrev = min([self.rev(root) for root in roots])
1547 else:
1550 else:
1548 # No more roots? Return empty list
1551 # No more roots? Return empty list
1549 return nonodes
1552 return nonodes
1550 else:
1553 else:
1551 # We are descending from nullid, and don't need to care about
1554 # We are descending from nullid, and don't need to care about
1552 # any other roots.
1555 # any other roots.
1553 lowestrev = nullrev
1556 lowestrev = nullrev
1554 roots = [nullid]
1557 roots = [nullid]
1555 # Transform our roots list into a set.
1558 # Transform our roots list into a set.
1556 descendants = set(roots)
1559 descendants = set(roots)
1557 # Also, keep the original roots so we can filter out roots that aren't
1560 # Also, keep the original roots so we can filter out roots that aren't
1558 # 'real' roots (i.e. are descended from other roots).
1561 # 'real' roots (i.e. are descended from other roots).
1559 roots = descendants.copy()
1562 roots = descendants.copy()
1560 # Our topologically sorted list of output nodes.
1563 # Our topologically sorted list of output nodes.
1561 orderedout = []
1564 orderedout = []
1562 # Don't start at nullid since we don't want nullid in our output list,
1565 # Don't start at nullid since we don't want nullid in our output list,
1563 # and if nullid shows up in descendants, empty parents will look like
1566 # and if nullid shows up in descendants, empty parents will look like
1564 # they're descendants.
1567 # they're descendants.
1565 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1568 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1566 n = self.node(r)
1569 n = self.node(r)
1567 isdescendant = False
1570 isdescendant = False
1568 if lowestrev == nullrev: # Everybody is a descendant of nullid
1571 if lowestrev == nullrev: # Everybody is a descendant of nullid
1569 isdescendant = True
1572 isdescendant = True
1570 elif n in descendants:
1573 elif n in descendants:
1571 # n is already a descendant
1574 # n is already a descendant
1572 isdescendant = True
1575 isdescendant = True
1573 # This check only needs to be done here because all the roots
1576 # This check only needs to be done here because all the roots
1574 # will start being marked as descendants before the loop.
1577 # will start being marked as descendants before the loop.
1575 if n in roots:
1578 if n in roots:
1576 # If n was a root, check if it's a 'real' root.
1579 # If n was a root, check if it's a 'real' root.
1577 p = tuple(self.parents(n))
1580 p = tuple(self.parents(n))
1578 # If any of its parents are descendants, it's not a root.
1581 # If any of its parents are descendants, it's not a root.
1579 if (p[0] in descendants) or (p[1] in descendants):
1582 if (p[0] in descendants) or (p[1] in descendants):
1580 roots.remove(n)
1583 roots.remove(n)
1581 else:
1584 else:
1582 p = tuple(self.parents(n))
1585 p = tuple(self.parents(n))
1583 # A node is a descendant if either of its parents are
1586 # A node is a descendant if either of its parents are
1584 # descendants. (We seeded the descendants set with the roots
1587 # descendants. (We seeded the descendants set with the roots
1585 # up there, remember?)
1588 # up there, remember?)
1586 if (p[0] in descendants) or (p[1] in descendants):
1589 if (p[0] in descendants) or (p[1] in descendants):
1587 descendants.add(n)
1590 descendants.add(n)
1588 isdescendant = True
1591 isdescendant = True
1589 if isdescendant and ((ancestors is None) or (n in ancestors)):
1592 if isdescendant and ((ancestors is None) or (n in ancestors)):
1590 # Only include nodes that are both descendants and ancestors.
1593 # Only include nodes that are both descendants and ancestors.
1591 orderedout.append(n)
1594 orderedout.append(n)
1592 if (ancestors is not None) and (n in heads):
1595 if (ancestors is not None) and (n in heads):
1593 # We're trying to figure out which heads are reachable
1596 # We're trying to figure out which heads are reachable
1594 # from roots.
1597 # from roots.
1595 # Mark this head as having been reached
1598 # Mark this head as having been reached
1596 heads[n] = True
1599 heads[n] = True
1597 elif ancestors is None:
1600 elif ancestors is None:
1598 # Otherwise, we're trying to discover the heads.
1601 # Otherwise, we're trying to discover the heads.
1599 # Assume this is a head because if it isn't, the next step
1602 # Assume this is a head because if it isn't, the next step
1600 # will eventually remove it.
1603 # will eventually remove it.
1601 heads[n] = True
1604 heads[n] = True
1602 # But, obviously its parents aren't.
1605 # But, obviously its parents aren't.
1603 for p in self.parents(n):
1606 for p in self.parents(n):
1604 heads.pop(p, None)
1607 heads.pop(p, None)
1605 heads = [head for head, flag in heads.iteritems() if flag]
1608 heads = [head for head, flag in heads.iteritems() if flag]
1606 roots = list(roots)
1609 roots = list(roots)
1607 assert orderedout
1610 assert orderedout
1608 assert roots
1611 assert roots
1609 assert heads
1612 assert heads
1610 return (orderedout, roots, heads)
1613 return (orderedout, roots, heads)
1611
1614
1612 def headrevs(self):
1615 def headrevs(self):
1613 try:
1616 try:
1614 return self.index.headrevs()
1617 return self.index.headrevs()
1615 except AttributeError:
1618 except AttributeError:
1616 return self._headrevs()
1619 return self._headrevs()
1617
1620
1618 def computephases(self, roots):
1621 def computephases(self, roots):
1619 return self.index.computephasesmapsets(roots)
1622 return self.index.computephasesmapsets(roots)
1620
1623
1621 def _headrevs(self):
1624 def _headrevs(self):
1622 count = len(self)
1625 count = len(self)
1623 if not count:
1626 if not count:
1624 return [nullrev]
1627 return [nullrev]
1625 # we won't iter over filtered rev so nobody is a head at start
1628 # we won't iter over filtered rev so nobody is a head at start
1626 ishead = [0] * (count + 1)
1629 ishead = [0] * (count + 1)
1627 index = self.index
1630 index = self.index
1628 for r in self:
1631 for r in self:
1629 ishead[r] = 1 # I may be a head
1632 ishead[r] = 1 # I may be a head
1630 e = index[r]
1633 e = index[r]
1631 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1634 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1632 return [r for r, val in enumerate(ishead) if val]
1635 return [r for r, val in enumerate(ishead) if val]
1633
1636
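# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# _headrevs() above marks every revision as a head and then clears the mark
# on each revision's parents; whatever stays marked has no children. A toy
# version over a parent table (names are assumptions):
def _toy_headrevs(parentrevs):
    if not parentrevs:
        return [-1]
    ishead = [True] * len(parentrevs)
    for p1, p2 in parentrevs:
        for p in (p1, p2):
            if p != -1:
                ishead[p] = False
    return [r for r, val in enumerate(ishead) if val]

# parents = [(-1, -1), (0, -1), (0, -1)]  # two children of rev 0
# _toy_headrevs(parents) -> [1, 2]
# --------------------------------------------------------------------------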
1634 def heads(self, start=None, stop=None):
1637 def heads(self, start=None, stop=None):
1635 """return the list of all nodes that have no children
1638 """return the list of all nodes that have no children
1636
1639
1637 if start is specified, only heads that are descendants of
1640 if start is specified, only heads that are descendants of
1638 start will be returned
1641 start will be returned
1639 if stop is specified, it will consider all the revs from stop
1642 if stop is specified, it will consider all the revs from stop
1640 as if they had no children
1643 as if they had no children
1641 """
1644 """
1642 if start is None and stop is None:
1645 if start is None and stop is None:
1643 if not len(self):
1646 if not len(self):
1644 return [nullid]
1647 return [nullid]
1645 return [self.node(r) for r in self.headrevs()]
1648 return [self.node(r) for r in self.headrevs()]
1646
1649
1647 if start is None:
1650 if start is None:
1648 start = nullid
1651 start = nullid
1649 if stop is None:
1652 if stop is None:
1650 stop = []
1653 stop = []
1651 stoprevs = set([self.rev(n) for n in stop])
1654 stoprevs = set([self.rev(n) for n in stop])
1652 startrev = self.rev(start)
1655 startrev = self.rev(start)
1653 reachable = {startrev}
1656 reachable = {startrev}
1654 heads = {startrev}
1657 heads = {startrev}
1655
1658
1656 parentrevs = self.parentrevs
1659 parentrevs = self.parentrevs
1657 for r in self.revs(start=startrev + 1):
1660 for r in self.revs(start=startrev + 1):
1658 for p in parentrevs(r):
1661 for p in parentrevs(r):
1659 if p in reachable:
1662 if p in reachable:
1660 if r not in stoprevs:
1663 if r not in stoprevs:
1661 reachable.add(r)
1664 reachable.add(r)
1662 heads.add(r)
1665 heads.add(r)
1663 if p in heads and p not in stoprevs:
1666 if p in heads and p not in stoprevs:
1664 heads.remove(p)
1667 heads.remove(p)
1665
1668
1666 return [self.node(r) for r in heads]
1669 return [self.node(r) for r in heads]
1667
1670
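# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# With a ``start`` node, heads() above grows a reachability set from
# startrev and drops any parent that gains a reachable child. Toy model over
# a parent table, with stoprevs treated as childless (names are assumptions):
def _toy_heads_from(parentrevs, startrev, stoprevs=()):
    reachable, heads = {startrev}, {startrev}
    for r in range(startrev + 1, len(parentrevs)):
        for p in parentrevs[r]:
            if p in reachable:
                if r not in stoprevs:
                    reachable.add(r)
                    heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.discard(p)
    return sorted(heads)

# parents = [(-1, -1), (0, -1), (1, -1)]
# _toy_heads_from(parents, 0) -> [2]
# --------------------------------------------------------------------------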
1668 def children(self, node):
1671 def children(self, node):
1669 """find the children of a given node"""
1672 """find the children of a given node"""
1670 c = []
1673 c = []
1671 p = self.rev(node)
1674 p = self.rev(node)
1672 for r in self.revs(start=p + 1):
1675 for r in self.revs(start=p + 1):
1673 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1676 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1674 if prevs:
1677 if prevs:
1675 for pr in prevs:
1678 for pr in prevs:
1676 if pr == p:
1679 if pr == p:
1677 c.append(self.node(r))
1680 c.append(self.node(r))
1678 elif p == nullrev:
1681 elif p == nullrev:
1679 c.append(self.node(r))
1682 c.append(self.node(r))
1680 return c
1683 return c
1681
1684
1682 def commonancestorsheads(self, a, b):
1685 def commonancestorsheads(self, a, b):
1683 """calculate all the heads of the common ancestors of nodes a and b"""
1686 """calculate all the heads of the common ancestors of nodes a and b"""
1684 a, b = self.rev(a), self.rev(b)
1687 a, b = self.rev(a), self.rev(b)
1685 ancs = self._commonancestorsheads(a, b)
1688 ancs = self._commonancestorsheads(a, b)
1686 return pycompat.maplist(self.node, ancs)
1689 return pycompat.maplist(self.node, ancs)
1687
1690
1688 def _commonancestorsheads(self, *revs):
1691 def _commonancestorsheads(self, *revs):
1689 """calculate all the heads of the common ancestors of revs"""
1692 """calculate all the heads of the common ancestors of revs"""
1690 try:
1693 try:
1691 ancs = self.index.commonancestorsheads(*revs)
1694 ancs = self.index.commonancestorsheads(*revs)
1692 except (AttributeError, OverflowError): # C implementation failed
1695 except (AttributeError, OverflowError): # C implementation failed
1693 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1696 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1694 return ancs
1697 return ancs
1695
1698
1696 def isancestor(self, a, b):
1699 def isancestor(self, a, b):
1697 """return True if node a is an ancestor of node b
1700 """return True if node a is an ancestor of node b
1698
1701
1699 A revision is considered an ancestor of itself."""
1702 A revision is considered an ancestor of itself."""
1700 a, b = self.rev(a), self.rev(b)
1703 a, b = self.rev(a), self.rev(b)
1701 return self.isancestorrev(a, b)
1704 return self.isancestorrev(a, b)
1702
1705
1703 def descendant(self, a, b):
1706 def descendant(self, a, b):
1704 msg = 'revlog.descendant is deprecated, use revlog.isancestorrev'
1707 msg = 'revlog.descendant is deprecated, use revlog.isancestorrev'
1705 util.nouideprecwarn(msg, '4.7')
1708 util.nouideprecwarn(msg, '4.7')
1706 return self.isancestorrev(a, b)
1709 return self.isancestorrev(a, b)
1707
1710
1708 def isancestorrev(self, a, b):
1711 def isancestorrev(self, a, b):
1709 """return True if revision a is an ancestor of revision b
1712 """return True if revision a is an ancestor of revision b
1710
1713
1711 A revision is considered an ancestor of itself.
1714 A revision is considered an ancestor of itself.
1712
1715
1713 The implementation of this is trivial but the use of
1716 The implementation of this is trivial but the use of
1714 commonancestorsheads is not."""
1717 commonancestorsheads is not."""
1715 if a == nullrev:
1718 if a == nullrev:
1716 return True
1719 return True
1717 elif a == b:
1720 elif a == b:
1718 return True
1721 return True
1719 elif a > b:
1722 elif a > b:
1720 return False
1723 return False
1721 return a in self._commonancestorsheads(a, b)
1724 return a in self._commonancestorsheads(a, b)
1722
1725
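# --- Illustrative sketch (editor's aside, not part of revlog.py) ---------
# isancestorrev() above reduces "is rev a an ancestor of rev b" to a
# membership test: a is an ancestor of b exactly when a is itself one of
# the heads of the common ancestors of {a, b}. A tiny check of that
# equivalence on hand-built ancestor sets (names are assumptions):
def _toy_isancestor(ancestors_of, a, b):
    # ancestors_of maps rev -> set of its ancestors, including itself.
    common = ancestors_of[a] & ancestors_of[b]
    commonheads = {r for r in common
                   if not any(r != o and r in ancestors_of[o] for o in common)}
    return a in commonheads

# ancestors_of = {0: {0}, 1: {0, 1}, 2: {0, 1, 2}}   # a linear chain 0-1-2
# _toy_isancestor(ancestors_of, 1, 2) -> True
# _toy_isancestor(ancestors_of, 2, 1) -> False
# --------------------------------------------------------------------------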
1723 def ancestor(self, a, b):
1726 def ancestor(self, a, b):
1724 """calculate the "best" common ancestor of nodes a and b"""
1727 """calculate the "best" common ancestor of nodes a and b"""
1725
1728
1726 a, b = self.rev(a), self.rev(b)
1729 a, b = self.rev(a), self.rev(b)
1727 try:
1730 try:
1728 ancs = self.index.ancestors(a, b)
1731 ancs = self.index.ancestors(a, b)
1729 except (AttributeError, OverflowError):
1732 except (AttributeError, OverflowError):
1730 ancs = ancestor.ancestors(self.parentrevs, a, b)
1733 ancs = ancestor.ancestors(self.parentrevs, a, b)
1731 if ancs:
1734 if ancs:
1732 # choose a consistent winner when there's a tie
1735 # choose a consistent winner when there's a tie
1733 return min(map(self.node, ancs))
1736 return min(map(self.node, ancs))
1734 return nullid
1737 return nullid
1735
1738
1736 def _match(self, id):
1739 def _match(self, id):
1737 if isinstance(id, int):
1740 if isinstance(id, int):
1738 # rev
1741 # rev
1739 return self.node(id)
1742 return self.node(id)
1740 if len(id) == 20:
1743 if len(id) == 20:
1741 # possibly a binary node
1744 # possibly a binary node
1742 # odds of a binary node being all hex in ASCII are 1 in 10**25
1745 # odds of a binary node being all hex in ASCII are 1 in 10**25
1743 try:
1746 try:
1744 node = id
1747 node = id
1745 self.rev(node) # quick search the index
1748 self.rev(node) # quick search the index
1746 return node
1749 return node
1747 except LookupError:
1750 except LookupError:
1748 pass # may be partial hex id
1751 pass # may be partial hex id
1749 try:
1752 try:
1750 # str(rev)
1753 # str(rev)
1751 rev = int(id)
1754 rev = int(id)
1752 if "%d" % rev != id:
1755 if "%d" % rev != id:
1753 raise ValueError
1756 raise ValueError
1754 if rev < 0:
1757 if rev < 0:
1755 rev = len(self) + rev
1758 rev = len(self) + rev
1756 if rev < 0 or rev >= len(self):
1759 if rev < 0 or rev >= len(self):
1757 raise ValueError
1760 raise ValueError
1758 return self.node(rev)
1761 return self.node(rev)
1759 except (ValueError, OverflowError):
1762 except (ValueError, OverflowError):
1760 pass
1763 pass
1761 if len(id) == 40:
1764 if len(id) == 40:
1762 try:
1765 try:
1763 # a full hex nodeid?
1766 # a full hex nodeid?
1764 node = bin(id)
1767 node = bin(id)
1765 self.rev(node)
1768 self.rev(node)
1766 return node
1769 return node
1767 except (TypeError, LookupError):
1770 except (TypeError, LookupError):
1768 pass
1771 pass
1769
1772
1770 def _partialmatch(self, id):
1773 def _partialmatch(self, id):
1771 # we don't care about wdirfilenodeids as they should always be full hashes
1774 # we don't care about wdirfilenodeids as they should always be full hashes
1772 maybewdir = wdirhex.startswith(id)
1775 maybewdir = wdirhex.startswith(id)
1773 try:
1776 try:
1774 partial = self.index.partialmatch(id)
1777 partial = self.index.partialmatch(id)
1775 if partial and self.hasnode(partial):
1778 if partial and self.hasnode(partial):
1776 if maybewdir:
1779 if maybewdir:
1777 # single 'ff...' match in radix tree, ambiguous with wdir
1780 # single 'ff...' match in radix tree, ambiguous with wdir
1778 raise RevlogError
1781 raise RevlogError
1779 return partial
1782 return partial
1780 if maybewdir:
1783 if maybewdir:
1781 # no 'ff...' match in radix tree, wdir identified
1784 # no 'ff...' match in radix tree, wdir identified
1782 raise error.WdirUnsupported
1785 raise error.WdirUnsupported
1783 return None
1786 return None
1784 except RevlogError:
1787 except RevlogError:
1785 # parsers.c radix tree lookup gave multiple matches
1788 # parsers.c radix tree lookup gave multiple matches
1786 # fast path: for unfiltered changelog, radix tree is accurate
1789 # fast path: for unfiltered changelog, radix tree is accurate
1787 if not getattr(self, 'filteredrevs', None):
1790 if not getattr(self, 'filteredrevs', None):
1788 raise LookupError(id, self.indexfile,
1791 raise LookupError(id, self.indexfile,
1789 _('ambiguous identifier'))
1792 _('ambiguous identifier'))
1790 # fall through to slow path that filters hidden revisions
1793 # fall through to slow path that filters hidden revisions
1791 except (AttributeError, ValueError):
1794 except (AttributeError, ValueError):
1792 # we are pure python, or key was too short to search radix tree
1795 # we are pure python, or key was too short to search radix tree
1793 pass
1796 pass
1794
1797
1795 if id in self._pcache:
1798 if id in self._pcache:
1796 return self._pcache[id]
1799 return self._pcache[id]
1797
1800
1798 if len(id) <= 40:
1801 if len(id) <= 40:
1799 try:
1802 try:
1800 # hex(node)[:...]
1803 # hex(node)[:...]
1801 l = len(id) // 2 # grab an even number of digits
1804 l = len(id) // 2 # grab an even number of digits
1802 prefix = bin(id[:l * 2])
1805 prefix = bin(id[:l * 2])
1803 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1806 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1804 nl = [n for n in nl if hex(n).startswith(id) and
1807 nl = [n for n in nl if hex(n).startswith(id) and
1805 self.hasnode(n)]
1808 self.hasnode(n)]
1806 if len(nl) > 0:
1809 if len(nl) > 0:
1807 if len(nl) == 1 and not maybewdir:
1810 if len(nl) == 1 and not maybewdir:
1808 self._pcache[id] = nl[0]
1811 self._pcache[id] = nl[0]
1809 return nl[0]
1812 return nl[0]
1810 raise LookupError(id, self.indexfile,
1813 raise LookupError(id, self.indexfile,
1811 _('ambiguous identifier'))
1814 _('ambiguous identifier'))
1812 if maybewdir:
1815 if maybewdir:
1813 raise error.WdirUnsupported
1816 raise error.WdirUnsupported
1814 return None
1817 return None
1815 except TypeError:
1818 except TypeError:
1816 pass
1819 pass
1817
1820
1818 def lookup(self, id):
1821 def lookup(self, id):
1819 """locate a node based on:
1822 """locate a node based on:
1820 - revision number or str(revision number)
1823 - revision number or str(revision number)
1821 - nodeid or subset of hex nodeid
1824 - nodeid or subset of hex nodeid
1822 """
1825 """
1823 n = self._match(id)
1826 n = self._match(id)
1824 if n is not None:
1827 if n is not None:
1825 return n
1828 return n
1826 n = self._partialmatch(id)
1829 n = self._partialmatch(id)
1827 if n:
1830 if n:
1828 return n
1831 return n
1829
1832
1830 raise LookupError(id, self.indexfile, _('no match found'))
1833 raise LookupError(id, self.indexfile, _('no match found'))
1831
1834
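# Illustrative calls (a minimal sketch, assuming 'rl' is an open revlog and
# the identifiers shown actually resolve in it):
#
#   node = rl.lookup(5)          # revision number
#   node = rl.lookup('5')        # str(revision number)
#   node = rl.lookup('1a2b3c')   # unambiguous hex nodeid prefix
#   rl.lookup('no-such-id')      # raises LookupError('no match found')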
1832 def shortest(self, node, minlength=1):
1835 def shortest(self, node, minlength=1):
1833 """Find the shortest unambiguous prefix that matches node."""
1836 """Find the shortest unambiguous prefix that matches node."""
1834 def isvalid(prefix):
1837 def isvalid(prefix):
1835 try:
1838 try:
1836 node = self._partialmatch(prefix)
1839 node = self._partialmatch(prefix)
1837 except error.RevlogError:
1840 except error.RevlogError:
1838 return False
1841 return False
1839 except error.WdirUnsupported:
1842 except error.WdirUnsupported:
1840 # single 'ff...' match
1843 # single 'ff...' match
1841 return True
1844 return True
1842 if node is None:
1845 if node is None:
1843 raise LookupError(node, self.indexfile, _('no node'))
1846 raise LookupError(node, self.indexfile, _('no node'))
1844 return True
1847 return True
1845
1848
1846 def maybewdir(prefix):
1849 def maybewdir(prefix):
1847 return all(c == 'f' for c in prefix)
1850 return all(c == 'f' for c in prefix)
1848
1851
1849 hexnode = hex(node)
1852 hexnode = hex(node)
1850
1853
1851 def disambiguate(hexnode, minlength):
1854 def disambiguate(hexnode, minlength):
1852 """Disambiguate against wdirid."""
1855 """Disambiguate against wdirid."""
1853 for length in range(minlength, 41):
1856 for length in range(minlength, 41):
1854 prefix = hexnode[:length]
1857 prefix = hexnode[:length]
1855 if not maybewdir(prefix):
1858 if not maybewdir(prefix):
1856 return prefix
1859 return prefix
1857
1860
1858 if not getattr(self, 'filteredrevs', None):
1861 if not getattr(self, 'filteredrevs', None):
1859 try:
1862 try:
1860 length = max(self.index.shortest(node), minlength)
1863 length = max(self.index.shortest(node), minlength)
1861 return disambiguate(hexnode, length)
1864 return disambiguate(hexnode, length)
1862 except RevlogError:
1865 except RevlogError:
1863 if node != wdirid:
1866 if node != wdirid:
1864 raise LookupError(node, self.indexfile, _('no node'))
1867 raise LookupError(node, self.indexfile, _('no node'))
1865 except AttributeError:
1868 except AttributeError:
1866 # Fall through to pure code
1869 # Fall through to pure code
1867 pass
1870 pass
1868
1871
1869 if node == wdirid:
1872 if node == wdirid:
1870 for length in range(minlength, 41):
1873 for length in range(minlength, 41):
1871 prefix = hexnode[:length]
1874 prefix = hexnode[:length]
1872 if isvalid(prefix):
1875 if isvalid(prefix):
1873 return prefix
1876 return prefix
1874
1877
1875 for length in range(minlength, 41):
1878 for length in range(minlength, 41):
1876 prefix = hexnode[:length]
1879 prefix = hexnode[:length]
1877 if isvalid(prefix):
1880 if isvalid(prefix):
1878 return disambiguate(hexnode, length)
1881 return disambiguate(hexnode, length)
1879
1882
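# Minimal sketch (same assumptions as above): shortest() trims hex(node) to
# the smallest prefix that still resolves unambiguously, never shorter than
# 'minlength':
#
#   prefix = rl.shortest(node, minlength=4)
#   assert rl._partialmatch(prefix) == node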
1880 def cmp(self, node, text):
1883 def cmp(self, node, text):
1881 """compare text with a given file revision
1884 """compare text with a given file revision
1882
1885
1883 returns True if text is different than what is stored.
1886 returns True if text is different than what is stored.
1884 """
1887 """
1885 p1, p2 = self.parents(node)
1888 p1, p2 = self.parents(node)
1886 return hash(text, p1, p2) != node
1889 return hash(text, p1, p2) != node
1887
1890
1888 def _cachesegment(self, offset, data):
1891 def _cachesegment(self, offset, data):
1889 """Add a segment to the revlog cache.
1892 """Add a segment to the revlog cache.
1890
1893
1891 Accepts an absolute offset and the data that is at that location.
1894 Accepts an absolute offset and the data that is at that location.
1892 """
1895 """
1893 o, d = self._chunkcache
1896 o, d = self._chunkcache
1894 # try to add to existing cache
1897 # try to add to existing cache
1895 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1898 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1896 self._chunkcache = o, d + data
1899 self._chunkcache = o, d + data
1897 else:
1900 else:
1898 self._chunkcache = offset, data
1901 self._chunkcache = offset, data
1899
1902
1900 def _readsegment(self, offset, length, df=None):
1903 def _readsegment(self, offset, length, df=None):
1901 """Load a segment of raw data from the revlog.
1904 """Load a segment of raw data from the revlog.
1902
1905
1903 Accepts an absolute offset, length to read, and an optional existing
1906 Accepts an absolute offset, length to read, and an optional existing
1904 file handle to read from.
1907 file handle to read from.
1905
1908
1906 If an existing file handle is passed, it will be seeked and the
1909 If an existing file handle is passed, it will be seeked and the
1907 original seek position will NOT be restored.
1910 original seek position will NOT be restored.
1908
1911
1909 Returns a str or buffer of raw byte data.
1912 Returns a str or buffer of raw byte data.
1910 """
1913 """
1911 # Cache data both forward and backward around the requested
1914 # Cache data both forward and backward around the requested
1912 # data, in a fixed size window. This helps speed up operations
1915 # data, in a fixed size window. This helps speed up operations
1913 # involving reading the revlog backwards.
1916 # involving reading the revlog backwards.
1914 cachesize = self._chunkcachesize
1917 cachesize = self._chunkcachesize
1915 realoffset = offset & ~(cachesize - 1)
1918 realoffset = offset & ~(cachesize - 1)
1916 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1919 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1917 - realoffset)
1920 - realoffset)
1918 with self._datareadfp(df) as df:
1921 with self._datareadfp(df) as df:
1919 df.seek(realoffset)
1922 df.seek(realoffset)
1920 d = df.read(reallength)
1923 d = df.read(reallength)
1921 self._cachesegment(realoffset, d)
1924 self._cachesegment(realoffset, d)
1922 if offset != realoffset or reallength != length:
1925 if offset != realoffset or reallength != length:
1923 return util.buffer(d, offset - realoffset, length)
1926 return util.buffer(d, offset - realoffset, length)
1924 return d
1927 return d
1925
1928
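# Worked example of the windowing above (numbers are only illustrative):
# with a chunk cache size of 65536, a request for offset=70000, length=100
# is widened to
#
#   realoffset = 70000 & ~65535                            = 65536
#   reallength = ((70000 + 100 + 65536) & ~65535) - 65536  = 65536
#
# so one aligned 64KiB window covering bytes [65536, 131072) is read and
# cached, and the requested 100 bytes are returned as a buffer into it.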
1926 def _getsegment(self, offset, length, df=None):
1929 def _getsegment(self, offset, length, df=None):
1927 """Obtain a segment of raw data from the revlog.
1930 """Obtain a segment of raw data from the revlog.
1928
1931
1929 Accepts an absolute offset, length of bytes to obtain, and an
1932 Accepts an absolute offset, length of bytes to obtain, and an
1930 optional file handle to the already-opened revlog. If the file
1933 optional file handle to the already-opened revlog. If the file
1931 handle is used, its original seek position will not be preserved.
1934 handle is used, its original seek position will not be preserved.
1932
1935
1933 Requests for data may be returned from a cache.
1936 Requests for data may be returned from a cache.
1934
1937
1935 Returns a str or a buffer instance of raw byte data.
1938 Returns a str or a buffer instance of raw byte data.
1936 """
1939 """
1937 o, d = self._chunkcache
1940 o, d = self._chunkcache
1938 l = len(d)
1941 l = len(d)
1939
1942
1940 # is it in the cache?
1943 # is it in the cache?
1941 cachestart = offset - o
1944 cachestart = offset - o
1942 cacheend = cachestart + length
1945 cacheend = cachestart + length
1943 if cachestart >= 0 and cacheend <= l:
1946 if cachestart >= 0 and cacheend <= l:
1944 if cachestart == 0 and cacheend == l:
1947 if cachestart == 0 and cacheend == l:
1945 return d # avoid a copy
1948 return d # avoid a copy
1946 return util.buffer(d, cachestart, cacheend - cachestart)
1949 return util.buffer(d, cachestart, cacheend - cachestart)
1947
1950
1948 return self._readsegment(offset, length, df=df)
1951 return self._readsegment(offset, length, df=df)
1949
1952
1950 def _getsegmentforrevs(self, startrev, endrev, df=None):
1953 def _getsegmentforrevs(self, startrev, endrev, df=None):
1951 """Obtain a segment of raw data corresponding to a range of revisions.
1954 """Obtain a segment of raw data corresponding to a range of revisions.
1952
1955
1953 Accepts the start and end revisions and an optional already-open
1956 Accepts the start and end revisions and an optional already-open
1954 file handle to be used for reading. If the file handle is used, its
1957 file handle to be used for reading. If the file handle is used, its
1955 seek position will not be preserved.
1958 seek position will not be preserved.
1956
1959
1957 Requests for data may be satisfied by a cache.
1960 Requests for data may be satisfied by a cache.
1958
1961
1959 Returns a 2-tuple of (offset, data) for the requested range of
1962 Returns a 2-tuple of (offset, data) for the requested range of
1960 revisions. Offset is the integer offset from the beginning of the
1963 revisions. Offset is the integer offset from the beginning of the
1961 revlog and data is a str or buffer of the raw byte data.
1964 revlog and data is a str or buffer of the raw byte data.
1962
1965
1963 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1966 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1964 to determine where each revision's data begins and ends.
1967 to determine where each revision's data begins and ends.
1965 """
1968 """
1966 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1969 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1967 # (functions are expensive).
1970 # (functions are expensive).
1968 index = self.index
1971 index = self.index
1969 istart = index[startrev]
1972 istart = index[startrev]
1970 start = int(istart[0] >> 16)
1973 start = int(istart[0] >> 16)
1971 if startrev == endrev:
1974 if startrev == endrev:
1972 end = start + istart[1]
1975 end = start + istart[1]
1973 else:
1976 else:
1974 iend = index[endrev]
1977 iend = index[endrev]
1975 end = int(iend[0] >> 16) + iend[1]
1978 end = int(iend[0] >> 16) + iend[1]
1976
1979
1977 if self._inline:
1980 if self._inline:
1978 start += (startrev + 1) * self._io.size
1981 start += (startrev + 1) * self._io.size
1979 end += (endrev + 1) * self._io.size
1982 end += (endrev + 1) * self._io.size
1980 length = end - start
1983 length = end - start
1981
1984
1982 return start, self._getsegment(start, length, df=df)
1985 return start, self._getsegment(start, length, df=df)
1983
1986
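# Sketch of how a caller carves per-revision chunks out of the returned
# (offset, data) pair (this mirrors the loop in _chunks() below):
#
#   offset, data = rl._getsegmentforrevs(firstrev, lastrev)
#   for rev in range(firstrev, lastrev + 1):
#       chunkstart = rl.start(rev)
#       if rl._inline:
#           chunkstart += (rev + 1) * rl._io.size
#       chunk = util.buffer(data, chunkstart - offset, rl.length(rev))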
1984 def _chunk(self, rev, df=None):
1987 def _chunk(self, rev, df=None):
1985 """Obtain a single decompressed chunk for a revision.
1988 """Obtain a single decompressed chunk for a revision.
1986
1989
1987 Accepts an integer revision and an optional already-open file handle
1990 Accepts an integer revision and an optional already-open file handle
1988 to be used for reading. If used, the seek position of the file will not
1991 to be used for reading. If used, the seek position of the file will not
1989 be preserved.
1992 be preserved.
1990
1993
1991 Returns a str holding uncompressed data for the requested revision.
1994 Returns a str holding uncompressed data for the requested revision.
1992 """
1995 """
1993 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1996 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1994
1997
1995 def _chunks(self, revs, df=None, targetsize=None):
1998 def _chunks(self, revs, df=None, targetsize=None):
1996 """Obtain decompressed chunks for the specified revisions.
1999 """Obtain decompressed chunks for the specified revisions.
1997
2000
1998 Accepts an iterable of numeric revisions that are assumed to be in
2001 Accepts an iterable of numeric revisions that are assumed to be in
1999 ascending order. Also accepts an optional already-open file handle
2002 ascending order. Also accepts an optional already-open file handle
2000 to be used for reading. If used, the seek position of the file will
2003 to be used for reading. If used, the seek position of the file will
2001 not be preserved.
2004 not be preserved.
2002
2005
2003 This function is similar to calling ``self._chunk()`` multiple times,
2006 This function is similar to calling ``self._chunk()`` multiple times,
2004 but is faster.
2007 but is faster.
2005
2008
2006 Returns a list with decompressed data for each requested revision.
2009 Returns a list with decompressed data for each requested revision.
2007 """
2010 """
2008 if not revs:
2011 if not revs:
2009 return []
2012 return []
2010 start = self.start
2013 start = self.start
2011 length = self.length
2014 length = self.length
2012 inline = self._inline
2015 inline = self._inline
2013 iosize = self._io.size
2016 iosize = self._io.size
2014 buffer = util.buffer
2017 buffer = util.buffer
2015
2018
2016 l = []
2019 l = []
2017 ladd = l.append
2020 ladd = l.append
2018
2021
2019 if not self._withsparseread:
2022 if not self._withsparseread:
2020 slicedchunks = (revs,)
2023 slicedchunks = (revs,)
2021 else:
2024 else:
2022 slicedchunks = _slicechunk(self, revs, targetsize=targetsize)
2025 slicedchunks = _slicechunk(self, revs, targetsize=targetsize)
2023
2026
2024 for revschunk in slicedchunks:
2027 for revschunk in slicedchunks:
2025 firstrev = revschunk[0]
2028 firstrev = revschunk[0]
2026 # Skip trailing revisions with empty diff
2029 # Skip trailing revisions with empty diff
2027 for lastrev in revschunk[::-1]:
2030 for lastrev in revschunk[::-1]:
2028 if length(lastrev) != 0:
2031 if length(lastrev) != 0:
2029 break
2032 break
2030
2033
2031 try:
2034 try:
2032 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
2035 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
2033 except OverflowError:
2036 except OverflowError:
2034 # issue4215 - we can't cache a run of chunks greater than
2037 # issue4215 - we can't cache a run of chunks greater than
2035 # 2G on Windows
2038 # 2G on Windows
2036 return [self._chunk(rev, df=df) for rev in revschunk]
2039 return [self._chunk(rev, df=df) for rev in revschunk]
2037
2040
2038 decomp = self.decompress
2041 decomp = self.decompress
2039 for rev in revschunk:
2042 for rev in revschunk:
2040 chunkstart = start(rev)
2043 chunkstart = start(rev)
2041 if inline:
2044 if inline:
2042 chunkstart += (rev + 1) * iosize
2045 chunkstart += (rev + 1) * iosize
2043 chunklength = length(rev)
2046 chunklength = length(rev)
2044 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
2047 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
2045
2048
2046 return l
2049 return l
2047
2050
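# Sketch connecting _chunks() to text reconstruction (see revision() below):
#
#   chain, stopped = rl._deltachain(rev)
#   bins = rl._chunks(chain)
#   rawtext = mdiff.patches(bytes(bins[0]), bins[1:])
#
# i.e. the first chunk of a delta chain is a full text and the remaining
# chunks are deltas applied on top of it.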
2048 def _chunkclear(self):
2051 def _chunkclear(self):
2049 """Clear the raw chunk cache."""
2052 """Clear the raw chunk cache."""
2050 self._chunkcache = (0, '')
2053 self._chunkcache = (0, '')
2051
2054
2052 def deltaparent(self, rev):
2055 def deltaparent(self, rev):
2053 """return deltaparent of the given revision"""
2056 """return deltaparent of the given revision"""
2054 base = self.index[rev][3]
2057 base = self.index[rev][3]
2055 if base == rev:
2058 if base == rev:
2056 return nullrev
2059 return nullrev
2057 elif self._generaldelta:
2060 elif self._generaldelta:
2058 return base
2061 return base
2059 else:
2062 else:
2060 return rev - 1
2063 return rev - 1
2061
2064
2062 def revdiff(self, rev1, rev2):
2065 def revdiff(self, rev1, rev2):
2063 """return or calculate a delta between two revisions
2066 """return or calculate a delta between two revisions
2064
2067
2065 The delta calculated is in binary form and is intended to be written to
2068 The delta calculated is in binary form and is intended to be written to
2066 revlog data directly. So this function needs raw revision data.
2069 revlog data directly. So this function needs raw revision data.
2067 """
2070 """
2068 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
2071 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
2069 return bytes(self._chunk(rev2))
2072 return bytes(self._chunk(rev2))
2070
2073
2071 return mdiff.textdiff(self.revision(rev1, raw=True),
2074 return mdiff.textdiff(self.revision(rev1, raw=True),
2072 self.revision(rev2, raw=True))
2075 self.revision(rev2, raw=True))
2073
2076
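# Sketch of the revdiff()/mdiff.patches() round trip over raw revision data:
#
#   delta = rl.revdiff(rev1, rev2)
#   assert mdiff.patches(rl.revision(rev1, raw=True), [delta]) == \
#          rl.revision(rev2, raw=True)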
2074 def revision(self, nodeorrev, _df=None, raw=False):
2077 def revision(self, nodeorrev, _df=None, raw=False):
2075 """return an uncompressed revision of a given node or revision
2078 """return an uncompressed revision of a given node or revision
2076 number.
2079 number.
2077
2080
2078 _df - an existing file handle to read from. (internal-only)
2081 _df - an existing file handle to read from. (internal-only)
2079 raw - an optional argument specifying if the revision data is to be
2082 raw - an optional argument specifying if the revision data is to be
2080 treated as raw data when applying flag transforms. 'raw' should be set
2083 treated as raw data when applying flag transforms. 'raw' should be set
2081 to True when generating changegroups or in debug commands.
2084 to True when generating changegroups or in debug commands.
2082 """
2085 """
2083 if isinstance(nodeorrev, int):
2086 if isinstance(nodeorrev, int):
2084 rev = nodeorrev
2087 rev = nodeorrev
2085 node = self.node(rev)
2088 node = self.node(rev)
2086 else:
2089 else:
2087 node = nodeorrev
2090 node = nodeorrev
2088 rev = None
2091 rev = None
2089
2092
2090 cachedrev = None
2093 cachedrev = None
2091 flags = None
2094 flags = None
2092 rawtext = None
2095 rawtext = None
2093 if node == nullid:
2096 if node == nullid:
2094 return ""
2097 return ""
2095 if self._cache:
2098 if self._cache:
2096 if self._cache[0] == node:
2099 if self._cache[0] == node:
2097 # _cache only stores rawtext
2100 # _cache only stores rawtext
2098 if raw:
2101 if raw:
2099 return self._cache[2]
2102 return self._cache[2]
2100 # duplicated, but good for perf
2103 # duplicated, but good for perf
2101 if rev is None:
2104 if rev is None:
2102 rev = self.rev(node)
2105 rev = self.rev(node)
2103 if flags is None:
2106 if flags is None:
2104 flags = self.flags(rev)
2107 flags = self.flags(rev)
2105 # no extra flags set, no flag processor runs, text = rawtext
2108 # no extra flags set, no flag processor runs, text = rawtext
2106 if flags == REVIDX_DEFAULT_FLAGS:
2109 if flags == REVIDX_DEFAULT_FLAGS:
2107 return self._cache[2]
2110 return self._cache[2]
2108 # rawtext is reusable. need to run flag processor
2111 # rawtext is reusable. need to run flag processor
2109 rawtext = self._cache[2]
2112 rawtext = self._cache[2]
2110
2113
2111 cachedrev = self._cache[1]
2114 cachedrev = self._cache[1]
2112
2115
2113 # look up what we need to read
2116 # look up what we need to read
2114 if rawtext is None:
2117 if rawtext is None:
2115 if rev is None:
2118 if rev is None:
2116 rev = self.rev(node)
2119 rev = self.rev(node)
2117
2120
2118 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2121 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2119 if stopped:
2122 if stopped:
2120 rawtext = self._cache[2]
2123 rawtext = self._cache[2]
2121
2124
2122 # drop cache to save memory
2125 # drop cache to save memory
2123 self._cache = None
2126 self._cache = None
2124
2127
2125 targetsize = None
2128 targetsize = None
2126 rawsize = self.index[rev][2]
2129 rawsize = self.index[rev][2]
2127 if 0 <= rawsize:
2130 if 0 <= rawsize:
2128 targetsize = 4 * rawsize
2131 targetsize = 4 * rawsize
2129
2132
2130 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2133 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2131 if rawtext is None:
2134 if rawtext is None:
2132 rawtext = bytes(bins[0])
2135 rawtext = bytes(bins[0])
2133 bins = bins[1:]
2136 bins = bins[1:]
2134
2137
2135 rawtext = mdiff.patches(rawtext, bins)
2138 rawtext = mdiff.patches(rawtext, bins)
2136 self._cache = (node, rev, rawtext)
2139 self._cache = (node, rev, rawtext)
2137
2140
2138 if flags is None:
2141 if flags is None:
2139 if rev is None:
2142 if rev is None:
2140 rev = self.rev(node)
2143 rev = self.rev(node)
2141 flags = self.flags(rev)
2144 flags = self.flags(rev)
2142
2145
2143 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
2146 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
2144 if validatehash:
2147 if validatehash:
2145 self.checkhash(text, node, rev=rev)
2148 self.checkhash(text, node, rev=rev)
2146
2149
2147 return text
2150 return text
2148
2151
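# Illustrative calls (a minimal sketch, 'rl' as above):
#
#   text    = rl.revision(5)               # by revision number
#   text    = rl.revision(node)            # by binary nodeid
#   rawtext = rl.revision(node, raw=True)  # skip read-time flag transforms
#
# the default form runs registered flag processors on read; raw=True returns
# the stored rawtext as used for changegroups and debug commands.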
2149 def hash(self, text, p1, p2):
2152 def hash(self, text, p1, p2):
2150 """Compute a node hash.
2153 """Compute a node hash.
2151
2154
2152 Available as a function so that subclasses can replace the hash
2155 Available as a function so that subclasses can replace the hash
2153 as needed.
2156 as needed.
2154 """
2157 """
2155 return hash(text, p1, p2)
2158 return hash(text, p1, p2)
2156
2159
2157 def _processflags(self, text, flags, operation, raw=False):
2160 def _processflags(self, text, flags, operation, raw=False):
2158 """Inspect revision data flags and apply transforms defined by
2161 """Inspect revision data flags and apply transforms defined by
2159 registered flag processors.
2162 registered flag processors.
2160
2163
2161 ``text`` - the revision data to process
2164 ``text`` - the revision data to process
2162 ``flags`` - the revision flags
2165 ``flags`` - the revision flags
2163 ``operation`` - the operation being performed (read or write)
2166 ``operation`` - the operation being performed (read or write)
2164 ``raw`` - an optional argument describing if the raw transform should be
2167 ``raw`` - an optional argument describing if the raw transform should be
2165 applied.
2168 applied.
2166
2169
2167 This method processes the flags in the order (or reverse order if
2170 This method processes the flags in the order (or reverse order if
2168 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
2171 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
2169 flag processors registered for present flags. The order of flags defined
2172 flag processors registered for present flags. The order of flags defined
2170 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
2173 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
2171
2174
2172 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
2175 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
2173 processed text and ``validatehash`` is a bool indicating whether the
2176 processed text and ``validatehash`` is a bool indicating whether the
2174 returned text should be checked for hash integrity.
2177 returned text should be checked for hash integrity.
2175
2178
2176 Note: If the ``raw`` argument is set, it has precedence over the
2179 Note: If the ``raw`` argument is set, it has precedence over the
2177 operation and will only update the value of ``validatehash``.
2180 operation and will only update the value of ``validatehash``.
2178 """
2181 """
2179 # fast path: no flag processors will run
2182 # fast path: no flag processors will run
2180 if flags == 0:
2183 if flags == 0:
2181 return text, True
2184 return text, True
2182 if operation not in ('read', 'write'):
2185 if operation not in ('read', 'write'):
2183 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
2186 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
2184 # Check all flags are known.
2187 # Check all flags are known.
2185 if flags & ~REVIDX_KNOWN_FLAGS:
2188 if flags & ~REVIDX_KNOWN_FLAGS:
2186 raise RevlogError(_("incompatible revision flag '%#x'") %
2189 raise RevlogError(_("incompatible revision flag '%#x'") %
2187 (flags & ~REVIDX_KNOWN_FLAGS))
2190 (flags & ~REVIDX_KNOWN_FLAGS))
2188 validatehash = True
2191 validatehash = True
2189 # Depending on the operation (read or write), the order might be
2192 # Depending on the operation (read or write), the order might be
2190 # reversed due to non-commutative transforms.
2193 # reversed due to non-commutative transforms.
2191 orderedflags = REVIDX_FLAGS_ORDER
2194 orderedflags = REVIDX_FLAGS_ORDER
2192 if operation == 'write':
2195 if operation == 'write':
2193 orderedflags = reversed(orderedflags)
2196 orderedflags = reversed(orderedflags)
2194
2197
2195 for flag in orderedflags:
2198 for flag in orderedflags:
2196 # If a flagprocessor has been registered for a known flag, apply the
2199 # If a flagprocessor has been registered for a known flag, apply the
2197 # related operation transform and update result tuple.
2200 # related operation transform and update result tuple.
2198 if flag & flags:
2201 if flag & flags:
2199 vhash = True
2202 vhash = True
2200
2203
2201 if flag not in _flagprocessors:
2204 if flag not in _flagprocessors:
2202 message = _("missing processor for flag '%#x'") % (flag)
2205 message = _("missing processor for flag '%#x'") % (flag)
2203 raise RevlogError(message)
2206 raise RevlogError(message)
2204
2207
2205 processor = _flagprocessors[flag]
2208 processor = _flagprocessors[flag]
2206 if processor is not None:
2209 if processor is not None:
2207 readtransform, writetransform, rawtransform = processor
2210 readtransform, writetransform, rawtransform = processor
2208
2211
2209 if raw:
2212 if raw:
2210 vhash = rawtransform(self, text)
2213 vhash = rawtransform(self, text)
2211 elif operation == 'read':
2214 elif operation == 'read':
2212 text, vhash = readtransform(self, text)
2215 text, vhash = readtransform(self, text)
2213 else: # write operation
2216 else: # write operation
2214 text, vhash = writetransform(self, text)
2217 text, vhash = writetransform(self, text)
2215 validatehash = validatehash and vhash
2218 validatehash = validatehash and vhash
2216
2219
2217 return text, validatehash
2220 return text, validatehash
2218
2221
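# Sketch of the processor shape consumed above: every entry registered in
# _flagprocessors (typically via the module-level addflagprocessor()) is a
# (readtransform, writetransform, rawtransform) triple; 'decode'/'encode'
# below are placeholders for whatever transform a given flag implies:
#
#   def readtransform(rl, text):
#       return decode(text), True    # (new text, validatehash)
#   def writetransform(rl, text):
#       return encode(text), True
#   def rawtransform(rl, text):
#       return False                 # rawtext hash cannot be verified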
2219 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2222 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2220 """Check node hash integrity.
2223 """Check node hash integrity.
2221
2224
2222 Available as a function so that subclasses can extend hash mismatch
2225 Available as a function so that subclasses can extend hash mismatch
2223 behaviors as needed.
2226 behaviors as needed.
2224 """
2227 """
2225 try:
2228 try:
2226 if p1 is None and p2 is None:
2229 if p1 is None and p2 is None:
2227 p1, p2 = self.parents(node)
2230 p1, p2 = self.parents(node)
2228 if node != self.hash(text, p1, p2):
2231 if node != self.hash(text, p1, p2):
2229 revornode = rev
2232 revornode = rev
2230 if revornode is None:
2233 if revornode is None:
2231 revornode = templatefilters.short(hex(node))
2234 revornode = templatefilters.short(hex(node))
2232 raise RevlogError(_("integrity check failed on %s:%s")
2235 raise RevlogError(_("integrity check failed on %s:%s")
2233 % (self.indexfile, pycompat.bytestr(revornode)))
2236 % (self.indexfile, pycompat.bytestr(revornode)))
2234 except RevlogError:
2237 except RevlogError:
2235 if self._censorable and _censoredtext(text):
2238 if self._censorable and _censoredtext(text):
2236 raise error.CensoredNodeError(self.indexfile, node, text)
2239 raise error.CensoredNodeError(self.indexfile, node, text)
2237 raise
2240 raise
2238
2241
2239 def _enforceinlinesize(self, tr, fp=None):
2242 def _enforceinlinesize(self, tr, fp=None):
2240 """Check if the revlog is too big for inline and convert if so.
2243 """Check if the revlog is too big for inline and convert if so.
2241
2244
2242 This should be called after revisions are added to the revlog. If the
2245 This should be called after revisions are added to the revlog. If the
2243 revlog has grown too large to be an inline revlog, it will convert it
2246 revlog has grown too large to be an inline revlog, it will convert it
2244 to use multiple index and data files.
2247 to use multiple index and data files.
2245 """
2248 """
2246 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
2249 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
2247 return
2250 return
2248
2251
2249 trinfo = tr.find(self.indexfile)
2252 trinfo = tr.find(self.indexfile)
2250 if trinfo is None:
2253 if trinfo is None:
2251 raise RevlogError(_("%s not found in the transaction")
2254 raise RevlogError(_("%s not found in the transaction")
2252 % self.indexfile)
2255 % self.indexfile)
2253
2256
2254 trindex = trinfo[2]
2257 trindex = trinfo[2]
2255 if trindex is not None:
2258 if trindex is not None:
2256 dataoff = self.start(trindex)
2259 dataoff = self.start(trindex)
2257 else:
2260 else:
2258 # revlog was stripped at start of transaction, use all leftover data
2261 # revlog was stripped at start of transaction, use all leftover data
2259 trindex = len(self) - 1
2262 trindex = len(self) - 1
2260 dataoff = self.end(-2)
2263 dataoff = self.end(-2)
2261
2264
2262 tr.add(self.datafile, dataoff)
2265 tr.add(self.datafile, dataoff)
2263
2266
2264 if fp:
2267 if fp:
2265 fp.flush()
2268 fp.flush()
2266 fp.close()
2269 fp.close()
2267
2270
2268 with self._datafp('w') as df:
2271 with self._datafp('w') as df:
2269 for r in self:
2272 for r in self:
2270 df.write(self._getsegmentforrevs(r, r)[1])
2273 df.write(self._getsegmentforrevs(r, r)[1])
2271
2274
2272 with self._indexfp('w') as fp:
2275 with self._indexfp('w') as fp:
2273 self.version &= ~FLAG_INLINE_DATA
2276 self.version &= ~FLAG_INLINE_DATA
2274 self._inline = False
2277 self._inline = False
2275 io = self._io
2278 io = self._io
2276 for i in self:
2279 for i in self:
2277 e = io.packentry(self.index[i], self.node, self.version, i)
2280 e = io.packentry(self.index[i], self.node, self.version, i)
2278 fp.write(e)
2281 fp.write(e)
2279
2282
2280 # the temp file replaces the real index when we exit the context
2283 # the temp file replaces the real index when we exit the context
2281 # manager
2284 # manager
2282
2285
2283 tr.replace(self.indexfile, trindex * self._io.size)
2286 tr.replace(self.indexfile, trindex * self._io.size)
2284 self._chunkclear()
2287 self._chunkclear()
2285
2288
2286 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
2289 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
2287 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
2290 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
2288 """add a revision to the log
2291 """add a revision to the log
2289
2292
2290 text - the revision data to add
2293 text - the revision data to add
2291 transaction - the transaction object used for rollback
2294 transaction - the transaction object used for rollback
2292 link - the linkrev data to add
2295 link - the linkrev data to add
2293 p1, p2 - the parent nodeids of the revision
2296 p1, p2 - the parent nodeids of the revision
2294 cachedelta - an optional precomputed delta
2297 cachedelta - an optional precomputed delta
2295 node - nodeid of revision; typically node is not specified, and it is
2298 node - nodeid of revision; typically node is not specified, and it is
2296 computed by default as hash(text, p1, p2), however subclasses might
2299 computed by default as hash(text, p1, p2), however subclasses might
2297 use a different hashing method (and override checkhash() in that case)
2300 use a different hashing method (and override checkhash() in that case)
2298 flags - the known flags to set on the revision
2301 flags - the known flags to set on the revision
2299 deltacomputer - an optional _deltacomputer instance shared between
2302 deltacomputer - an optional _deltacomputer instance shared between
2300 multiple calls
2303 multiple calls
2301 """
2304 """
2302 if link == nullrev:
2305 if link == nullrev:
2303 raise RevlogError(_("attempted to add linkrev -1 to %s")
2306 raise RevlogError(_("attempted to add linkrev -1 to %s")
2304 % self.indexfile)
2307 % self.indexfile)
2305
2308
2306 if flags:
2309 if flags:
2307 node = node or self.hash(text, p1, p2)
2310 node = node or self.hash(text, p1, p2)
2308
2311
2309 rawtext, validatehash = self._processflags(text, flags, 'write')
2312 rawtext, validatehash = self._processflags(text, flags, 'write')
2310
2313
2311 # If the flag processor modifies the revision data, ignore any provided
2314 # If the flag processor modifies the revision data, ignore any provided
2312 # cachedelta.
2315 # cachedelta.
2313 if rawtext != text:
2316 if rawtext != text:
2314 cachedelta = None
2317 cachedelta = None
2315
2318
2316 if len(rawtext) > _maxentrysize:
2319 if len(rawtext) > _maxentrysize:
2317 raise RevlogError(
2320 raise RevlogError(
2318 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
2321 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
2319 % (self.indexfile, len(rawtext)))
2322 % (self.indexfile, len(rawtext)))
2320
2323
2321 node = node or self.hash(rawtext, p1, p2)
2324 node = node or self.hash(rawtext, p1, p2)
2322 if node in self.nodemap:
2325 if node in self.nodemap:
2323 return node
2326 return node
2324
2327
2325 if validatehash:
2328 if validatehash:
2326 self.checkhash(rawtext, node, p1=p1, p2=p2)
2329 self.checkhash(rawtext, node, p1=p1, p2=p2)
2327
2330
2328 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
2331 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
2329 flags, cachedelta=cachedelta,
2332 flags, cachedelta=cachedelta,
2330 deltacomputer=deltacomputer)
2333 deltacomputer=deltacomputer)
2331
2334
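# Minimal usage sketch (assumes an open transaction 'tr', a linkrev 'link'
# and parent nodeids 'p1'/'p2' already known to this revlog):
#
#   node = rl.addrevision(text, tr, link, p1, p2)
#
# the returned node is hash(text, p1, p2) unless an explicit node was given;
# adding an identical revision again is a no-op returning the existing node.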
2332 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
2335 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
2333 cachedelta=None, deltacomputer=None):
2336 cachedelta=None, deltacomputer=None):
2334 """add a raw revision with known flags, node and parents
2337 """add a raw revision with known flags, node and parents
2335 useful when reusing a revision not stored in this revlog (ex: received
2338 useful when reusing a revision not stored in this revlog (ex: received
2336 over the wire, or read from an external bundle).
2339 over the wire, or read from an external bundle).
2337 """
2340 """
2338 dfh = None
2341 dfh = None
2339 if not self._inline:
2342 if not self._inline:
2340 dfh = self._datafp("a+")
2343 dfh = self._datafp("a+")
2341 ifh = self._indexfp("a+")
2344 ifh = self._indexfp("a+")
2342 try:
2345 try:
2343 return self._addrevision(node, rawtext, transaction, link, p1, p2,
2346 return self._addrevision(node, rawtext, transaction, link, p1, p2,
2344 flags, cachedelta, ifh, dfh,
2347 flags, cachedelta, ifh, dfh,
2345 deltacomputer=deltacomputer)
2348 deltacomputer=deltacomputer)
2346 finally:
2349 finally:
2347 if dfh:
2350 if dfh:
2348 dfh.close()
2351 dfh.close()
2349 ifh.close()
2352 ifh.close()
2350
2353
2351 def compress(self, data):
2354 def compress(self, data):
2352 """Generate a possibly-compressed representation of data."""
2355 """Generate a possibly-compressed representation of data."""
2353 if not data:
2356 if not data:
2354 return '', data
2357 return '', data
2355
2358
2356 compressed = self._compressor.compress(data)
2359 compressed = self._compressor.compress(data)
2357
2360
2358 if compressed:
2361 if compressed:
2359 # The revlog compressor added the header in the returned data.
2362 # The revlog compressor added the header in the returned data.
2360 return '', compressed
2363 return '', compressed
2361
2364
2362 if data[0:1] == '\0':
2365 if data[0:1] == '\0':
2363 return '', data
2366 return '', data
2364 return 'u', data
2367 return 'u', data
2365
2368
2366 def decompress(self, data):
2369 def decompress(self, data):
2367 """Decompress a revlog chunk.
2370 """Decompress a revlog chunk.
2368
2371
2369 The chunk is expected to begin with a header identifying the
2372 The chunk is expected to begin with a header identifying the
2370 format type so it can be routed to an appropriate decompressor.
2373 format type so it can be routed to an appropriate decompressor.
2371 """
2374 """
2372 if not data:
2375 if not data:
2373 return data
2376 return data
2374
2377
2375 # Revlogs are read much more frequently than they are written and many
2378 # Revlogs are read much more frequently than they are written and many
2376 # chunks only take microseconds to decompress, so performance is
2379 # chunks only take microseconds to decompress, so performance is
2377 # important here.
2380 # important here.
2378 #
2381 #
2379 # We can make a few assumptions about revlogs:
2382 # We can make a few assumptions about revlogs:
2380 #
2383 #
2381 # 1) the majority of chunks will be compressed (as opposed to inline
2384 # 1) the majority of chunks will be compressed (as opposed to inline
2382 # raw data).
2385 # raw data).
2383 # 2) decompressing *any* data will likely be at least 10x slower than
2386 # 2) decompressing *any* data will likely be at least 10x slower than
2384 # returning raw inline data.
2387 # returning raw inline data.
2385 # 3) we want to prioritize common and officially supported compression
2388 # 3) we want to prioritize common and officially supported compression
2386 # engines
2389 # engines
2387 #
2390 #
2388 # It follows that we want to optimize for "decompress compressed data
2391 # It follows that we want to optimize for "decompress compressed data
2389 # when encoded with common and officially supported compression engines"
2392 # when encoded with common and officially supported compression engines"
2390 # case over "raw data" and "data encoded by less common or non-official
2393 # case over "raw data" and "data encoded by less common or non-official
2391 # compression engines." That is why we have the inline lookup first
2394 # compression engines." That is why we have the inline lookup first
2392 # followed by the compengines lookup.
2395 # followed by the compengines lookup.
2393 #
2396 #
2394 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2397 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2395 # compressed chunks. And this matters for changelog and manifest reads.
2398 # compressed chunks. And this matters for changelog and manifest reads.
2396 t = data[0:1]
2399 t = data[0:1]
2397
2400
2398 if t == 'x':
2401 if t == 'x':
2399 try:
2402 try:
2400 return _zlibdecompress(data)
2403 return _zlibdecompress(data)
2401 except zlib.error as e:
2404 except zlib.error as e:
2402 raise RevlogError(_('revlog decompress error: %s') %
2405 raise RevlogError(_('revlog decompress error: %s') %
2403 stringutil.forcebytestr(e))
2406 stringutil.forcebytestr(e))
2404 # '\0' is more common than 'u' so it goes first.
2407 # '\0' is more common than 'u' so it goes first.
2405 elif t == '\0':
2408 elif t == '\0':
2406 return data
2409 return data
2407 elif t == 'u':
2410 elif t == 'u':
2408 return util.buffer(data, 1)
2411 return util.buffer(data, 1)
2409
2412
2410 try:
2413 try:
2411 compressor = self._decompressors[t]
2414 compressor = self._decompressors[t]
2412 except KeyError:
2415 except KeyError:
2413 try:
2416 try:
2414 engine = util.compengines.forrevlogheader(t)
2417 engine = util.compengines.forrevlogheader(t)
2415 compressor = engine.revlogcompressor()
2418 compressor = engine.revlogcompressor()
2416 self._decompressors[t] = compressor
2419 self._decompressors[t] = compressor
2417 except KeyError:
2420 except KeyError:
2418 raise RevlogError(_('unknown compression type %r') % t)
2421 raise RevlogError(_('unknown compression type %r') % t)
2419
2422
2420 return compressor.decompress(data)
2423 return compressor.decompress(data)
2421
2424
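# Sketch of the chunk header convention handled above (see also compress()):
#
#   ''            -> ''                      # empty chunk
#   'u' + payload -> payload                 # stored uncompressed
#   '\0' + ...    -> returned unchanged      # literal data starting with NUL
#   'x' + ...     -> zlib-decompressed payload
#   other byte    -> util.compengines.forrevlogheader(t) picks the engine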
2422 def _isgooddeltainfo(self, deltainfo, revinfo):
2425 def _isgooddeltainfo(self, deltainfo, revinfo):
2423 """Returns True if the given delta is good. Good means that it is within
2426 """Returns True if the given delta is good. Good means that it is within
2424 the disk span, disk size, and chain length bounds that we know to be
2427 the disk span, disk size, and chain length bounds that we know to be
2425 performant."""
2428 performant."""
2426 if deltainfo is None:
2429 if deltainfo is None:
2427 return False
2430 return False
2428
2431
2429 # - 'deltainfo.distance' is the distance from the base revision --
2432 # - 'deltainfo.distance' is the distance from the base revision --
2430 # bounding it limits the amount of I/O we need to do.
2433 # bounding it limits the amount of I/O we need to do.
2431 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
2434 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
2432 # deltas we need to apply -- bounding it limits the amount of CPU
2435 # deltas we need to apply -- bounding it limits the amount of CPU
2433 # we consume.
2436 # we consume.
2434
2437
2435 if self._sparserevlog:
2438 if self._sparserevlog:
2436 # As sparse-read will be used, we can consider that the distance,
2439 # As sparse-read will be used, we can consider that the distance,
2437 # instead of being the span of the whole chunk,
2440 # instead of being the span of the whole chunk,
2438 # is the span of the largest read chunk
2441 # is the span of the largest read chunk
2439 base = deltainfo.base
2442 base = deltainfo.base
2440
2443
2441 if base != nullrev:
2444 if base != nullrev:
2442 deltachain = self._deltachain(base)[0]
2445 deltachain = self._deltachain(base)[0]
2443 else:
2446 else:
2444 deltachain = []
2447 deltachain = []
2445
2448
2446 chunks = _slicechunk(self, deltachain, deltainfo)
2449 chunks = _slicechunk(self, deltachain, deltainfo)
2447 distance = max(map(lambda revs:_segmentspan(self, revs), chunks))
2450 distance = max(map(lambda revs:_segmentspan(self, revs), chunks))
2448 else:
2451 else:
2449 distance = deltainfo.distance
2452 distance = deltainfo.distance
2450
2453
2451 textlen = revinfo.textlen
2454 textlen = revinfo.textlen
2452 defaultmax = textlen * 4
2455 defaultmax = textlen * 4
2453 maxdist = self._maxdeltachainspan
2456 maxdist = self._maxdeltachainspan
2454 if not maxdist:
2457 if not maxdist:
2455 maxdist = distance # ensure the conditional below passes
2458 maxdist = distance # ensure the conditional below passes
2456 maxdist = max(maxdist, defaultmax)
2459 maxdist = max(maxdist, defaultmax)
2457 if self._sparserevlog and maxdist < self._srmingapsize:
2460 if self._sparserevlog and maxdist < self._srmingapsize:
2458 # In multiple places, we ignore irrelevant data ranges below a
2461 # In multiple places, we ignore irrelevant data ranges below a
2459 # certain size. We also apply this tradeoff here and relax the span
2462 # certain size. We also apply this tradeoff here and relax the span
2460 # constraint for small enough content.
2463 # constraint for small enough content.
2461 maxdist = self._srmingapsize
2464 maxdist = self._srmingapsize
2462 if (distance > maxdist or deltainfo.deltalen > textlen or
2465 if (distance > maxdist or deltainfo.deltalen > textlen or
2463 deltainfo.compresseddeltalen > textlen * 2 or
2466 deltainfo.compresseddeltalen > textlen * 2 or
2464 (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
2467 (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
2465 return False
2468 return False
2466
2469
2467 return True
2470 return True
2468
2471
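# Paraphrasing the checks above: with maxdist = max(_maxdeltachainspan,
# 4 * textlen) (relaxed as described for sparse revlogs and an unset span
# cap), a candidate delta is rejected when any of the following holds:
#
#   distance                     > maxdist
#   deltainfo.deltalen           > textlen
#   deltainfo.compresseddeltalen > 2 * textlen
#   deltainfo.chainlen           > _maxchainlen   (when a limit is set)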
2469 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2472 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2470 cachedelta, ifh, dfh, alwayscache=False,
2473 cachedelta, ifh, dfh, alwayscache=False,
2471 deltacomputer=None):
2474 deltacomputer=None):
2472 """internal function to add revisions to the log
2475 """internal function to add revisions to the log
2473
2476
2474 see addrevision for argument descriptions.
2477 see addrevision for argument descriptions.
2475
2478
2476 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2479 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2477
2480
2478 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2481 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2479 be used.
2482 be used.
2480
2483
2481 invariants:
2484 invariants:
2482 - rawtext is optional (can be None); if not set, cachedelta must be set.
2485 - rawtext is optional (can be None); if not set, cachedelta must be set.
2483 if both are set, they must correspond to each other.
2486 if both are set, they must correspond to each other.
2484 """
2487 """
2485 if node == nullid:
2488 if node == nullid:
2486 raise RevlogError(_("%s: attempt to add null revision") %
2489 raise RevlogError(_("%s: attempt to add null revision") %
2487 (self.indexfile))
2490 (self.indexfile))
2488 if node == wdirid or node in wdirfilenodeids:
2491 if node == wdirid or node in wdirfilenodeids:
2489 raise RevlogError(_("%s: attempt to add wdir revision") %
2492 raise RevlogError(_("%s: attempt to add wdir revision") %
2490 (self.indexfile))
2493 (self.indexfile))
2491
2494
2492 if self._inline:
2495 if self._inline:
2493 fh = ifh
2496 fh = ifh
2494 else:
2497 else:
2495 fh = dfh
2498 fh = dfh
2496
2499
2497 btext = [rawtext]
2500 btext = [rawtext]
2498
2501
2499 curr = len(self)
2502 curr = len(self)
2500 prev = curr - 1
2503 prev = curr - 1
2501 offset = self.end(prev)
2504 offset = self.end(prev)
2502 p1r, p2r = self.rev(p1), self.rev(p2)
2505 p1r, p2r = self.rev(p1), self.rev(p2)
2503
2506
2504 # full versions are inserted when the needed deltas
2507 # full versions are inserted when the needed deltas
2505 # become comparable to the uncompressed text
2508 # become comparable to the uncompressed text
2506 if rawtext is None:
2509 if rawtext is None:
2507 # need rawtext size, before it is changed by flag processors, which is
2510 # need rawtext size, before it is changed by flag processors, which is
2508 # the non-raw size. use revlog explicitly to avoid filelog's extra
2511 # the non-raw size. use revlog explicitly to avoid filelog's extra
2509 # logic that might remove metadata size.
2512 # logic that might remove metadata size.
2510 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2513 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2511 cachedelta[1])
2514 cachedelta[1])
2512 else:
2515 else:
2513 textlen = len(rawtext)
2516 textlen = len(rawtext)
2514
2517
2515 if deltacomputer is None:
2518 if deltacomputer is None:
2516 deltacomputer = _deltacomputer(self)
2519 deltacomputer = _deltacomputer(self)
2517
2520
2518 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2521 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2519
2522
2520 # no delta for flag processor revision (see "candelta" for why)
2523 # no delta for flag processor revision (see "candelta" for why)
2521 # not calling candelta since only one revision needs test, also to
2524 # not calling candelta since only one revision needs test, also to
2522 # avoid overhead fetching flags again.
2525 # avoid overhead fetching flags again.
2523 if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
2526 if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
2524 deltainfo = None
2527 deltainfo = None
2525 else:
2528 else:
2526 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2529 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2527
2530
2528 if deltainfo is not None:
2531 if deltainfo is not None:
2529 base = deltainfo.base
2532 base = deltainfo.base
2530 chainbase = deltainfo.chainbase
2533 chainbase = deltainfo.chainbase
2531 data = deltainfo.data
2534 data = deltainfo.data
2532 l = deltainfo.deltalen
2535 l = deltainfo.deltalen
2533 else:
2536 else:
2534 rawtext = deltacomputer.buildtext(revinfo, fh)
2537 rawtext = deltacomputer.buildtext(revinfo, fh)
2535 data = self.compress(rawtext)
2538 data = self.compress(rawtext)
2536 l = len(data[1]) + len(data[0])
2539 l = len(data[1]) + len(data[0])
2537 base = chainbase = curr
2540 base = chainbase = curr
2538
2541
2539 e = (offset_type(offset, flags), l, textlen,
2542 e = (offset_type(offset, flags), l, textlen,
2540 base, link, p1r, p2r, node)
2543 base, link, p1r, p2r, node)
2541 self.index.insert(-1, e)
2544 self.index.insert(-1, e)
2542 self.nodemap[node] = curr
2545 self.nodemap[node] = curr
2543
2546
2544 entry = self._io.packentry(e, self.node, self.version, curr)
2547 entry = self._io.packentry(e, self.node, self.version, curr)
2545 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
2548 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
2546
2549
2547 if alwayscache and rawtext is None:
2550 if alwayscache and rawtext is None:
2548 rawtext = deltacomputer._buildtext(revinfo, fh)
2551 rawtext = deltacomputer._buildtext(revinfo, fh)
2549
2552
2550 if type(rawtext) == bytes: # only accept immutable objects
2553 if type(rawtext) == bytes: # only accept immutable objects
2551 self._cache = (node, curr, rawtext)
2554 self._cache = (node, curr, rawtext)
2552 self._chainbasecache[curr] = chainbase
2555 self._chainbasecache[curr] = chainbase
2553 return node
2556 return node
2554
2557
2555 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2558 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2556 # Files opened in a+ mode have inconsistent behavior on various
2559 # Files opened in a+ mode have inconsistent behavior on various
2557 # platforms. Windows requires that a file positioning call be made
2560 # platforms. Windows requires that a file positioning call be made
2558 # when the file handle transitions between reads and writes. See
2561 # when the file handle transitions between reads and writes. See
2559 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2562 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2560 # platforms, Python or the platform itself can be buggy. Some versions
2563 # platforms, Python or the platform itself can be buggy. Some versions
2561 # of Solaris have been observed to not append at the end of the file
2564 # of Solaris have been observed to not append at the end of the file
2562 # if the file was seeked to before the end. See issue4943 for more.
2565 # if the file was seeked to before the end. See issue4943 for more.
2563 #
2566 #
2564 # We work around this issue by inserting a seek() before writing.
2567 # We work around this issue by inserting a seek() before writing.
2565 # Note: This is likely not necessary on Python 3.
2568 # Note: This is likely not necessary on Python 3.
2566 ifh.seek(0, os.SEEK_END)
2569 ifh.seek(0, os.SEEK_END)
2567 if dfh:
2570 if dfh:
2568 dfh.seek(0, os.SEEK_END)
2571 dfh.seek(0, os.SEEK_END)
2569
2572
2570 curr = len(self) - 1
2573 curr = len(self) - 1
2571 if not self._inline:
2574 if not self._inline:
2572 transaction.add(self.datafile, offset)
2575 transaction.add(self.datafile, offset)
2573 transaction.add(self.indexfile, curr * len(entry))
2576 transaction.add(self.indexfile, curr * len(entry))
2574 if data[0]:
2577 if data[0]:
2575 dfh.write(data[0])
2578 dfh.write(data[0])
2576 dfh.write(data[1])
2579 dfh.write(data[1])
2577 ifh.write(entry)
2580 ifh.write(entry)
2578 else:
2581 else:
2579 offset += curr * self._io.size
2582 offset += curr * self._io.size
2580 transaction.add(self.indexfile, offset, curr)
2583 transaction.add(self.indexfile, offset, curr)
2581 ifh.write(entry)
2584 ifh.write(entry)
2582 ifh.write(data[0])
2585 ifh.write(data[0])
2583 ifh.write(data[1])
2586 ifh.write(data[1])
2584 self._enforceinlinesize(transaction, ifh)
2587 self._enforceinlinesize(transaction, ifh)
2585
2588
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """add a delta group

        Given a set of deltas, add them to the revision log. The first
        delta is against its parent, which should already be in our log;
        the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp("a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp("a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            deltacomputer = _deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return nodes

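    # A rough sketch of how a caller might drive addgroup() (names here are
    # hypothetical; the real driver is the changegroup application code):
    #
    #   def deltaiter():
    #       for chunk in incoming_chunks:
    #           # each item matches the unpacking in addgroup():
    #           # (node, p1, p2, linknode, deltabase, delta, flags)
    #           yield chunk
    #
    #   added = revlog.addgroup(deltaiter(), cl.rev, tr)
    #
    # ``linkmapper`` maps the linknode carried with each delta to a local
    # changelog revision; ``cl.rev`` is one plausible choice for a filelog.
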
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        # Fragile heuristic: unless new file meta keys are added alphabetically
        # preceding "censored", all censored revisions are prefixed by
        # "\1\ncensored:". A delta producing such a censored revision must be a
        # full-replacement delta, so we inspect the first and only patch in the
        # delta for this prefix.
        hlen = struct.calcsize(">lll")
        if len(delta) <= hlen:
            return False

        oldlen = self.rawsize(baserev)
        newlen = len(delta) - hlen
        if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
            return False

        add = "\1\ncensored:"
        addlen = len(add)
        return newlen >= addlen and delta[hlen:hlen + addlen] == add

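    # Worked illustration of the check above, assuming
    # mdiff.replacediffheader(oldlen, newlen) packs the single-hunk header
    # struct.pack(">lll", 0, oldlen, newlen): a full-replacement delta is one
    # patch hunk rewriting bytes [0, oldlen) of the base with ``newlen`` new
    # bytes. If those new bytes start with "\1\ncensored:", the rebuilt
    # revision carries censorship metadata and addgroup() sets the
    # ISCENSORED flag accordingly.
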
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

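    # Illustrative example: for a linear revlog with revisions 0..4 whose
    # linkrevs are [0, 1, 2, 5, 6], getstrippoint(5) walks back from the head
    # until no linkrev >= 5 remains and returns (3, set()): truncate at
    # revision 3, and none of the removed revisions has a linkrev below
    # minlink, so the broken set is empty.
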
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]
        self._nodepos = None

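    # A note on the truncation offsets above: for a non-inline revlog the data
    # file is cut at self.start(rev) and the index at rev * entry size; for an
    # inline revlog, index entries and revision data share one file, so the
    # cut point is the data offset of ``rev`` plus the ``rev`` index entries
    # that precede it.
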
    def checksize(self):
        """check that the index and data file sizes are consistent

        Returns a ``(dd, di)`` tuple where ``dd`` is the difference between
        the actual and expected data file size and ``di`` is the number of
        unexpected bytes found in the index file.
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, 2)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEFULLADD = 'fulladd'

    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, deltabothparents=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions are re-added to the destination from their full text via
          ``addrevision()``, so the destination revlog recomputes storage from
          scratch.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``deltabothparents`` argument
        controls whether to compute deltas against both parents for merges.
        If it is not set, the destination revlog's existing setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._deltabothparents = deltabothparents or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            deltacomputer = _deltacomputer(destrevlog)
            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                if deltareuse == self.DELTAREUSEFULLADD:
                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                           cachedelta=cachedelta,
                                           node=node, flags=flags,
                                           deltacomputer=deltacomputer)
                else:
                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                            checkambig=False)
                    dfh = None
                    if not destrevlog._inline:
                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                    try:
                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                                p2, flags, cachedelta, ifh, dfh,
                                                deltacomputer=deltacomputer)
                    finally:
                        if dfh:
                            dfh.close()
                        ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
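
    # A minimal usage sketch (hypothetical setup, e.g. a repository upgrade
    # that wants every delta recomputed in the destination store):
    #
    #   with repo.transaction('clone') as tr:
    #       srcrevlog.clone(tr, destrevlog,
    #                       deltareuse=srcrevlog.DELTAREUSENEVER)
    #
    # ``destrevlog`` must be an empty revlog opened on the destination store;
    # the surrounding transaction and store plumbing are assumed, not shown.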