##// END OF EJS Templates
revlogv2: track pending write in the docket and expose it to hooks...
marmoute -
r48015:2219853a default
parent child Browse files
Show More
@@ -1,625 +1,626 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 )
14 )
15 from .thirdparty import attr
15 from .thirdparty import attr
16
16
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 metadata,
20 metadata,
21 pycompat,
21 pycompat,
22 revlog,
22 revlog,
23 )
23 )
24 from .utils import (
24 from .utils import (
25 dateutil,
25 dateutil,
26 stringutil,
26 stringutil,
27 )
27 )
28 from .revlogutils import (
28 from .revlogutils import (
29 constants as revlog_constants,
29 constants as revlog_constants,
30 flagutil,
30 flagutil,
31 )
31 )
32
32
33 _defaultextra = {b'branch': b'default'}
33 _defaultextra = {b'branch': b'default'}
34
34
35
35
36 def _string_escape(text):
36 def _string_escape(text):
37 """
37 """
38 >>> from .pycompat import bytechr as chr
38 >>> from .pycompat import bytechr as chr
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
41 >>> s
41 >>> s
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
43 >>> res = _string_escape(s)
43 >>> res = _string_escape(s)
44 >>> s == _string_unescape(res)
44 >>> s == _string_unescape(res)
45 True
45 True
46 """
46 """
47 # subset of the string_escape codec
47 # subset of the string_escape codec
48 text = (
48 text = (
49 text.replace(b'\\', b'\\\\')
49 text.replace(b'\\', b'\\\\')
50 .replace(b'\n', b'\\n')
50 .replace(b'\n', b'\\n')
51 .replace(b'\r', b'\\r')
51 .replace(b'\r', b'\\r')
52 )
52 )
53 return text.replace(b'\0', b'\\0')
53 return text.replace(b'\0', b'\\0')
54
54
55
55
def _string_unescape(text):
    """Invert ``_string_escape``.

    ``stringutil.unescapestr`` already understands ``\\\\``, ``\\n``
    and ``\\r``; the NUL escape is not part of that codec, so it is
    resolved by hand first.
    """
    if b'\\0' in text:
        # Temporarily split literal double backslashes with a newline
        # marker so the trailing half of a ``\\\\`` pair cannot be
        # misread as the start of a ``\\0`` escape, restore the NULs,
        # then glue the pairs back together.
        text = (
            text.replace(b'\\\\', b'\\\\\n')
            .replace(b'\\0', b'\0')
            .replace(b'\n', b'')
        )
    return stringutil.unescapestr(text)
63
63
64
64
def decodeextra(text):
    """Parse a changelog "extra" field into a dict.

    The input is the encoded form produced by ``encodeextra``:
    NUL-separated, backslash-escaped ``key:value`` pairs.  Keys absent
    from the input keep their defaults from ``_defaultextra``
    (currently only ``branch``).
    """
    extra = _defaultextra.copy()
    for chunk in text.split(b'\0'):
        if not chunk:
            continue
        key, value = _string_unescape(chunk).split(b':', 1)
        extra[key] = value
    return extra
82
82
83
83
def encodeextra(d):
    """Serialize an extra dict as NUL-separated, escaped key:value pairs.

    Keys are emitted in sorted order so the same dict always produces
    the same bytes — changelog entries must be deterministic.
    """
    return b"\0".join(
        _string_escape(b'%s:%s' % (key, d[key])) for key in sorted(d)
    )
88
88
89
89
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    cleaned = [line.rstrip() for line in desc.splitlines()]
    return b'\n'.join(cleaned).strip(b'\n')
93
93
94
94
class appender(object):
    """the changelog index must be updated last on disk, so we use this class
    to delay writes to it"""

    def __init__(self, vfs, name, mode, buf):
        # Appended chunks accumulate in ``buf`` instead of hitting disk.
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        # Virtual end: on-disk size plus everything buffered so far.
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        # Nothing reaches the real file until the transaction finalizes.
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            # Only positions inside the on-disk part touch the real file.
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            chunk = self.fp.read(count)
            ret = chunk
            self.offset += len(chunk)
            if count > 0:
                count -= len(chunk)
        if count != 0:
            # Collapse the buffered chunks into one bytes object so the
            # remainder can be sliced out in a single step.
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            chunk = self.data[0][doff : doff + count]
            self.offset += len(chunk)
            ret += chunk
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
163
163
164
164
165 class _divertopener(object):
165 class _divertopener(object):
166 def __init__(self, opener, target):
166 def __init__(self, opener, target):
167 self._opener = opener
167 self._opener = opener
168 self._target = target
168 self._target = target
169
169
170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
171 if name != self._target:
171 if name != self._target:
172 return self._opener(name, mode, **kwargs)
172 return self._opener(name, mode, **kwargs)
173 return self._opener(name + b".a", mode, **kwargs)
173 return self._opener(name + b".a", mode, **kwargs)
174
174
175 def __getattr__(self, attr):
175 def __getattr__(self, attr):
176 return getattr(self._opener, attr)
176 return getattr(self._opener, attr)
177
177
178
178
179 def _delayopener(opener, target, buf):
179 def _delayopener(opener, target, buf):
180 """build an opener that stores chunks in 'buf' instead of 'target'"""
180 """build an opener that stores chunks in 'buf' instead of 'target'"""
181
181
182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
183 if name != target:
183 if name != target:
184 return opener(name, mode, **kwargs)
184 return opener(name, mode, **kwargs)
185 assert not kwargs
185 assert not kwargs
186 return appender(opener, name, mode, buf)
186 return appender(opener, name, mode, buf)
187
187
188 return _delay
188 return _delay
189
189
190
190
@attr.s
class _changelogrevision(object):
    """Plain value object mirroring ``changelogrevision``'s attributes.

    Returned for the null/empty revision, where there is no revision
    text to parse lazily.
    """

    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib()
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')
    branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
206
206
207
207
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
        '_changes',
    )

    def __new__(cls, cl, text, sidedata, cpsd):
        if not text:
            # The null revision has nothing to parse; hand back a plain
            # value object with the defaults filled in.
            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)

        self = super(changelogrevision, cls).__new__(cls)
        # Parsing happens right here instead of in an __init__ so the
        # empty case above can return a different type; it also saves a
        # function call.

        # On-disk layout of a changelog entry:
        # nodeid\n : manifest node in ascii
        # user\n : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        # : extra is metadata, encoded and separated by '\0'
        # : older versions ignore it
        # files\n\n : files modified by the cset, no \n or \r allowed
        # (.*) : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty, in which case nl3 is already
        # the first newline of the double newline preceding the
        # description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd
        self._changes = None

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        # The "time tz" prefix of the third header line.
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        # The optional third field of the date line, or None when the
        # entry predates extras (changelog v0).
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None
        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0
        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra
        return decodeextra(raw)

    @property
    def changes(self):
        # Lazily computed and cached on first access.
        if self._changes is not None:
            return self._changes
        if self._cpsd:
            changes = metadata.decode_files_sidedata(self._sidedata)
        else:
            changes = metadata.ChangingFiles(
                touched=self.files or (),
                added=self.filesadded or (),
                removed=self.filesremoved or (),
                p1_copies=self.p1copies or {},
                p2_copies=self.p2copies or {},
            )
        self._changes = changes
        return changes

    @property
    def files(self):
        if self._cpsd:
            return sorted(self.changes.touched)
        off = self._offsets
        if off[2] == off[3]:
            # No file list between the date line and the description.
            return []
        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            return self.changes.added
        rawindices = self.extra.get(b'filesadded')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            return self.changes.removed
        rawindices = self.extra.get(b'filesremoved')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            return self.changes.copied_from_p1
        rawcopies = self.extra.get(b'p1copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            return self.changes.copied_from_p2
        rawcopies = self.extra.get(b'p2copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        # Skip both newlines of the double-newline separator.
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])

    @property
    def branchinfo(self):
        extra = self.extra
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
382
382
383
383
class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False, concurrencychecker=None):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.

        ``concurrencychecker`` will be passed to the revlog init function, see
        the documentation there.
        """
        revlog.revlog.__init__(
            self,
            opener,
            target=(revlog_constants.KIND_CHANGELOG, None),
            radix=b'00changelog',
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
            concurrencychecker=concurrencychecker,
            trypending=trypending,
        )

        if self._initempty and (self._format_version == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.
            self._format_flags &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each. So disable delta
        # chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self._filteredrevs = frozenset()
        self._filteredrevs_hashcache = {}
        self._copiesstorage = opener.options.get(b'copies-storage')

    @property
    def filteredrevs(self):
        return self._filteredrevs

    @filteredrevs.setter
    def filteredrevs(self, val):
        # Ensure all updates go through this function
        assert isinstance(val, frozenset)
        self._filteredrevs = val
        self._filteredrevs_hashcache = {}

    def _write_docket(self, tr):
        # While delayed, the docket must stay invisible to other readers;
        # _finalize takes care of writing it at the end of the transaction.
        if not self._delayed:
            super(changelog, self)._write_docket(tr)

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""
        if self._docket is None and not self._delayed:
            if len(self) == 0:
                # An empty changelog can be written out wholesale into a
                # diverted '.a' file; leftovers from an aborted run are
                # cleaned up first.
                self._divert = True
                if self._realopener.exists(self._indexfile + b'.a'):
                    self._realopener.unlink(self._indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self._indexfile)
            else:
                # Otherwise buffer the appended data in memory.
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self._indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._docket is not None:
            self._write_docket(tr)
        elif self._divert:
            assert not self._delaybuf
            tmpname = self._indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self._indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self._indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._docket:
            # Revlogs with a docket track their pending state there and
            # expose it to hooks through the docket write.
            return self._docket.write(tr, pending=True)
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self._indexfile)
            pendingfilename = self._indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, b"w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write(b"".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self._indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr):
        # Splitting out of inline mode must wait until the delayed data
        # has been written back.
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr)

    def read(self, nodeorrev):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

           - manifest node in binary
           - author/user as a localstr
           - date as a 2-tuple of (time, timezone)
           - list of files
           - commit message as a localstr
           - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d, s = self._revisiondata(nodeorrev)
        c = changelogrevision(
            self, d, s, self._copiesstorage == b'changeset-sidedata'
        )
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, nodeorrev):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(nodeorrev)
        if not text:
            return []
        # The header is everything before the double newline; files start
        # on the fourth line of it.
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
    ):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                # The default branch is implicit and never stored.
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files.touched)
        flags = 0
        sidedata = None
        if self._copiesstorage == b'changeset-sidedata':
            if files.has_copies_info:
                flags |= flagutil.REVIDX_HASCOPIESINFO
            sidedata = metadata.encode_files_sidedata(files)

        if extra:
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        rev = self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
        )
        return self.node(rev)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        return self.changelogrevision(rev).branchinfo

    def _nodeduplicatecallback(self, transaction, rev):
        # keep track of revisions that got "re-added", eg: unbunde of know rev.
        #
        # We track them in a list to preserve their order from the source bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(rev)
@@ -1,2698 +1,2697 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
19 def loadconfigtable(ui, extname, configtable):
19 def loadconfigtable(ui, extname, configtable):
20 """update config item known to the ui with the extension ones"""
20 """update config item known to the ui with the extension ones"""
21 for section, items in sorted(configtable.items()):
21 for section, items in sorted(configtable.items()):
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 knownkeys = set(knownitems)
23 knownkeys = set(knownitems)
24 newkeys = set(items)
24 newkeys = set(items)
25 for key in sorted(knownkeys & newkeys):
25 for key in sorted(knownkeys & newkeys):
26 msg = b"extension '%s' overwrite config item '%s.%s'"
26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 msg %= (extname, section, key)
27 msg %= (extname, section, key)
28 ui.develwarn(msg, config=b'warn-config')
28 ui.develwarn(msg, config=b'warn-config')
29
29
30 knownitems.update(items)
30 knownitems.update(items)
31
31
32
32
33 class configitem(object):
33 class configitem(object):
34 """represent a known config item
34 """represent a known config item
35
35
36 :section: the official config section where to find this item,
36 :section: the official config section where to find this item,
37 :name: the official name within the section,
37 :name: the official name within the section,
38 :default: default value for this item,
38 :default: default value for this item,
39 :alias: optional list of tuples as alternatives,
39 :alias: optional list of tuples as alternatives,
40 :generic: this is a generic definition, match name using regular expression.
40 :generic: this is a generic definition, match name using regular expression.
41 """
41 """
42
42
43 def __init__(
43 def __init__(
44 self,
44 self,
45 section,
45 section,
46 name,
46 name,
47 default=None,
47 default=None,
48 alias=(),
48 alias=(),
49 generic=False,
49 generic=False,
50 priority=0,
50 priority=0,
51 experimental=False,
51 experimental=False,
52 ):
52 ):
53 self.section = section
53 self.section = section
54 self.name = name
54 self.name = name
55 self.default = default
55 self.default = default
56 self.alias = list(alias)
56 self.alias = list(alias)
57 self.generic = generic
57 self.generic = generic
58 self.priority = priority
58 self.priority = priority
59 self.experimental = experimental
59 self.experimental = experimental
60 self._re = None
60 self._re = None
61 if generic:
61 if generic:
62 self._re = re.compile(self.name)
62 self._re = re.compile(self.name)
63
63
64
64
65 class itemregister(dict):
65 class itemregister(dict):
66 """A specialized dictionary that can handle wild-card selection"""
66 """A specialized dictionary that can handle wild-card selection"""
67
67
68 def __init__(self):
68 def __init__(self):
69 super(itemregister, self).__init__()
69 super(itemregister, self).__init__()
70 self._generics = set()
70 self._generics = set()
71
71
72 def update(self, other):
72 def update(self, other):
73 super(itemregister, self).update(other)
73 super(itemregister, self).update(other)
74 self._generics.update(other._generics)
74 self._generics.update(other._generics)
75
75
76 def __setitem__(self, key, item):
76 def __setitem__(self, key, item):
77 super(itemregister, self).__setitem__(key, item)
77 super(itemregister, self).__setitem__(key, item)
78 if item.generic:
78 if item.generic:
79 self._generics.add(item)
79 self._generics.add(item)
80
80
81 def get(self, key):
81 def get(self, key):
82 baseitem = super(itemregister, self).get(key)
82 baseitem = super(itemregister, self).get(key)
83 if baseitem is not None and not baseitem.generic:
83 if baseitem is not None and not baseitem.generic:
84 return baseitem
84 return baseitem
85
85
86 # search for a matching generic item
86 # search for a matching generic item
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 for item in generics:
88 for item in generics:
89 # we use 'match' instead of 'search' to make the matching simpler
89 # we use 'match' instead of 'search' to make the matching simpler
90 # for people unfamiliar with regular expression. Having the match
90 # for people unfamiliar with regular expression. Having the match
91 # rooted to the start of the string will produce less surprising
91 # rooted to the start of the string will produce less surprising
92 # result for user writing simple regex for sub-attribute.
92 # result for user writing simple regex for sub-attribute.
93 #
93 #
94 # For example using "color\..*" match produces an unsurprising
94 # For example using "color\..*" match produces an unsurprising
95 # result, while using search could suddenly match apparently
95 # result, while using search could suddenly match apparently
96 # unrelated configuration that happens to contains "color."
96 # unrelated configuration that happens to contains "color."
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 # some match to avoid the need to prefix most pattern with "^".
98 # some match to avoid the need to prefix most pattern with "^".
99 # The "^" seems more error prone.
99 # The "^" seems more error prone.
100 if item._re.match(key):
100 if item._re.match(key):
101 return item
101 return item
102
102
103 return None
103 return None
104
104
105
105
106 coreitems = {}
106 coreitems = {}
107
107
108
108
109 def _register(configtable, *args, **kwargs):
109 def _register(configtable, *args, **kwargs):
110 item = configitem(*args, **kwargs)
110 item = configitem(*args, **kwargs)
111 section = configtable.setdefault(item.section, itemregister())
111 section = configtable.setdefault(item.section, itemregister())
112 if item.name in section:
112 if item.name in section:
113 msg = b"duplicated config item registration for '%s.%s'"
113 msg = b"duplicated config item registration for '%s.%s'"
114 raise error.ProgrammingError(msg % (item.section, item.name))
114 raise error.ProgrammingError(msg % (item.section, item.name))
115 section[item.name] = item
115 section[item.name] = item
116
116
117
117
118 # special value for case where the default is derived from other values
118 # special value for case where the default is derived from other values
119 dynamicdefault = object()
119 dynamicdefault = object()
120
120
121 # Registering actual config items
121 # Registering actual config items
122
122
123
123
124 def getitemregister(configtable):
124 def getitemregister(configtable):
125 f = functools.partial(_register, configtable)
125 f = functools.partial(_register, configtable)
126 # export pseudo enum as configitem.*
126 # export pseudo enum as configitem.*
127 f.dynamicdefault = dynamicdefault
127 f.dynamicdefault = dynamicdefault
128 return f
128 return f
129
129
130
130
131 coreconfigitem = getitemregister(coreitems)
131 coreconfigitem = getitemregister(coreitems)
132
132
133
133
134 def _registerdiffopts(section, configprefix=b''):
134 def _registerdiffopts(section, configprefix=b''):
135 coreconfigitem(
135 coreconfigitem(
136 section,
136 section,
137 configprefix + b'nodates',
137 configprefix + b'nodates',
138 default=False,
138 default=False,
139 )
139 )
140 coreconfigitem(
140 coreconfigitem(
141 section,
141 section,
142 configprefix + b'showfunc',
142 configprefix + b'showfunc',
143 default=False,
143 default=False,
144 )
144 )
145 coreconfigitem(
145 coreconfigitem(
146 section,
146 section,
147 configprefix + b'unified',
147 configprefix + b'unified',
148 default=None,
148 default=None,
149 )
149 )
150 coreconfigitem(
150 coreconfigitem(
151 section,
151 section,
152 configprefix + b'git',
152 configprefix + b'git',
153 default=False,
153 default=False,
154 )
154 )
155 coreconfigitem(
155 coreconfigitem(
156 section,
156 section,
157 configprefix + b'ignorews',
157 configprefix + b'ignorews',
158 default=False,
158 default=False,
159 )
159 )
160 coreconfigitem(
160 coreconfigitem(
161 section,
161 section,
162 configprefix + b'ignorewsamount',
162 configprefix + b'ignorewsamount',
163 default=False,
163 default=False,
164 )
164 )
165 coreconfigitem(
165 coreconfigitem(
166 section,
166 section,
167 configprefix + b'ignoreblanklines',
167 configprefix + b'ignoreblanklines',
168 default=False,
168 default=False,
169 )
169 )
170 coreconfigitem(
170 coreconfigitem(
171 section,
171 section,
172 configprefix + b'ignorewseol',
172 configprefix + b'ignorewseol',
173 default=False,
173 default=False,
174 )
174 )
175 coreconfigitem(
175 coreconfigitem(
176 section,
176 section,
177 configprefix + b'nobinary',
177 configprefix + b'nobinary',
178 default=False,
178 default=False,
179 )
179 )
180 coreconfigitem(
180 coreconfigitem(
181 section,
181 section,
182 configprefix + b'noprefix',
182 configprefix + b'noprefix',
183 default=False,
183 default=False,
184 )
184 )
185 coreconfigitem(
185 coreconfigitem(
186 section,
186 section,
187 configprefix + b'word-diff',
187 configprefix + b'word-diff',
188 default=False,
188 default=False,
189 )
189 )
190
190
191
191
192 coreconfigitem(
192 coreconfigitem(
193 b'alias',
193 b'alias',
194 b'.*',
194 b'.*',
195 default=dynamicdefault,
195 default=dynamicdefault,
196 generic=True,
196 generic=True,
197 )
197 )
198 coreconfigitem(
198 coreconfigitem(
199 b'auth',
199 b'auth',
200 b'cookiefile',
200 b'cookiefile',
201 default=None,
201 default=None,
202 )
202 )
203 _registerdiffopts(section=b'annotate')
203 _registerdiffopts(section=b'annotate')
204 # bookmarks.pushing: internal hack for discovery
204 # bookmarks.pushing: internal hack for discovery
205 coreconfigitem(
205 coreconfigitem(
206 b'bookmarks',
206 b'bookmarks',
207 b'pushing',
207 b'pushing',
208 default=list,
208 default=list,
209 )
209 )
210 # bundle.mainreporoot: internal hack for bundlerepo
210 # bundle.mainreporoot: internal hack for bundlerepo
211 coreconfigitem(
211 coreconfigitem(
212 b'bundle',
212 b'bundle',
213 b'mainreporoot',
213 b'mainreporoot',
214 default=b'',
214 default=b'',
215 )
215 )
216 coreconfigitem(
216 coreconfigitem(
217 b'censor',
217 b'censor',
218 b'policy',
218 b'policy',
219 default=b'abort',
219 default=b'abort',
220 experimental=True,
220 experimental=True,
221 )
221 )
222 coreconfigitem(
222 coreconfigitem(
223 b'chgserver',
223 b'chgserver',
224 b'idletimeout',
224 b'idletimeout',
225 default=3600,
225 default=3600,
226 )
226 )
227 coreconfigitem(
227 coreconfigitem(
228 b'chgserver',
228 b'chgserver',
229 b'skiphash',
229 b'skiphash',
230 default=False,
230 default=False,
231 )
231 )
232 coreconfigitem(
232 coreconfigitem(
233 b'cmdserver',
233 b'cmdserver',
234 b'log',
234 b'log',
235 default=None,
235 default=None,
236 )
236 )
237 coreconfigitem(
237 coreconfigitem(
238 b'cmdserver',
238 b'cmdserver',
239 b'max-log-files',
239 b'max-log-files',
240 default=7,
240 default=7,
241 )
241 )
242 coreconfigitem(
242 coreconfigitem(
243 b'cmdserver',
243 b'cmdserver',
244 b'max-log-size',
244 b'max-log-size',
245 default=b'1 MB',
245 default=b'1 MB',
246 )
246 )
247 coreconfigitem(
247 coreconfigitem(
248 b'cmdserver',
248 b'cmdserver',
249 b'max-repo-cache',
249 b'max-repo-cache',
250 default=0,
250 default=0,
251 experimental=True,
251 experimental=True,
252 )
252 )
253 coreconfigitem(
253 coreconfigitem(
254 b'cmdserver',
254 b'cmdserver',
255 b'message-encodings',
255 b'message-encodings',
256 default=list,
256 default=list,
257 )
257 )
258 coreconfigitem(
258 coreconfigitem(
259 b'cmdserver',
259 b'cmdserver',
260 b'track-log',
260 b'track-log',
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 )
262 )
263 coreconfigitem(
263 coreconfigitem(
264 b'cmdserver',
264 b'cmdserver',
265 b'shutdown-on-interrupt',
265 b'shutdown-on-interrupt',
266 default=True,
266 default=True,
267 )
267 )
268 coreconfigitem(
268 coreconfigitem(
269 b'color',
269 b'color',
270 b'.*',
270 b'.*',
271 default=None,
271 default=None,
272 generic=True,
272 generic=True,
273 )
273 )
274 coreconfigitem(
274 coreconfigitem(
275 b'color',
275 b'color',
276 b'mode',
276 b'mode',
277 default=b'auto',
277 default=b'auto',
278 )
278 )
279 coreconfigitem(
279 coreconfigitem(
280 b'color',
280 b'color',
281 b'pagermode',
281 b'pagermode',
282 default=dynamicdefault,
282 default=dynamicdefault,
283 )
283 )
284 coreconfigitem(
284 coreconfigitem(
285 b'command-templates',
285 b'command-templates',
286 b'graphnode',
286 b'graphnode',
287 default=None,
287 default=None,
288 alias=[(b'ui', b'graphnodetemplate')],
288 alias=[(b'ui', b'graphnodetemplate')],
289 )
289 )
290 coreconfigitem(
290 coreconfigitem(
291 b'command-templates',
291 b'command-templates',
292 b'log',
292 b'log',
293 default=None,
293 default=None,
294 alias=[(b'ui', b'logtemplate')],
294 alias=[(b'ui', b'logtemplate')],
295 )
295 )
296 coreconfigitem(
296 coreconfigitem(
297 b'command-templates',
297 b'command-templates',
298 b'mergemarker',
298 b'mergemarker',
299 default=(
299 default=(
300 b'{node|short} '
300 b'{node|short} '
301 b'{ifeq(tags, "tip", "", '
301 b'{ifeq(tags, "tip", "", '
302 b'ifeq(tags, "", "", "{tags} "))}'
302 b'ifeq(tags, "", "", "{tags} "))}'
303 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{if(bookmarks, "{bookmarks} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 b'- {author|user}: {desc|firstline}'
305 b'- {author|user}: {desc|firstline}'
306 ),
306 ),
307 alias=[(b'ui', b'mergemarkertemplate')],
307 alias=[(b'ui', b'mergemarkertemplate')],
308 )
308 )
309 coreconfigitem(
309 coreconfigitem(
310 b'command-templates',
310 b'command-templates',
311 b'pre-merge-tool-output',
311 b'pre-merge-tool-output',
312 default=None,
312 default=None,
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 )
314 )
315 coreconfigitem(
315 coreconfigitem(
316 b'command-templates',
316 b'command-templates',
317 b'oneline-summary',
317 b'oneline-summary',
318 default=None,
318 default=None,
319 )
319 )
320 coreconfigitem(
320 coreconfigitem(
321 b'command-templates',
321 b'command-templates',
322 b'oneline-summary.*',
322 b'oneline-summary.*',
323 default=dynamicdefault,
323 default=dynamicdefault,
324 generic=True,
324 generic=True,
325 )
325 )
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 coreconfigitem(
327 coreconfigitem(
328 b'commands',
328 b'commands',
329 b'commit.post-status',
329 b'commit.post-status',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem(
332 coreconfigitem(
333 b'commands',
333 b'commands',
334 b'grep.all-files',
334 b'grep.all-files',
335 default=False,
335 default=False,
336 experimental=True,
336 experimental=True,
337 )
337 )
338 coreconfigitem(
338 coreconfigitem(
339 b'commands',
339 b'commands',
340 b'merge.require-rev',
340 b'merge.require-rev',
341 default=False,
341 default=False,
342 )
342 )
343 coreconfigitem(
343 coreconfigitem(
344 b'commands',
344 b'commands',
345 b'push.require-revs',
345 b'push.require-revs',
346 default=False,
346 default=False,
347 )
347 )
348 coreconfigitem(
348 coreconfigitem(
349 b'commands',
349 b'commands',
350 b'resolve.confirm',
350 b'resolve.confirm',
351 default=False,
351 default=False,
352 )
352 )
353 coreconfigitem(
353 coreconfigitem(
354 b'commands',
354 b'commands',
355 b'resolve.explicit-re-merge',
355 b'resolve.explicit-re-merge',
356 default=False,
356 default=False,
357 )
357 )
358 coreconfigitem(
358 coreconfigitem(
359 b'commands',
359 b'commands',
360 b'resolve.mark-check',
360 b'resolve.mark-check',
361 default=b'none',
361 default=b'none',
362 )
362 )
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 coreconfigitem(
364 coreconfigitem(
365 b'commands',
365 b'commands',
366 b'show.aliasprefix',
366 b'show.aliasprefix',
367 default=list,
367 default=list,
368 )
368 )
369 coreconfigitem(
369 coreconfigitem(
370 b'commands',
370 b'commands',
371 b'status.relative',
371 b'status.relative',
372 default=False,
372 default=False,
373 )
373 )
374 coreconfigitem(
374 coreconfigitem(
375 b'commands',
375 b'commands',
376 b'status.skipstates',
376 b'status.skipstates',
377 default=[],
377 default=[],
378 experimental=True,
378 experimental=True,
379 )
379 )
380 coreconfigitem(
380 coreconfigitem(
381 b'commands',
381 b'commands',
382 b'status.terse',
382 b'status.terse',
383 default=b'',
383 default=b'',
384 )
384 )
385 coreconfigitem(
385 coreconfigitem(
386 b'commands',
386 b'commands',
387 b'status.verbose',
387 b'status.verbose',
388 default=False,
388 default=False,
389 )
389 )
390 coreconfigitem(
390 coreconfigitem(
391 b'commands',
391 b'commands',
392 b'update.check',
392 b'update.check',
393 default=None,
393 default=None,
394 )
394 )
395 coreconfigitem(
395 coreconfigitem(
396 b'commands',
396 b'commands',
397 b'update.requiredest',
397 b'update.requiredest',
398 default=False,
398 default=False,
399 )
399 )
400 coreconfigitem(
400 coreconfigitem(
401 b'committemplate',
401 b'committemplate',
402 b'.*',
402 b'.*',
403 default=None,
403 default=None,
404 generic=True,
404 generic=True,
405 )
405 )
406 coreconfigitem(
406 coreconfigitem(
407 b'convert',
407 b'convert',
408 b'bzr.saverev',
408 b'bzr.saverev',
409 default=True,
409 default=True,
410 )
410 )
411 coreconfigitem(
411 coreconfigitem(
412 b'convert',
412 b'convert',
413 b'cvsps.cache',
413 b'cvsps.cache',
414 default=True,
414 default=True,
415 )
415 )
416 coreconfigitem(
416 coreconfigitem(
417 b'convert',
417 b'convert',
418 b'cvsps.fuzz',
418 b'cvsps.fuzz',
419 default=60,
419 default=60,
420 )
420 )
421 coreconfigitem(
421 coreconfigitem(
422 b'convert',
422 b'convert',
423 b'cvsps.logencoding',
423 b'cvsps.logencoding',
424 default=None,
424 default=None,
425 )
425 )
426 coreconfigitem(
426 coreconfigitem(
427 b'convert',
427 b'convert',
428 b'cvsps.mergefrom',
428 b'cvsps.mergefrom',
429 default=None,
429 default=None,
430 )
430 )
431 coreconfigitem(
431 coreconfigitem(
432 b'convert',
432 b'convert',
433 b'cvsps.mergeto',
433 b'cvsps.mergeto',
434 default=None,
434 default=None,
435 )
435 )
436 coreconfigitem(
436 coreconfigitem(
437 b'convert',
437 b'convert',
438 b'git.committeractions',
438 b'git.committeractions',
439 default=lambda: [b'messagedifferent'],
439 default=lambda: [b'messagedifferent'],
440 )
440 )
441 coreconfigitem(
441 coreconfigitem(
442 b'convert',
442 b'convert',
443 b'git.extrakeys',
443 b'git.extrakeys',
444 default=list,
444 default=list,
445 )
445 )
446 coreconfigitem(
446 coreconfigitem(
447 b'convert',
447 b'convert',
448 b'git.findcopiesharder',
448 b'git.findcopiesharder',
449 default=False,
449 default=False,
450 )
450 )
451 coreconfigitem(
451 coreconfigitem(
452 b'convert',
452 b'convert',
453 b'git.remoteprefix',
453 b'git.remoteprefix',
454 default=b'remote',
454 default=b'remote',
455 )
455 )
456 coreconfigitem(
456 coreconfigitem(
457 b'convert',
457 b'convert',
458 b'git.renamelimit',
458 b'git.renamelimit',
459 default=400,
459 default=400,
460 )
460 )
461 coreconfigitem(
461 coreconfigitem(
462 b'convert',
462 b'convert',
463 b'git.saverev',
463 b'git.saverev',
464 default=True,
464 default=True,
465 )
465 )
466 coreconfigitem(
466 coreconfigitem(
467 b'convert',
467 b'convert',
468 b'git.similarity',
468 b'git.similarity',
469 default=50,
469 default=50,
470 )
470 )
471 coreconfigitem(
471 coreconfigitem(
472 b'convert',
472 b'convert',
473 b'git.skipsubmodules',
473 b'git.skipsubmodules',
474 default=False,
474 default=False,
475 )
475 )
476 coreconfigitem(
476 coreconfigitem(
477 b'convert',
477 b'convert',
478 b'hg.clonebranches',
478 b'hg.clonebranches',
479 default=False,
479 default=False,
480 )
480 )
481 coreconfigitem(
481 coreconfigitem(
482 b'convert',
482 b'convert',
483 b'hg.ignoreerrors',
483 b'hg.ignoreerrors',
484 default=False,
484 default=False,
485 )
485 )
486 coreconfigitem(
486 coreconfigitem(
487 b'convert',
487 b'convert',
488 b'hg.preserve-hash',
488 b'hg.preserve-hash',
489 default=False,
489 default=False,
490 )
490 )
491 coreconfigitem(
491 coreconfigitem(
492 b'convert',
492 b'convert',
493 b'hg.revs',
493 b'hg.revs',
494 default=None,
494 default=None,
495 )
495 )
496 coreconfigitem(
496 coreconfigitem(
497 b'convert',
497 b'convert',
498 b'hg.saverev',
498 b'hg.saverev',
499 default=False,
499 default=False,
500 )
500 )
501 coreconfigitem(
501 coreconfigitem(
502 b'convert',
502 b'convert',
503 b'hg.sourcename',
503 b'hg.sourcename',
504 default=None,
504 default=None,
505 )
505 )
506 coreconfigitem(
506 coreconfigitem(
507 b'convert',
507 b'convert',
508 b'hg.startrev',
508 b'hg.startrev',
509 default=None,
509 default=None,
510 )
510 )
511 coreconfigitem(
511 coreconfigitem(
512 b'convert',
512 b'convert',
513 b'hg.tagsbranch',
513 b'hg.tagsbranch',
514 default=b'default',
514 default=b'default',
515 )
515 )
516 coreconfigitem(
516 coreconfigitem(
517 b'convert',
517 b'convert',
518 b'hg.usebranchnames',
518 b'hg.usebranchnames',
519 default=True,
519 default=True,
520 )
520 )
521 coreconfigitem(
521 coreconfigitem(
522 b'convert',
522 b'convert',
523 b'ignoreancestorcheck',
523 b'ignoreancestorcheck',
524 default=False,
524 default=False,
525 experimental=True,
525 experimental=True,
526 )
526 )
527 coreconfigitem(
527 coreconfigitem(
528 b'convert',
528 b'convert',
529 b'localtimezone',
529 b'localtimezone',
530 default=False,
530 default=False,
531 )
531 )
532 coreconfigitem(
532 coreconfigitem(
533 b'convert',
533 b'convert',
534 b'p4.encoding',
534 b'p4.encoding',
535 default=dynamicdefault,
535 default=dynamicdefault,
536 )
536 )
537 coreconfigitem(
537 coreconfigitem(
538 b'convert',
538 b'convert',
539 b'p4.startrev',
539 b'p4.startrev',
540 default=0,
540 default=0,
541 )
541 )
542 coreconfigitem(
542 coreconfigitem(
543 b'convert',
543 b'convert',
544 b'skiptags',
544 b'skiptags',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem(
547 coreconfigitem(
548 b'convert',
548 b'convert',
549 b'svn.debugsvnlog',
549 b'svn.debugsvnlog',
550 default=True,
550 default=True,
551 )
551 )
552 coreconfigitem(
552 coreconfigitem(
553 b'convert',
553 b'convert',
554 b'svn.trunk',
554 b'svn.trunk',
555 default=None,
555 default=None,
556 )
556 )
557 coreconfigitem(
557 coreconfigitem(
558 b'convert',
558 b'convert',
559 b'svn.tags',
559 b'svn.tags',
560 default=None,
560 default=None,
561 )
561 )
562 coreconfigitem(
562 coreconfigitem(
563 b'convert',
563 b'convert',
564 b'svn.branches',
564 b'svn.branches',
565 default=None,
565 default=None,
566 )
566 )
567 coreconfigitem(
567 coreconfigitem(
568 b'convert',
568 b'convert',
569 b'svn.startrev',
569 b'svn.startrev',
570 default=0,
570 default=0,
571 )
571 )
572 coreconfigitem(
572 coreconfigitem(
573 b'convert',
573 b'convert',
574 b'svn.dangerous-set-commit-dates',
574 b'svn.dangerous-set-commit-dates',
575 default=False,
575 default=False,
576 )
576 )
577 coreconfigitem(
577 coreconfigitem(
578 b'debug',
578 b'debug',
579 b'dirstate.delaywrite',
579 b'dirstate.delaywrite',
580 default=0,
580 default=0,
581 )
581 )
582 coreconfigitem(
582 coreconfigitem(
583 b'debug',
583 b'debug',
584 b'revlog.verifyposition.changelog',
584 b'revlog.verifyposition.changelog',
585 default=b'',
585 default=b'',
586 )
586 )
587 coreconfigitem(
587 coreconfigitem(
588 b'defaults',
588 b'defaults',
589 b'.*',
589 b'.*',
590 default=None,
590 default=None,
591 generic=True,
591 generic=True,
592 )
592 )
593 coreconfigitem(
593 coreconfigitem(
594 b'devel',
594 b'devel',
595 b'all-warnings',
595 b'all-warnings',
596 default=False,
596 default=False,
597 )
597 )
598 coreconfigitem(
598 coreconfigitem(
599 b'devel',
599 b'devel',
600 b'bundle2.debug',
600 b'bundle2.debug',
601 default=False,
601 default=False,
602 )
602 )
603 coreconfigitem(
603 coreconfigitem(
604 b'devel',
604 b'devel',
605 b'bundle.delta',
605 b'bundle.delta',
606 default=b'',
606 default=b'',
607 )
607 )
608 coreconfigitem(
608 coreconfigitem(
609 b'devel',
609 b'devel',
610 b'cache-vfs',
610 b'cache-vfs',
611 default=None,
611 default=None,
612 )
612 )
613 coreconfigitem(
613 coreconfigitem(
614 b'devel',
614 b'devel',
615 b'check-locks',
615 b'check-locks',
616 default=False,
616 default=False,
617 )
617 )
618 coreconfigitem(
618 coreconfigitem(
619 b'devel',
619 b'devel',
620 b'check-relroot',
620 b'check-relroot',
621 default=False,
621 default=False,
622 )
622 )
623 # Track copy information for all file, not just "added" one (very slow)
623 # Track copy information for all file, not just "added" one (very slow)
624 coreconfigitem(
624 coreconfigitem(
625 b'devel',
625 b'devel',
626 b'copy-tracing.trace-all-files',
626 b'copy-tracing.trace-all-files',
627 default=False,
627 default=False,
628 )
628 )
629 coreconfigitem(
629 coreconfigitem(
630 b'devel',
630 b'devel',
631 b'default-date',
631 b'default-date',
632 default=None,
632 default=None,
633 )
633 )
634 coreconfigitem(
634 coreconfigitem(
635 b'devel',
635 b'devel',
636 b'deprec-warn',
636 b'deprec-warn',
637 default=False,
637 default=False,
638 )
638 )
639 coreconfigitem(
639 coreconfigitem(
640 b'devel',
640 b'devel',
641 b'disableloaddefaultcerts',
641 b'disableloaddefaultcerts',
642 default=False,
642 default=False,
643 )
643 )
644 coreconfigitem(
644 coreconfigitem(
645 b'devel',
645 b'devel',
646 b'warn-empty-changegroup',
646 b'warn-empty-changegroup',
647 default=False,
647 default=False,
648 )
648 )
649 coreconfigitem(
649 coreconfigitem(
650 b'devel',
650 b'devel',
651 b'legacy.exchange',
651 b'legacy.exchange',
652 default=list,
652 default=list,
653 )
653 )
654 # When True, revlogs use a special reference version of the nodemap, that is not
654 # When True, revlogs use a special reference version of the nodemap, that is not
655 # performant but is "known" to behave properly.
655 # performant but is "known" to behave properly.
656 coreconfigitem(
656 coreconfigitem(
657 b'devel',
657 b'devel',
658 b'persistent-nodemap',
658 b'persistent-nodemap',
659 default=False,
659 default=False,
660 )
660 )
661 coreconfigitem(
661 coreconfigitem(
662 b'devel',
662 b'devel',
663 b'servercafile',
663 b'servercafile',
664 default=b'',
664 default=b'',
665 )
665 )
666 coreconfigitem(
666 coreconfigitem(
667 b'devel',
667 b'devel',
668 b'serverexactprotocol',
668 b'serverexactprotocol',
669 default=b'',
669 default=b'',
670 )
670 )
671 coreconfigitem(
671 coreconfigitem(
672 b'devel',
672 b'devel',
673 b'serverrequirecert',
673 b'serverrequirecert',
674 default=False,
674 default=False,
675 )
675 )
676 coreconfigitem(
676 coreconfigitem(
677 b'devel',
677 b'devel',
678 b'strip-obsmarkers',
678 b'strip-obsmarkers',
679 default=True,
679 default=True,
680 )
680 )
681 coreconfigitem(
681 coreconfigitem(
682 b'devel',
682 b'devel',
683 b'warn-config',
683 b'warn-config',
684 default=None,
684 default=None,
685 )
685 )
686 coreconfigitem(
686 coreconfigitem(
687 b'devel',
687 b'devel',
688 b'warn-config-default',
688 b'warn-config-default',
689 default=None,
689 default=None,
690 )
690 )
691 coreconfigitem(
691 coreconfigitem(
692 b'devel',
692 b'devel',
693 b'user.obsmarker',
693 b'user.obsmarker',
694 default=None,
694 default=None,
695 )
695 )
696 coreconfigitem(
696 coreconfigitem(
697 b'devel',
697 b'devel',
698 b'warn-config-unknown',
698 b'warn-config-unknown',
699 default=None,
699 default=None,
700 )
700 )
701 coreconfigitem(
701 coreconfigitem(
702 b'devel',
702 b'devel',
703 b'debug.copies',
703 b'debug.copies',
704 default=False,
704 default=False,
705 )
705 )
706 coreconfigitem(
706 coreconfigitem(
707 b'devel',
707 b'devel',
708 b'copy-tracing.multi-thread',
708 b'copy-tracing.multi-thread',
709 default=True,
709 default=True,
710 )
710 )
711 coreconfigitem(
711 coreconfigitem(
712 b'devel',
712 b'devel',
713 b'debug.extensions',
713 b'debug.extensions',
714 default=False,
714 default=False,
715 )
715 )
716 coreconfigitem(
716 coreconfigitem(
717 b'devel',
717 b'devel',
718 b'debug.repo-filters',
718 b'debug.repo-filters',
719 default=False,
719 default=False,
720 )
720 )
721 coreconfigitem(
721 coreconfigitem(
722 b'devel',
722 b'devel',
723 b'debug.peer-request',
723 b'debug.peer-request',
724 default=False,
724 default=False,
725 )
725 )
726 # If discovery.exchange-heads is False, the discovery will not start with
726 # If discovery.exchange-heads is False, the discovery will not start with
727 # remote head fetching and local head querying.
727 # remote head fetching and local head querying.
728 coreconfigitem(
728 coreconfigitem(
729 b'devel',
729 b'devel',
730 b'discovery.exchange-heads',
730 b'discovery.exchange-heads',
731 default=True,
731 default=True,
732 )
732 )
733 # If discovery.grow-sample is False, the sample size used in set discovery will
733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 # not be increased through the process
734 # not be increased through the process
735 coreconfigitem(
735 coreconfigitem(
736 b'devel',
736 b'devel',
737 b'discovery.grow-sample',
737 b'discovery.grow-sample',
738 default=True,
738 default=True,
739 )
739 )
740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
741 # adapted to the shape of the undecided set (it is set to the max of:
741 # adapted to the shape of the undecided set (it is set to the max of:
742 # <target-size>, len(roots(undecided)), len(heads(undecided)
742 # <target-size>, len(roots(undecided)), len(heads(undecided)
743 coreconfigitem(
743 coreconfigitem(
744 b'devel',
744 b'devel',
745 b'discovery.grow-sample.dynamic',
745 b'discovery.grow-sample.dynamic',
746 default=True,
746 default=True,
747 )
747 )
748 # discovery.grow-sample.rate control the rate at which the sample grow
748 # discovery.grow-sample.rate control the rate at which the sample grow
749 coreconfigitem(
749 coreconfigitem(
750 b'devel',
750 b'devel',
751 b'discovery.grow-sample.rate',
751 b'discovery.grow-sample.rate',
752 default=1.05,
752 default=1.05,
753 )
753 )
754 # If discovery.randomize is False, random sampling during discovery are
754 # If discovery.randomize is False, random sampling during discovery are
755 # deterministic. It is meant for integration tests.
755 # deterministic. It is meant for integration tests.
756 coreconfigitem(
756 coreconfigitem(
757 b'devel',
757 b'devel',
758 b'discovery.randomize',
758 b'discovery.randomize',
759 default=True,
759 default=True,
760 )
760 )
761 # Control the initial size of the discovery sample
761 # Control the initial size of the discovery sample
762 coreconfigitem(
762 coreconfigitem(
763 b'devel',
763 b'devel',
764 b'discovery.sample-size',
764 b'discovery.sample-size',
765 default=200,
765 default=200,
766 )
766 )
767 # Control the initial size of the discovery for initial change
767 # Control the initial size of the discovery for initial change
768 coreconfigitem(
768 coreconfigitem(
769 b'devel',
769 b'devel',
770 b'discovery.sample-size.initial',
770 b'discovery.sample-size.initial',
771 default=100,
771 default=100,
772 )
772 )
773 _registerdiffopts(section=b'diff')
773 _registerdiffopts(section=b'diff')
774 coreconfigitem(
774 coreconfigitem(
775 b'diff',
775 b'diff',
776 b'merge',
776 b'merge',
777 default=False,
777 default=False,
778 experimental=True,
778 experimental=True,
779 )
779 )
780 coreconfigitem(
780 coreconfigitem(
781 b'email',
781 b'email',
782 b'bcc',
782 b'bcc',
783 default=None,
783 default=None,
784 )
784 )
785 coreconfigitem(
785 coreconfigitem(
786 b'email',
786 b'email',
787 b'cc',
787 b'cc',
788 default=None,
788 default=None,
789 )
789 )
790 coreconfigitem(
790 coreconfigitem(
791 b'email',
791 b'email',
792 b'charsets',
792 b'charsets',
793 default=list,
793 default=list,
794 )
794 )
795 coreconfigitem(
795 coreconfigitem(
796 b'email',
796 b'email',
797 b'from',
797 b'from',
798 default=None,
798 default=None,
799 )
799 )
800 coreconfigitem(
800 coreconfigitem(
801 b'email',
801 b'email',
802 b'method',
802 b'method',
803 default=b'smtp',
803 default=b'smtp',
804 )
804 )
805 coreconfigitem(
805 coreconfigitem(
806 b'email',
806 b'email',
807 b'reply-to',
807 b'reply-to',
808 default=None,
808 default=None,
809 )
809 )
810 coreconfigitem(
810 coreconfigitem(
811 b'email',
811 b'email',
812 b'to',
812 b'to',
813 default=None,
813 default=None,
814 )
814 )
815 coreconfigitem(
815 coreconfigitem(
816 b'experimental',
816 b'experimental',
817 b'archivemetatemplate',
817 b'archivemetatemplate',
818 default=dynamicdefault,
818 default=dynamicdefault,
819 )
819 )
820 coreconfigitem(
820 coreconfigitem(
821 b'experimental',
821 b'experimental',
822 b'auto-publish',
822 b'auto-publish',
823 default=b'publish',
823 default=b'publish',
824 )
824 )
825 coreconfigitem(
825 coreconfigitem(
826 b'experimental',
826 b'experimental',
827 b'bundle-phases',
827 b'bundle-phases',
828 default=False,
828 default=False,
829 )
829 )
830 coreconfigitem(
830 coreconfigitem(
831 b'experimental',
831 b'experimental',
832 b'bundle2-advertise',
832 b'bundle2-advertise',
833 default=True,
833 default=True,
834 )
834 )
835 coreconfigitem(
835 coreconfigitem(
836 b'experimental',
836 b'experimental',
837 b'bundle2-output-capture',
837 b'bundle2-output-capture',
838 default=False,
838 default=False,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'experimental',
841 b'experimental',
842 b'bundle2.pushback',
842 b'bundle2.pushback',
843 default=False,
843 default=False,
844 )
844 )
845 coreconfigitem(
845 coreconfigitem(
846 b'experimental',
846 b'experimental',
847 b'bundle2lazylocking',
847 b'bundle2lazylocking',
848 default=False,
848 default=False,
849 )
849 )
850 coreconfigitem(
850 coreconfigitem(
851 b'experimental',
851 b'experimental',
852 b'bundlecomplevel',
852 b'bundlecomplevel',
853 default=None,
853 default=None,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'experimental',
856 b'experimental',
857 b'bundlecomplevel.bzip2',
857 b'bundlecomplevel.bzip2',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'experimental',
861 b'experimental',
862 b'bundlecomplevel.gzip',
862 b'bundlecomplevel.gzip',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'experimental',
866 b'experimental',
867 b'bundlecomplevel.none',
867 b'bundlecomplevel.none',
868 default=None,
868 default=None,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'experimental',
871 b'experimental',
872 b'bundlecomplevel.zstd',
872 b'bundlecomplevel.zstd',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'bundlecompthreads',
877 b'bundlecompthreads',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'bundlecompthreads.bzip2',
882 b'bundlecompthreads.bzip2',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'bundlecompthreads.gzip',
887 b'bundlecompthreads.gzip',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'bundlecompthreads.none',
892 b'bundlecompthreads.none',
893 default=None,
893 default=None,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'bundlecompthreads.zstd',
897 b'bundlecompthreads.zstd',
898 default=None,
898 default=None,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'changegroup3',
902 b'changegroup3',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'changegroup4',
907 b'changegroup4',
908 default=False,
908 default=False,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'cleanup-as-archived',
912 b'cleanup-as-archived',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'clientcompressionengines',
917 b'clientcompressionengines',
918 default=list,
918 default=list,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'copytrace',
922 b'copytrace',
923 default=b'on',
923 default=b'on',
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'copytrace.movecandidateslimit',
927 b'copytrace.movecandidateslimit',
928 default=100,
928 default=100,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'copytrace.sourcecommitlimit',
932 b'copytrace.sourcecommitlimit',
933 default=100,
933 default=100,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'copies.read-from',
937 b'copies.read-from',
938 default=b"filelog-only",
938 default=b"filelog-only",
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'copies.write-to',
942 b'copies.write-to',
943 default=b'filelog-only',
943 default=b'filelog-only',
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'crecordtest',
947 b'crecordtest',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'directaccess',
952 b'directaccess',
953 default=False,
953 default=False,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'directaccess.revnums',
957 b'directaccess.revnums',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
962 b'dirstate-tree.in-memory',
963 default=False,
963 default=False,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'editortmpinhg',
967 b'editortmpinhg',
968 default=False,
968 default=False,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'evolution',
972 b'evolution',
973 default=list,
973 default=list,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'evolution.allowdivergence',
977 b'evolution.allowdivergence',
978 default=False,
978 default=False,
979 alias=[(b'experimental', b'allowdivergence')],
979 alias=[(b'experimental', b'allowdivergence')],
980 )
980 )
981 coreconfigitem(
981 coreconfigitem(
982 b'experimental',
982 b'experimental',
983 b'evolution.allowunstable',
983 b'evolution.allowunstable',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem(
986 coreconfigitem(
987 b'experimental',
987 b'experimental',
988 b'evolution.createmarkers',
988 b'evolution.createmarkers',
989 default=None,
989 default=None,
990 )
990 )
991 coreconfigitem(
991 coreconfigitem(
992 b'experimental',
992 b'experimental',
993 b'evolution.effect-flags',
993 b'evolution.effect-flags',
994 default=True,
994 default=True,
995 alias=[(b'experimental', b'effect-flags')],
995 alias=[(b'experimental', b'effect-flags')],
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'evolution.exchange',
999 b'evolution.exchange',
1000 default=None,
1000 default=None,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'evolution.bundle-obsmarker',
1004 b'evolution.bundle-obsmarker',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'evolution.bundle-obsmarker:mandatory',
1009 b'evolution.bundle-obsmarker:mandatory',
1010 default=True,
1010 default=True,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'log.topo',
1014 b'log.topo',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'evolution.report-instabilities',
1019 b'evolution.report-instabilities',
1020 default=True,
1020 default=True,
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'evolution.track-operation',
1024 b'evolution.track-operation',
1025 default=True,
1025 default=True,
1026 )
1026 )
1027 # repo-level config to exclude a revset visibility
1027 # repo-level config to exclude a revset visibility
1028 #
1028 #
1029 # The target use case is to use `share` to expose different subset of the same
1029 # The target use case is to use `share` to expose different subset of the same
1030 # repository, especially server side. See also `server.view`.
1030 # repository, especially server side. See also `server.view`.
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'experimental',
1032 b'experimental',
1033 b'extra-filter-revs',
1033 b'extra-filter-revs',
1034 default=None,
1034 default=None,
1035 )
1035 )
1036 coreconfigitem(
1036 coreconfigitem(
1037 b'experimental',
1037 b'experimental',
1038 b'maxdeltachainspan',
1038 b'maxdeltachainspan',
1039 default=-1,
1039 default=-1,
1040 )
1040 )
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 # kept/undeleted them) and creates new filenodes for them
1042 # kept/undeleted them) and creates new filenodes for them
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'experimental',
1044 b'experimental',
1045 b'merge-track-salvaged',
1045 b'merge-track-salvaged',
1046 default=False,
1046 default=False,
1047 )
1047 )
1048 coreconfigitem(
1048 coreconfigitem(
1049 b'experimental',
1049 b'experimental',
1050 b'mergetempdirprefix',
1050 b'mergetempdirprefix',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem(
1053 coreconfigitem(
1054 b'experimental',
1054 b'experimental',
1055 b'mmapindexthreshold',
1055 b'mmapindexthreshold',
1056 default=None,
1056 default=None,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'narrow',
1060 b'narrow',
1061 default=False,
1061 default=False,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'nonnormalparanoidcheck',
1065 b'nonnormalparanoidcheck',
1066 default=False,
1066 default=False,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'exportableenviron',
1070 b'exportableenviron',
1071 default=list,
1071 default=list,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'experimental',
1074 b'experimental',
1075 b'extendedheader.index',
1075 b'extendedheader.index',
1076 default=None,
1076 default=None,
1077 )
1077 )
1078 coreconfigitem(
1078 coreconfigitem(
1079 b'experimental',
1079 b'experimental',
1080 b'extendedheader.similarity',
1080 b'extendedheader.similarity',
1081 default=False,
1081 default=False,
1082 )
1082 )
1083 coreconfigitem(
1083 coreconfigitem(
1084 b'experimental',
1084 b'experimental',
1085 b'graphshorten',
1085 b'graphshorten',
1086 default=False,
1086 default=False,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'experimental',
1089 b'experimental',
1090 b'graphstyle.parent',
1090 b'graphstyle.parent',
1091 default=dynamicdefault,
1091 default=dynamicdefault,
1092 )
1092 )
1093 coreconfigitem(
1093 coreconfigitem(
1094 b'experimental',
1094 b'experimental',
1095 b'graphstyle.missing',
1095 b'graphstyle.missing',
1096 default=dynamicdefault,
1096 default=dynamicdefault,
1097 )
1097 )
1098 coreconfigitem(
1098 coreconfigitem(
1099 b'experimental',
1099 b'experimental',
1100 b'graphstyle.grandparent',
1100 b'graphstyle.grandparent',
1101 default=dynamicdefault,
1101 default=dynamicdefault,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'experimental',
1104 b'experimental',
1105 b'hook-track-tags',
1105 b'hook-track-tags',
1106 default=False,
1106 default=False,
1107 )
1107 )
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'httppeer.advertise-v2',
1110 b'httppeer.advertise-v2',
1111 default=False,
1111 default=False,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'httppeer.v2-encoder-order',
1115 b'httppeer.v2-encoder-order',
1116 default=None,
1116 default=None,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'experimental',
1119 b'experimental',
1120 b'httppostargs',
1120 b'httppostargs',
1121 default=False,
1121 default=False,
1122 )
1122 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1125
1126 coreconfigitem(
1126 coreconfigitem(
1127 b'experimental',
1127 b'experimental',
1128 b'obsmarkers-exchange-debug',
1128 b'obsmarkers-exchange-debug',
1129 default=False,
1129 default=False,
1130 )
1130 )
1131 coreconfigitem(
1131 coreconfigitem(
1132 b'experimental',
1132 b'experimental',
1133 b'remotenames',
1133 b'remotenames',
1134 default=False,
1134 default=False,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'experimental',
1137 b'experimental',
1138 b'removeemptydirs',
1138 b'removeemptydirs',
1139 default=True,
1139 default=True,
1140 )
1140 )
1141 coreconfigitem(
1141 coreconfigitem(
1142 b'experimental',
1142 b'experimental',
1143 b'revert.interactive.select-to-keep',
1143 b'revert.interactive.select-to-keep',
1144 default=False,
1144 default=False,
1145 )
1145 )
1146 coreconfigitem(
1146 coreconfigitem(
1147 b'experimental',
1147 b'experimental',
1148 b'revisions.prefixhexnode',
1148 b'revisions.prefixhexnode',
1149 default=False,
1149 default=False,
1150 )
1150 )
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * expose transaction content hooks during pre-commit validation
1154 # * include management of a persistent nodemap in the main docket
1153 # * include management of a persistent nodemap in the main docket
1155 # * enforce a "no-truncate" policy for mmap safety
1154 # * enforce a "no-truncate" policy for mmap safety
1156 # - for censoring operation
1155 # - for censoring operation
1157 # - for stripping operation
1156 # - for stripping operation
1158 # - for rollback operation
1157 # - for rollback operation
1159 # * proper streaming (race free) of the docket file
1158 # * proper streaming (race free) of the docket file
1160 # * store the data size in the docket to simplify sidedata rewrite.
1159 # * store the data size in the docket to simplify sidedata rewrite.
1161 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1160 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1162 # * Exchange-wise, we will also need to do something more efficient than
1161 # * Exchange-wise, we will also need to do something more efficient than
1163 # keeping references to the affected revlogs, especially memory-wise when
1162 # keeping references to the affected revlogs, especially memory-wise when
1164 # rewriting sidedata.
1163 # rewriting sidedata.
1165 # * sidedata compression
1164 # * sidedata compression
1166 # * introduce a proper solution to reduce the number of filelog related files.
1165 # * introduce a proper solution to reduce the number of filelog related files.
1167 # * Improvement to consider
1166 # * Improvement to consider
1168 # - track compression mode in the index entris instead of the chunks
1167 # - track compression mode in the index entris instead of the chunks
1169 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1170 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1171 # - keep track of chain base or size (probably not that useful anymore)
1170 # - keep track of chain base or size (probably not that useful anymore)
1172 # - store data and sidedata in different files
1171 # - store data and sidedata in different files
1173 coreconfigitem(
1172 coreconfigitem(
1174 b'experimental',
1173 b'experimental',
1175 b'revlogv2',
1174 b'revlogv2',
1176 default=None,
1175 default=None,
1177 )
1176 )
1178 coreconfigitem(
1177 coreconfigitem(
1179 b'experimental',
1178 b'experimental',
1180 b'revisions.disambiguatewithin',
1179 b'revisions.disambiguatewithin',
1181 default=None,
1180 default=None,
1182 )
1181 )
1183 coreconfigitem(
1182 coreconfigitem(
1184 b'experimental',
1183 b'experimental',
1185 b'rust.index',
1184 b'rust.index',
1186 default=False,
1185 default=False,
1187 )
1186 )
1188 coreconfigitem(
1187 coreconfigitem(
1189 b'experimental',
1188 b'experimental',
1190 b'server.filesdata.recommended-batch-size',
1189 b'server.filesdata.recommended-batch-size',
1191 default=50000,
1190 default=50000,
1192 )
1191 )
1193 coreconfigitem(
1192 coreconfigitem(
1194 b'experimental',
1193 b'experimental',
1195 b'server.manifestdata.recommended-batch-size',
1194 b'server.manifestdata.recommended-batch-size',
1196 default=100000,
1195 default=100000,
1197 )
1196 )
1198 coreconfigitem(
1197 coreconfigitem(
1199 b'experimental',
1198 b'experimental',
1200 b'server.stream-narrow-clones',
1199 b'server.stream-narrow-clones',
1201 default=False,
1200 default=False,
1202 )
1201 )
1203 coreconfigitem(
1202 coreconfigitem(
1204 b'experimental',
1203 b'experimental',
1205 b'single-head-per-branch',
1204 b'single-head-per-branch',
1206 default=False,
1205 default=False,
1207 )
1206 )
1208 coreconfigitem(
1207 coreconfigitem(
1209 b'experimental',
1208 b'experimental',
1210 b'single-head-per-branch:account-closed-heads',
1209 b'single-head-per-branch:account-closed-heads',
1211 default=False,
1210 default=False,
1212 )
1211 )
1213 coreconfigitem(
1212 coreconfigitem(
1214 b'experimental',
1213 b'experimental',
1215 b'single-head-per-branch:public-changes-only',
1214 b'single-head-per-branch:public-changes-only',
1216 default=False,
1215 default=False,
1217 )
1216 )
1218 coreconfigitem(
1217 coreconfigitem(
1219 b'experimental',
1218 b'experimental',
1220 b'sshserver.support-v2',
1219 b'sshserver.support-v2',
1221 default=False,
1220 default=False,
1222 )
1221 )
1223 coreconfigitem(
1222 coreconfigitem(
1224 b'experimental',
1223 b'experimental',
1225 b'sparse-read',
1224 b'sparse-read',
1226 default=False,
1225 default=False,
1227 )
1226 )
1228 coreconfigitem(
1227 coreconfigitem(
1229 b'experimental',
1228 b'experimental',
1230 b'sparse-read.density-threshold',
1229 b'sparse-read.density-threshold',
1231 default=0.50,
1230 default=0.50,
1232 )
1231 )
1233 coreconfigitem(
1232 coreconfigitem(
1234 b'experimental',
1233 b'experimental',
1235 b'sparse-read.min-gap-size',
1234 b'sparse-read.min-gap-size',
1236 default=b'65K',
1235 default=b'65K',
1237 )
1236 )
1238 coreconfigitem(
1237 coreconfigitem(
1239 b'experimental',
1238 b'experimental',
1240 b'treemanifest',
1239 b'treemanifest',
1241 default=False,
1240 default=False,
1242 )
1241 )
1243 coreconfigitem(
1242 coreconfigitem(
1244 b'experimental',
1243 b'experimental',
1245 b'update.atomic-file',
1244 b'update.atomic-file',
1246 default=False,
1245 default=False,
1247 )
1246 )
1248 coreconfigitem(
1247 coreconfigitem(
1249 b'experimental',
1248 b'experimental',
1250 b'sshpeer.advertise-v2',
1249 b'sshpeer.advertise-v2',
1251 default=False,
1250 default=False,
1252 )
1251 )
1253 coreconfigitem(
1252 coreconfigitem(
1254 b'experimental',
1253 b'experimental',
1255 b'web.apiserver',
1254 b'web.apiserver',
1256 default=False,
1255 default=False,
1257 )
1256 )
1258 coreconfigitem(
1257 coreconfigitem(
1259 b'experimental',
1258 b'experimental',
1260 b'web.api.http-v2',
1259 b'web.api.http-v2',
1261 default=False,
1260 default=False,
1262 )
1261 )
1263 coreconfigitem(
1262 coreconfigitem(
1264 b'experimental',
1263 b'experimental',
1265 b'web.api.debugreflect',
1264 b'web.api.debugreflect',
1266 default=False,
1265 default=False,
1267 )
1266 )
1268 coreconfigitem(
1267 coreconfigitem(
1269 b'experimental',
1268 b'experimental',
1270 b'worker.wdir-get-thread-safe',
1269 b'worker.wdir-get-thread-safe',
1271 default=False,
1270 default=False,
1272 )
1271 )
1273 coreconfigitem(
1272 coreconfigitem(
1274 b'experimental',
1273 b'experimental',
1275 b'worker.repository-upgrade',
1274 b'worker.repository-upgrade',
1276 default=False,
1275 default=False,
1277 )
1276 )
1278 coreconfigitem(
1277 coreconfigitem(
1279 b'experimental',
1278 b'experimental',
1280 b'xdiff',
1279 b'xdiff',
1281 default=False,
1280 default=False,
1282 )
1281 )
1283 coreconfigitem(
1282 coreconfigitem(
1284 b'extensions',
1283 b'extensions',
1285 b'.*',
1284 b'.*',
1286 default=None,
1285 default=None,
1287 generic=True,
1286 generic=True,
1288 )
1287 )
1289 coreconfigitem(
1288 coreconfigitem(
1290 b'extdata',
1289 b'extdata',
1291 b'.*',
1290 b'.*',
1292 default=None,
1291 default=None,
1293 generic=True,
1292 generic=True,
1294 )
1293 )
1295 coreconfigitem(
1294 coreconfigitem(
1296 b'format',
1295 b'format',
1297 b'bookmarks-in-store',
1296 b'bookmarks-in-store',
1298 default=False,
1297 default=False,
1299 )
1298 )
1300 coreconfigitem(
1299 coreconfigitem(
1301 b'format',
1300 b'format',
1302 b'chunkcachesize',
1301 b'chunkcachesize',
1303 default=None,
1302 default=None,
1304 experimental=True,
1303 experimental=True,
1305 )
1304 )
1306 coreconfigitem(
1305 coreconfigitem(
1307 b'format',
1306 b'format',
1308 b'dotencode',
1307 b'dotencode',
1309 default=True,
1308 default=True,
1310 )
1309 )
1311 coreconfigitem(
1310 coreconfigitem(
1312 b'format',
1311 b'format',
1313 b'generaldelta',
1312 b'generaldelta',
1314 default=False,
1313 default=False,
1315 experimental=True,
1314 experimental=True,
1316 )
1315 )
1317 coreconfigitem(
1316 coreconfigitem(
1318 b'format',
1317 b'format',
1319 b'manifestcachesize',
1318 b'manifestcachesize',
1320 default=None,
1319 default=None,
1321 experimental=True,
1320 experimental=True,
1322 )
1321 )
1323 coreconfigitem(
1322 coreconfigitem(
1324 b'format',
1323 b'format',
1325 b'maxchainlen',
1324 b'maxchainlen',
1326 default=dynamicdefault,
1325 default=dynamicdefault,
1327 experimental=True,
1326 experimental=True,
1328 )
1327 )
1329 coreconfigitem(
1328 coreconfigitem(
1330 b'format',
1329 b'format',
1331 b'obsstore-version',
1330 b'obsstore-version',
1332 default=None,
1331 default=None,
1333 )
1332 )
1334 coreconfigitem(
1333 coreconfigitem(
1335 b'format',
1334 b'format',
1336 b'sparse-revlog',
1335 b'sparse-revlog',
1337 default=True,
1336 default=True,
1338 )
1337 )
1339 coreconfigitem(
1338 coreconfigitem(
1340 b'format',
1339 b'format',
1341 b'revlog-compression',
1340 b'revlog-compression',
1342 default=lambda: [b'zstd', b'zlib'],
1341 default=lambda: [b'zstd', b'zlib'],
1343 alias=[(b'experimental', b'format.compression')],
1342 alias=[(b'experimental', b'format.compression')],
1344 )
1343 )
1345 coreconfigitem(
1344 coreconfigitem(
1346 b'format',
1345 b'format',
1347 b'usefncache',
1346 b'usefncache',
1348 default=True,
1347 default=True,
1349 )
1348 )
1350 coreconfigitem(
1349 coreconfigitem(
1351 b'format',
1350 b'format',
1352 b'usegeneraldelta',
1351 b'usegeneraldelta',
1353 default=True,
1352 default=True,
1354 )
1353 )
1355 coreconfigitem(
1354 coreconfigitem(
1356 b'format',
1355 b'format',
1357 b'usestore',
1356 b'usestore',
1358 default=True,
1357 default=True,
1359 )
1358 )
1360
1359
1361
1360
1362 def _persistent_nodemap_default():
1361 def _persistent_nodemap_default():
1363 """compute `use-persistent-nodemap` default value
1362 """compute `use-persistent-nodemap` default value
1364
1363
1365 The feature is disabled unless a fast implementation is available.
1364 The feature is disabled unless a fast implementation is available.
1366 """
1365 """
1367 from . import policy
1366 from . import policy
1368
1367
1369 return policy.importrust('revlog') is not None
1368 return policy.importrust('revlog') is not None
1370
1369
1371
1370
1372 coreconfigitem(
1371 coreconfigitem(
1373 b'format',
1372 b'format',
1374 b'use-persistent-nodemap',
1373 b'use-persistent-nodemap',
1375 default=_persistent_nodemap_default,
1374 default=_persistent_nodemap_default,
1376 )
1375 )
1377 coreconfigitem(
1376 coreconfigitem(
1378 b'format',
1377 b'format',
1379 b'exp-use-copies-side-data-changeset',
1378 b'exp-use-copies-side-data-changeset',
1380 default=False,
1379 default=False,
1381 experimental=True,
1380 experimental=True,
1382 )
1381 )
1383 coreconfigitem(
1382 coreconfigitem(
1384 b'format',
1383 b'format',
1385 b'use-share-safe',
1384 b'use-share-safe',
1386 default=False,
1385 default=False,
1387 )
1386 )
1388 coreconfigitem(
1387 coreconfigitem(
1389 b'format',
1388 b'format',
1390 b'internal-phase',
1389 b'internal-phase',
1391 default=False,
1390 default=False,
1392 experimental=True,
1391 experimental=True,
1393 )
1392 )
1394 coreconfigitem(
1393 coreconfigitem(
1395 b'fsmonitor',
1394 b'fsmonitor',
1396 b'warn_when_unused',
1395 b'warn_when_unused',
1397 default=True,
1396 default=True,
1398 )
1397 )
1399 coreconfigitem(
1398 coreconfigitem(
1400 b'fsmonitor',
1399 b'fsmonitor',
1401 b'warn_update_file_count',
1400 b'warn_update_file_count',
1402 default=50000,
1401 default=50000,
1403 )
1402 )
1404 coreconfigitem(
1403 coreconfigitem(
1405 b'fsmonitor',
1404 b'fsmonitor',
1406 b'warn_update_file_count_rust',
1405 b'warn_update_file_count_rust',
1407 default=400000,
1406 default=400000,
1408 )
1407 )
1409 coreconfigitem(
1408 coreconfigitem(
1410 b'help',
1409 b'help',
1411 br'hidden-command\..*',
1410 br'hidden-command\..*',
1412 default=False,
1411 default=False,
1413 generic=True,
1412 generic=True,
1414 )
1413 )
1415 coreconfigitem(
1414 coreconfigitem(
1416 b'help',
1415 b'help',
1417 br'hidden-topic\..*',
1416 br'hidden-topic\..*',
1418 default=False,
1417 default=False,
1419 generic=True,
1418 generic=True,
1420 )
1419 )
1421 coreconfigitem(
1420 coreconfigitem(
1422 b'hooks',
1421 b'hooks',
1423 b'[^:]*',
1422 b'[^:]*',
1424 default=dynamicdefault,
1423 default=dynamicdefault,
1425 generic=True,
1424 generic=True,
1426 )
1425 )
1427 coreconfigitem(
1426 coreconfigitem(
1428 b'hooks',
1427 b'hooks',
1429 b'.*:run-with-plain',
1428 b'.*:run-with-plain',
1430 default=True,
1429 default=True,
1431 generic=True,
1430 generic=True,
1432 )
1431 )
1433 coreconfigitem(
1432 coreconfigitem(
1434 b'hgweb-paths',
1433 b'hgweb-paths',
1435 b'.*',
1434 b'.*',
1436 default=list,
1435 default=list,
1437 generic=True,
1436 generic=True,
1438 )
1437 )
1439 coreconfigitem(
1438 coreconfigitem(
1440 b'hostfingerprints',
1439 b'hostfingerprints',
1441 b'.*',
1440 b'.*',
1442 default=list,
1441 default=list,
1443 generic=True,
1442 generic=True,
1444 )
1443 )
1445 coreconfigitem(
1444 coreconfigitem(
1446 b'hostsecurity',
1445 b'hostsecurity',
1447 b'ciphers',
1446 b'ciphers',
1448 default=None,
1447 default=None,
1449 )
1448 )
1450 coreconfigitem(
1449 coreconfigitem(
1451 b'hostsecurity',
1450 b'hostsecurity',
1452 b'minimumprotocol',
1451 b'minimumprotocol',
1453 default=dynamicdefault,
1452 default=dynamicdefault,
1454 )
1453 )
1455 coreconfigitem(
1454 coreconfigitem(
1456 b'hostsecurity',
1455 b'hostsecurity',
1457 b'.*:minimumprotocol$',
1456 b'.*:minimumprotocol$',
1458 default=dynamicdefault,
1457 default=dynamicdefault,
1459 generic=True,
1458 generic=True,
1460 )
1459 )
1461 coreconfigitem(
1460 coreconfigitem(
1462 b'hostsecurity',
1461 b'hostsecurity',
1463 b'.*:ciphers$',
1462 b'.*:ciphers$',
1464 default=dynamicdefault,
1463 default=dynamicdefault,
1465 generic=True,
1464 generic=True,
1466 )
1465 )
1467 coreconfigitem(
1466 coreconfigitem(
1468 b'hostsecurity',
1467 b'hostsecurity',
1469 b'.*:fingerprints$',
1468 b'.*:fingerprints$',
1470 default=list,
1469 default=list,
1471 generic=True,
1470 generic=True,
1472 )
1471 )
1473 coreconfigitem(
1472 coreconfigitem(
1474 b'hostsecurity',
1473 b'hostsecurity',
1475 b'.*:verifycertsfile$',
1474 b'.*:verifycertsfile$',
1476 default=None,
1475 default=None,
1477 generic=True,
1476 generic=True,
1478 )
1477 )
1479
1478
1480 coreconfigitem(
1479 coreconfigitem(
1481 b'http_proxy',
1480 b'http_proxy',
1482 b'always',
1481 b'always',
1483 default=False,
1482 default=False,
1484 )
1483 )
1485 coreconfigitem(
1484 coreconfigitem(
1486 b'http_proxy',
1485 b'http_proxy',
1487 b'host',
1486 b'host',
1488 default=None,
1487 default=None,
1489 )
1488 )
1490 coreconfigitem(
1489 coreconfigitem(
1491 b'http_proxy',
1490 b'http_proxy',
1492 b'no',
1491 b'no',
1493 default=list,
1492 default=list,
1494 )
1493 )
1495 coreconfigitem(
1494 coreconfigitem(
1496 b'http_proxy',
1495 b'http_proxy',
1497 b'passwd',
1496 b'passwd',
1498 default=None,
1497 default=None,
1499 )
1498 )
1500 coreconfigitem(
1499 coreconfigitem(
1501 b'http_proxy',
1500 b'http_proxy',
1502 b'user',
1501 b'user',
1503 default=None,
1502 default=None,
1504 )
1503 )
1505
1504
1506 coreconfigitem(
1505 coreconfigitem(
1507 b'http',
1506 b'http',
1508 b'timeout',
1507 b'timeout',
1509 default=None,
1508 default=None,
1510 )
1509 )
1511
1510
1512 coreconfigitem(
1511 coreconfigitem(
1513 b'logtoprocess',
1512 b'logtoprocess',
1514 b'commandexception',
1513 b'commandexception',
1515 default=None,
1514 default=None,
1516 )
1515 )
1517 coreconfigitem(
1516 coreconfigitem(
1518 b'logtoprocess',
1517 b'logtoprocess',
1519 b'commandfinish',
1518 b'commandfinish',
1520 default=None,
1519 default=None,
1521 )
1520 )
1522 coreconfigitem(
1521 coreconfigitem(
1523 b'logtoprocess',
1522 b'logtoprocess',
1524 b'command',
1523 b'command',
1525 default=None,
1524 default=None,
1526 )
1525 )
1527 coreconfigitem(
1526 coreconfigitem(
1528 b'logtoprocess',
1527 b'logtoprocess',
1529 b'develwarn',
1528 b'develwarn',
1530 default=None,
1529 default=None,
1531 )
1530 )
1532 coreconfigitem(
1531 coreconfigitem(
1533 b'logtoprocess',
1532 b'logtoprocess',
1534 b'uiblocked',
1533 b'uiblocked',
1535 default=None,
1534 default=None,
1536 )
1535 )
1537 coreconfigitem(
1536 coreconfigitem(
1538 b'merge',
1537 b'merge',
1539 b'checkunknown',
1538 b'checkunknown',
1540 default=b'abort',
1539 default=b'abort',
1541 )
1540 )
1542 coreconfigitem(
1541 coreconfigitem(
1543 b'merge',
1542 b'merge',
1544 b'checkignored',
1543 b'checkignored',
1545 default=b'abort',
1544 default=b'abort',
1546 )
1545 )
1547 coreconfigitem(
1546 coreconfigitem(
1548 b'experimental',
1547 b'experimental',
1549 b'merge.checkpathconflicts',
1548 b'merge.checkpathconflicts',
1550 default=False,
1549 default=False,
1551 )
1550 )
1552 coreconfigitem(
1551 coreconfigitem(
1553 b'merge',
1552 b'merge',
1554 b'followcopies',
1553 b'followcopies',
1555 default=True,
1554 default=True,
1556 )
1555 )
1557 coreconfigitem(
1556 coreconfigitem(
1558 b'merge',
1557 b'merge',
1559 b'on-failure',
1558 b'on-failure',
1560 default=b'continue',
1559 default=b'continue',
1561 )
1560 )
1562 coreconfigitem(
1561 coreconfigitem(
1563 b'merge',
1562 b'merge',
1564 b'preferancestor',
1563 b'preferancestor',
1565 default=lambda: [b'*'],
1564 default=lambda: [b'*'],
1566 experimental=True,
1565 experimental=True,
1567 )
1566 )
1568 coreconfigitem(
1567 coreconfigitem(
1569 b'merge',
1568 b'merge',
1570 b'strict-capability-check',
1569 b'strict-capability-check',
1571 default=False,
1570 default=False,
1572 )
1571 )
1573 coreconfigitem(
1572 coreconfigitem(
1574 b'merge-tools',
1573 b'merge-tools',
1575 b'.*',
1574 b'.*',
1576 default=None,
1575 default=None,
1577 generic=True,
1576 generic=True,
1578 )
1577 )
1579 coreconfigitem(
1578 coreconfigitem(
1580 b'merge-tools',
1579 b'merge-tools',
1581 br'.*\.args$',
1580 br'.*\.args$',
1582 default=b"$local $base $other",
1581 default=b"$local $base $other",
1583 generic=True,
1582 generic=True,
1584 priority=-1,
1583 priority=-1,
1585 )
1584 )
1586 coreconfigitem(
1585 coreconfigitem(
1587 b'merge-tools',
1586 b'merge-tools',
1588 br'.*\.binary$',
1587 br'.*\.binary$',
1589 default=False,
1588 default=False,
1590 generic=True,
1589 generic=True,
1591 priority=-1,
1590 priority=-1,
1592 )
1591 )
1593 coreconfigitem(
1592 coreconfigitem(
1594 b'merge-tools',
1593 b'merge-tools',
1595 br'.*\.check$',
1594 br'.*\.check$',
1596 default=list,
1595 default=list,
1597 generic=True,
1596 generic=True,
1598 priority=-1,
1597 priority=-1,
1599 )
1598 )
1600 coreconfigitem(
1599 coreconfigitem(
1601 b'merge-tools',
1600 b'merge-tools',
1602 br'.*\.checkchanged$',
1601 br'.*\.checkchanged$',
1603 default=False,
1602 default=False,
1604 generic=True,
1603 generic=True,
1605 priority=-1,
1604 priority=-1,
1606 )
1605 )
1607 coreconfigitem(
1606 coreconfigitem(
1608 b'merge-tools',
1607 b'merge-tools',
1609 br'.*\.executable$',
1608 br'.*\.executable$',
1610 default=dynamicdefault,
1609 default=dynamicdefault,
1611 generic=True,
1610 generic=True,
1612 priority=-1,
1611 priority=-1,
1613 )
1612 )
1614 coreconfigitem(
1613 coreconfigitem(
1615 b'merge-tools',
1614 b'merge-tools',
1616 br'.*\.fixeol$',
1615 br'.*\.fixeol$',
1617 default=False,
1616 default=False,
1618 generic=True,
1617 generic=True,
1619 priority=-1,
1618 priority=-1,
1620 )
1619 )
1621 coreconfigitem(
1620 coreconfigitem(
1622 b'merge-tools',
1621 b'merge-tools',
1623 br'.*\.gui$',
1622 br'.*\.gui$',
1624 default=False,
1623 default=False,
1625 generic=True,
1624 generic=True,
1626 priority=-1,
1625 priority=-1,
1627 )
1626 )
1628 coreconfigitem(
1627 coreconfigitem(
1629 b'merge-tools',
1628 b'merge-tools',
1630 br'.*\.mergemarkers$',
1629 br'.*\.mergemarkers$',
1631 default=b'basic',
1630 default=b'basic',
1632 generic=True,
1631 generic=True,
1633 priority=-1,
1632 priority=-1,
1634 )
1633 )
1635 coreconfigitem(
1634 coreconfigitem(
1636 b'merge-tools',
1635 b'merge-tools',
1637 br'.*\.mergemarkertemplate$',
1636 br'.*\.mergemarkertemplate$',
1638 default=dynamicdefault, # take from command-templates.mergemarker
1637 default=dynamicdefault, # take from command-templates.mergemarker
1639 generic=True,
1638 generic=True,
1640 priority=-1,
1639 priority=-1,
1641 )
1640 )
1642 coreconfigitem(
1641 coreconfigitem(
1643 b'merge-tools',
1642 b'merge-tools',
1644 br'.*\.priority$',
1643 br'.*\.priority$',
1645 default=0,
1644 default=0,
1646 generic=True,
1645 generic=True,
1647 priority=-1,
1646 priority=-1,
1648 )
1647 )
1649 coreconfigitem(
1648 coreconfigitem(
1650 b'merge-tools',
1649 b'merge-tools',
1651 br'.*\.premerge$',
1650 br'.*\.premerge$',
1652 default=dynamicdefault,
1651 default=dynamicdefault,
1653 generic=True,
1652 generic=True,
1654 priority=-1,
1653 priority=-1,
1655 )
1654 )
1656 coreconfigitem(
1655 coreconfigitem(
1657 b'merge-tools',
1656 b'merge-tools',
1658 br'.*\.symlink$',
1657 br'.*\.symlink$',
1659 default=False,
1658 default=False,
1660 generic=True,
1659 generic=True,
1661 priority=-1,
1660 priority=-1,
1662 )
1661 )
1663 coreconfigitem(
1662 coreconfigitem(
1664 b'pager',
1663 b'pager',
1665 b'attend-.*',
1664 b'attend-.*',
1666 default=dynamicdefault,
1665 default=dynamicdefault,
1667 generic=True,
1666 generic=True,
1668 )
1667 )
1669 coreconfigitem(
1668 coreconfigitem(
1670 b'pager',
1669 b'pager',
1671 b'ignore',
1670 b'ignore',
1672 default=list,
1671 default=list,
1673 )
1672 )
1674 coreconfigitem(
1673 coreconfigitem(
1675 b'pager',
1674 b'pager',
1676 b'pager',
1675 b'pager',
1677 default=dynamicdefault,
1676 default=dynamicdefault,
1678 )
1677 )
1679 coreconfigitem(
1678 coreconfigitem(
1680 b'patch',
1679 b'patch',
1681 b'eol',
1680 b'eol',
1682 default=b'strict',
1681 default=b'strict',
1683 )
1682 )
1684 coreconfigitem(
1683 coreconfigitem(
1685 b'patch',
1684 b'patch',
1686 b'fuzz',
1685 b'fuzz',
1687 default=2,
1686 default=2,
1688 )
1687 )
1689 coreconfigitem(
1688 coreconfigitem(
1690 b'paths',
1689 b'paths',
1691 b'default',
1690 b'default',
1692 default=None,
1691 default=None,
1693 )
1692 )
1694 coreconfigitem(
1693 coreconfigitem(
1695 b'paths',
1694 b'paths',
1696 b'default-push',
1695 b'default-push',
1697 default=None,
1696 default=None,
1698 )
1697 )
1699 coreconfigitem(
1698 coreconfigitem(
1700 b'paths',
1699 b'paths',
1701 b'.*',
1700 b'.*',
1702 default=None,
1701 default=None,
1703 generic=True,
1702 generic=True,
1704 )
1703 )
1705 coreconfigitem(
1704 coreconfigitem(
1706 b'phases',
1705 b'phases',
1707 b'checksubrepos',
1706 b'checksubrepos',
1708 default=b'follow',
1707 default=b'follow',
1709 )
1708 )
1710 coreconfigitem(
1709 coreconfigitem(
1711 b'phases',
1710 b'phases',
1712 b'new-commit',
1711 b'new-commit',
1713 default=b'draft',
1712 default=b'draft',
1714 )
1713 )
1715 coreconfigitem(
1714 coreconfigitem(
1716 b'phases',
1715 b'phases',
1717 b'publish',
1716 b'publish',
1718 default=True,
1717 default=True,
1719 )
1718 )
1720 coreconfigitem(
1719 coreconfigitem(
1721 b'profiling',
1720 b'profiling',
1722 b'enabled',
1721 b'enabled',
1723 default=False,
1722 default=False,
1724 )
1723 )
1725 coreconfigitem(
1724 coreconfigitem(
1726 b'profiling',
1725 b'profiling',
1727 b'format',
1726 b'format',
1728 default=b'text',
1727 default=b'text',
1729 )
1728 )
1730 coreconfigitem(
1729 coreconfigitem(
1731 b'profiling',
1730 b'profiling',
1732 b'freq',
1731 b'freq',
1733 default=1000,
1732 default=1000,
1734 )
1733 )
1735 coreconfigitem(
1734 coreconfigitem(
1736 b'profiling',
1735 b'profiling',
1737 b'limit',
1736 b'limit',
1738 default=30,
1737 default=30,
1739 )
1738 )
1740 coreconfigitem(
1739 coreconfigitem(
1741 b'profiling',
1740 b'profiling',
1742 b'nested',
1741 b'nested',
1743 default=0,
1742 default=0,
1744 )
1743 )
1745 coreconfigitem(
1744 coreconfigitem(
1746 b'profiling',
1745 b'profiling',
1747 b'output',
1746 b'output',
1748 default=None,
1747 default=None,
1749 )
1748 )
1750 coreconfigitem(
1749 coreconfigitem(
1751 b'profiling',
1750 b'profiling',
1752 b'showmax',
1751 b'showmax',
1753 default=0.999,
1752 default=0.999,
1754 )
1753 )
1755 coreconfigitem(
1754 coreconfigitem(
1756 b'profiling',
1755 b'profiling',
1757 b'showmin',
1756 b'showmin',
1758 default=dynamicdefault,
1757 default=dynamicdefault,
1759 )
1758 )
1760 coreconfigitem(
1759 coreconfigitem(
1761 b'profiling',
1760 b'profiling',
1762 b'showtime',
1761 b'showtime',
1763 default=True,
1762 default=True,
1764 )
1763 )
1765 coreconfigitem(
1764 coreconfigitem(
1766 b'profiling',
1765 b'profiling',
1767 b'sort',
1766 b'sort',
1768 default=b'inlinetime',
1767 default=b'inlinetime',
1769 )
1768 )
1770 coreconfigitem(
1769 coreconfigitem(
1771 b'profiling',
1770 b'profiling',
1772 b'statformat',
1771 b'statformat',
1773 default=b'hotpath',
1772 default=b'hotpath',
1774 )
1773 )
1775 coreconfigitem(
1774 coreconfigitem(
1776 b'profiling',
1775 b'profiling',
1777 b'time-track',
1776 b'time-track',
1778 default=dynamicdefault,
1777 default=dynamicdefault,
1779 )
1778 )
1780 coreconfigitem(
1779 coreconfigitem(
1781 b'profiling',
1780 b'profiling',
1782 b'type',
1781 b'type',
1783 default=b'stat',
1782 default=b'stat',
1784 )
1783 )
1785 coreconfigitem(
1784 coreconfigitem(
1786 b'progress',
1785 b'progress',
1787 b'assume-tty',
1786 b'assume-tty',
1788 default=False,
1787 default=False,
1789 )
1788 )
1790 coreconfigitem(
1789 coreconfigitem(
1791 b'progress',
1790 b'progress',
1792 b'changedelay',
1791 b'changedelay',
1793 default=1,
1792 default=1,
1794 )
1793 )
1795 coreconfigitem(
1794 coreconfigitem(
1796 b'progress',
1795 b'progress',
1797 b'clear-complete',
1796 b'clear-complete',
1798 default=True,
1797 default=True,
1799 )
1798 )
1800 coreconfigitem(
1799 coreconfigitem(
1801 b'progress',
1800 b'progress',
1802 b'debug',
1801 b'debug',
1803 default=False,
1802 default=False,
1804 )
1803 )
1805 coreconfigitem(
1804 coreconfigitem(
1806 b'progress',
1805 b'progress',
1807 b'delay',
1806 b'delay',
1808 default=3,
1807 default=3,
1809 )
1808 )
1810 coreconfigitem(
1809 coreconfigitem(
1811 b'progress',
1810 b'progress',
1812 b'disable',
1811 b'disable',
1813 default=False,
1812 default=False,
1814 )
1813 )
1815 coreconfigitem(
1814 coreconfigitem(
1816 b'progress',
1815 b'progress',
1817 b'estimateinterval',
1816 b'estimateinterval',
1818 default=60.0,
1817 default=60.0,
1819 )
1818 )
1820 coreconfigitem(
1819 coreconfigitem(
1821 b'progress',
1820 b'progress',
1822 b'format',
1821 b'format',
1823 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1822 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1824 )
1823 )
1825 coreconfigitem(
1824 coreconfigitem(
1826 b'progress',
1825 b'progress',
1827 b'refresh',
1826 b'refresh',
1828 default=0.1,
1827 default=0.1,
1829 )
1828 )
1830 coreconfigitem(
1829 coreconfigitem(
1831 b'progress',
1830 b'progress',
1832 b'width',
1831 b'width',
1833 default=dynamicdefault,
1832 default=dynamicdefault,
1834 )
1833 )
1835 coreconfigitem(
1834 coreconfigitem(
1836 b'pull',
1835 b'pull',
1837 b'confirm',
1836 b'confirm',
1838 default=False,
1837 default=False,
1839 )
1838 )
1840 coreconfigitem(
1839 coreconfigitem(
1841 b'push',
1840 b'push',
1842 b'pushvars.server',
1841 b'pushvars.server',
1843 default=False,
1842 default=False,
1844 )
1843 )
1845 coreconfigitem(
1844 coreconfigitem(
1846 b'rewrite',
1845 b'rewrite',
1847 b'backup-bundle',
1846 b'backup-bundle',
1848 default=True,
1847 default=True,
1849 alias=[(b'ui', b'history-editing-backup')],
1848 alias=[(b'ui', b'history-editing-backup')],
1850 )
1849 )
1851 coreconfigitem(
1850 coreconfigitem(
1852 b'rewrite',
1851 b'rewrite',
1853 b'update-timestamp',
1852 b'update-timestamp',
1854 default=False,
1853 default=False,
1855 )
1854 )
1856 coreconfigitem(
1855 coreconfigitem(
1857 b'rewrite',
1856 b'rewrite',
1858 b'empty-successor',
1857 b'empty-successor',
1859 default=b'skip',
1858 default=b'skip',
1860 experimental=True,
1859 experimental=True,
1861 )
1860 )
1862 coreconfigitem(
1861 coreconfigitem(
1863 b'storage',
1862 b'storage',
1864 b'new-repo-backend',
1863 b'new-repo-backend',
1865 default=b'revlogv1',
1864 default=b'revlogv1',
1866 experimental=True,
1865 experimental=True,
1867 )
1866 )
1868 coreconfigitem(
1867 coreconfigitem(
1869 b'storage',
1868 b'storage',
1870 b'revlog.optimize-delta-parent-choice',
1869 b'revlog.optimize-delta-parent-choice',
1871 default=True,
1870 default=True,
1872 alias=[(b'format', b'aggressivemergedeltas')],
1871 alias=[(b'format', b'aggressivemergedeltas')],
1873 )
1872 )
1874 # experimental as long as rust is experimental (or a C version is implemented)
1873 # experimental as long as rust is experimental (or a C version is implemented)
1875 coreconfigitem(
1874 coreconfigitem(
1876 b'storage',
1875 b'storage',
1877 b'revlog.persistent-nodemap.mmap',
1876 b'revlog.persistent-nodemap.mmap',
1878 default=True,
1877 default=True,
1879 )
1878 )
1880 # experimental as long as format.use-persistent-nodemap is.
1879 # experimental as long as format.use-persistent-nodemap is.
1881 coreconfigitem(
1880 coreconfigitem(
1882 b'storage',
1881 b'storage',
1883 b'revlog.persistent-nodemap.slow-path',
1882 b'revlog.persistent-nodemap.slow-path',
1884 default=b"abort",
1883 default=b"abort",
1885 )
1884 )
1886
1885
1887 coreconfigitem(
1886 coreconfigitem(
1888 b'storage',
1887 b'storage',
1889 b'revlog.reuse-external-delta',
1888 b'revlog.reuse-external-delta',
1890 default=True,
1889 default=True,
1891 )
1890 )
1892 coreconfigitem(
1891 coreconfigitem(
1893 b'storage',
1892 b'storage',
1894 b'revlog.reuse-external-delta-parent',
1893 b'revlog.reuse-external-delta-parent',
1895 default=None,
1894 default=None,
1896 )
1895 )
1897 coreconfigitem(
1896 coreconfigitem(
1898 b'storage',
1897 b'storage',
1899 b'revlog.zlib.level',
1898 b'revlog.zlib.level',
1900 default=None,
1899 default=None,
1901 )
1900 )
1902 coreconfigitem(
1901 coreconfigitem(
1903 b'storage',
1902 b'storage',
1904 b'revlog.zstd.level',
1903 b'revlog.zstd.level',
1905 default=None,
1904 default=None,
1906 )
1905 )
1907 coreconfigitem(
1906 coreconfigitem(
1908 b'server',
1907 b'server',
1909 b'bookmarks-pushkey-compat',
1908 b'bookmarks-pushkey-compat',
1910 default=True,
1909 default=True,
1911 )
1910 )
1912 coreconfigitem(
1911 coreconfigitem(
1913 b'server',
1912 b'server',
1914 b'bundle1',
1913 b'bundle1',
1915 default=True,
1914 default=True,
1916 )
1915 )
1917 coreconfigitem(
1916 coreconfigitem(
1918 b'server',
1917 b'server',
1919 b'bundle1gd',
1918 b'bundle1gd',
1920 default=None,
1919 default=None,
1921 )
1920 )
1922 coreconfigitem(
1921 coreconfigitem(
1923 b'server',
1922 b'server',
1924 b'bundle1.pull',
1923 b'bundle1.pull',
1925 default=None,
1924 default=None,
1926 )
1925 )
1927 coreconfigitem(
1926 coreconfigitem(
1928 b'server',
1927 b'server',
1929 b'bundle1gd.pull',
1928 b'bundle1gd.pull',
1930 default=None,
1929 default=None,
1931 )
1930 )
1932 coreconfigitem(
1931 coreconfigitem(
1933 b'server',
1932 b'server',
1934 b'bundle1.push',
1933 b'bundle1.push',
1935 default=None,
1934 default=None,
1936 )
1935 )
1937 coreconfigitem(
1936 coreconfigitem(
1938 b'server',
1937 b'server',
1939 b'bundle1gd.push',
1938 b'bundle1gd.push',
1940 default=None,
1939 default=None,
1941 )
1940 )
1942 coreconfigitem(
1941 coreconfigitem(
1943 b'server',
1942 b'server',
1944 b'bundle2.stream',
1943 b'bundle2.stream',
1945 default=True,
1944 default=True,
1946 alias=[(b'experimental', b'bundle2.stream')],
1945 alias=[(b'experimental', b'bundle2.stream')],
1947 )
1946 )
1948 coreconfigitem(
1947 coreconfigitem(
1949 b'server',
1948 b'server',
1950 b'compressionengines',
1949 b'compressionengines',
1951 default=list,
1950 default=list,
1952 )
1951 )
1953 coreconfigitem(
1952 coreconfigitem(
1954 b'server',
1953 b'server',
1955 b'concurrent-push-mode',
1954 b'concurrent-push-mode',
1956 default=b'check-related',
1955 default=b'check-related',
1957 )
1956 )
1958 coreconfigitem(
1957 coreconfigitem(
1959 b'server',
1958 b'server',
1960 b'disablefullbundle',
1959 b'disablefullbundle',
1961 default=False,
1960 default=False,
1962 )
1961 )
1963 coreconfigitem(
1962 coreconfigitem(
1964 b'server',
1963 b'server',
1965 b'maxhttpheaderlen',
1964 b'maxhttpheaderlen',
1966 default=1024,
1965 default=1024,
1967 )
1966 )
1968 coreconfigitem(
1967 coreconfigitem(
1969 b'server',
1968 b'server',
1970 b'pullbundle',
1969 b'pullbundle',
1971 default=False,
1970 default=False,
1972 )
1971 )
1973 coreconfigitem(
1972 coreconfigitem(
1974 b'server',
1973 b'server',
1975 b'preferuncompressed',
1974 b'preferuncompressed',
1976 default=False,
1975 default=False,
1977 )
1976 )
1978 coreconfigitem(
1977 coreconfigitem(
1979 b'server',
1978 b'server',
1980 b'streamunbundle',
1979 b'streamunbundle',
1981 default=False,
1980 default=False,
1982 )
1981 )
1983 coreconfigitem(
1982 coreconfigitem(
1984 b'server',
1983 b'server',
1985 b'uncompressed',
1984 b'uncompressed',
1986 default=True,
1985 default=True,
1987 )
1986 )
1988 coreconfigitem(
1987 coreconfigitem(
1989 b'server',
1988 b'server',
1990 b'uncompressedallowsecret',
1989 b'uncompressedallowsecret',
1991 default=False,
1990 default=False,
1992 )
1991 )
1993 coreconfigitem(
1992 coreconfigitem(
1994 b'server',
1993 b'server',
1995 b'view',
1994 b'view',
1996 default=b'served',
1995 default=b'served',
1997 )
1996 )
1998 coreconfigitem(
1997 coreconfigitem(
1999 b'server',
1998 b'server',
2000 b'validate',
1999 b'validate',
2001 default=False,
2000 default=False,
2002 )
2001 )
2003 coreconfigitem(
2002 coreconfigitem(
2004 b'server',
2003 b'server',
2005 b'zliblevel',
2004 b'zliblevel',
2006 default=-1,
2005 default=-1,
2007 )
2006 )
2008 coreconfigitem(
2007 coreconfigitem(
2009 b'server',
2008 b'server',
2010 b'zstdlevel',
2009 b'zstdlevel',
2011 default=3,
2010 default=3,
2012 )
2011 )
2013 coreconfigitem(
2012 coreconfigitem(
2014 b'share',
2013 b'share',
2015 b'pool',
2014 b'pool',
2016 default=None,
2015 default=None,
2017 )
2016 )
2018 coreconfigitem(
2017 coreconfigitem(
2019 b'share',
2018 b'share',
2020 b'poolnaming',
2019 b'poolnaming',
2021 default=b'identity',
2020 default=b'identity',
2022 )
2021 )
2023 coreconfigitem(
2022 coreconfigitem(
2024 b'share',
2023 b'share',
2025 b'safe-mismatch.source-not-safe',
2024 b'safe-mismatch.source-not-safe',
2026 default=b'abort',
2025 default=b'abort',
2027 )
2026 )
2028 coreconfigitem(
2027 coreconfigitem(
2029 b'share',
2028 b'share',
2030 b'safe-mismatch.source-safe',
2029 b'safe-mismatch.source-safe',
2031 default=b'abort',
2030 default=b'abort',
2032 )
2031 )
2033 coreconfigitem(
2032 coreconfigitem(
2034 b'share',
2033 b'share',
2035 b'safe-mismatch.source-not-safe.warn',
2034 b'safe-mismatch.source-not-safe.warn',
2036 default=True,
2035 default=True,
2037 )
2036 )
2038 coreconfigitem(
2037 coreconfigitem(
2039 b'share',
2038 b'share',
2040 b'safe-mismatch.source-safe.warn',
2039 b'safe-mismatch.source-safe.warn',
2041 default=True,
2040 default=True,
2042 )
2041 )
2043 coreconfigitem(
2042 coreconfigitem(
2044 b'shelve',
2043 b'shelve',
2045 b'maxbackups',
2044 b'maxbackups',
2046 default=10,
2045 default=10,
2047 )
2046 )
2048 coreconfigitem(
2047 coreconfigitem(
2049 b'smtp',
2048 b'smtp',
2050 b'host',
2049 b'host',
2051 default=None,
2050 default=None,
2052 )
2051 )
2053 coreconfigitem(
2052 coreconfigitem(
2054 b'smtp',
2053 b'smtp',
2055 b'local_hostname',
2054 b'local_hostname',
2056 default=None,
2055 default=None,
2057 )
2056 )
2058 coreconfigitem(
2057 coreconfigitem(
2059 b'smtp',
2058 b'smtp',
2060 b'password',
2059 b'password',
2061 default=None,
2060 default=None,
2062 )
2061 )
2063 coreconfigitem(
2062 coreconfigitem(
2064 b'smtp',
2063 b'smtp',
2065 b'port',
2064 b'port',
2066 default=dynamicdefault,
2065 default=dynamicdefault,
2067 )
2066 )
2068 coreconfigitem(
2067 coreconfigitem(
2069 b'smtp',
2068 b'smtp',
2070 b'tls',
2069 b'tls',
2071 default=b'none',
2070 default=b'none',
2072 )
2071 )
2073 coreconfigitem(
2072 coreconfigitem(
2074 b'smtp',
2073 b'smtp',
2075 b'username',
2074 b'username',
2076 default=None,
2075 default=None,
2077 )
2076 )
2078 coreconfigitem(
2077 coreconfigitem(
2079 b'sparse',
2078 b'sparse',
2080 b'missingwarning',
2079 b'missingwarning',
2081 default=True,
2080 default=True,
2082 experimental=True,
2081 experimental=True,
2083 )
2082 )
2084 coreconfigitem(
2083 coreconfigitem(
2085 b'subrepos',
2084 b'subrepos',
2086 b'allowed',
2085 b'allowed',
2087 default=dynamicdefault, # to make backporting simpler
2086 default=dynamicdefault, # to make backporting simpler
2088 )
2087 )
2089 coreconfigitem(
2088 coreconfigitem(
2090 b'subrepos',
2089 b'subrepos',
2091 b'hg:allowed',
2090 b'hg:allowed',
2092 default=dynamicdefault,
2091 default=dynamicdefault,
2093 )
2092 )
2094 coreconfigitem(
2093 coreconfigitem(
2095 b'subrepos',
2094 b'subrepos',
2096 b'git:allowed',
2095 b'git:allowed',
2097 default=dynamicdefault,
2096 default=dynamicdefault,
2098 )
2097 )
2099 coreconfigitem(
2098 coreconfigitem(
2100 b'subrepos',
2099 b'subrepos',
2101 b'svn:allowed',
2100 b'svn:allowed',
2102 default=dynamicdefault,
2101 default=dynamicdefault,
2103 )
2102 )
2104 coreconfigitem(
2103 coreconfigitem(
2105 b'templates',
2104 b'templates',
2106 b'.*',
2105 b'.*',
2107 default=None,
2106 default=None,
2108 generic=True,
2107 generic=True,
2109 )
2108 )
2110 coreconfigitem(
2109 coreconfigitem(
2111 b'templateconfig',
2110 b'templateconfig',
2112 b'.*',
2111 b'.*',
2113 default=dynamicdefault,
2112 default=dynamicdefault,
2114 generic=True,
2113 generic=True,
2115 )
2114 )
2116 coreconfigitem(
2115 coreconfigitem(
2117 b'trusted',
2116 b'trusted',
2118 b'groups',
2117 b'groups',
2119 default=list,
2118 default=list,
2120 )
2119 )
2121 coreconfigitem(
2120 coreconfigitem(
2122 b'trusted',
2121 b'trusted',
2123 b'users',
2122 b'users',
2124 default=list,
2123 default=list,
2125 )
2124 )
2126 coreconfigitem(
2125 coreconfigitem(
2127 b'ui',
2126 b'ui',
2128 b'_usedassubrepo',
2127 b'_usedassubrepo',
2129 default=False,
2128 default=False,
2130 )
2129 )
2131 coreconfigitem(
2130 coreconfigitem(
2132 b'ui',
2131 b'ui',
2133 b'allowemptycommit',
2132 b'allowemptycommit',
2134 default=False,
2133 default=False,
2135 )
2134 )
2136 coreconfigitem(
2135 coreconfigitem(
2137 b'ui',
2136 b'ui',
2138 b'archivemeta',
2137 b'archivemeta',
2139 default=True,
2138 default=True,
2140 )
2139 )
2141 coreconfigitem(
2140 coreconfigitem(
2142 b'ui',
2141 b'ui',
2143 b'askusername',
2142 b'askusername',
2144 default=False,
2143 default=False,
2145 )
2144 )
2146 coreconfigitem(
2145 coreconfigitem(
2147 b'ui',
2146 b'ui',
2148 b'available-memory',
2147 b'available-memory',
2149 default=None,
2148 default=None,
2150 )
2149 )
2151
2150
2152 coreconfigitem(
2151 coreconfigitem(
2153 b'ui',
2152 b'ui',
2154 b'clonebundlefallback',
2153 b'clonebundlefallback',
2155 default=False,
2154 default=False,
2156 )
2155 )
2157 coreconfigitem(
2156 coreconfigitem(
2158 b'ui',
2157 b'ui',
2159 b'clonebundleprefers',
2158 b'clonebundleprefers',
2160 default=list,
2159 default=list,
2161 )
2160 )
2162 coreconfigitem(
2161 coreconfigitem(
2163 b'ui',
2162 b'ui',
2164 b'clonebundles',
2163 b'clonebundles',
2165 default=True,
2164 default=True,
2166 )
2165 )
2167 coreconfigitem(
2166 coreconfigitem(
2168 b'ui',
2167 b'ui',
2169 b'color',
2168 b'color',
2170 default=b'auto',
2169 default=b'auto',
2171 )
2170 )
2172 coreconfigitem(
2171 coreconfigitem(
2173 b'ui',
2172 b'ui',
2174 b'commitsubrepos',
2173 b'commitsubrepos',
2175 default=False,
2174 default=False,
2176 )
2175 )
2177 coreconfigitem(
2176 coreconfigitem(
2178 b'ui',
2177 b'ui',
2179 b'debug',
2178 b'debug',
2180 default=False,
2179 default=False,
2181 )
2180 )
2182 coreconfigitem(
2181 coreconfigitem(
2183 b'ui',
2182 b'ui',
2184 b'debugger',
2183 b'debugger',
2185 default=None,
2184 default=None,
2186 )
2185 )
2187 coreconfigitem(
2186 coreconfigitem(
2188 b'ui',
2187 b'ui',
2189 b'editor',
2188 b'editor',
2190 default=dynamicdefault,
2189 default=dynamicdefault,
2191 )
2190 )
2192 coreconfigitem(
2191 coreconfigitem(
2193 b'ui',
2192 b'ui',
2194 b'detailed-exit-code',
2193 b'detailed-exit-code',
2195 default=False,
2194 default=False,
2196 experimental=True,
2195 experimental=True,
2197 )
2196 )
2198 coreconfigitem(
2197 coreconfigitem(
2199 b'ui',
2198 b'ui',
2200 b'fallbackencoding',
2199 b'fallbackencoding',
2201 default=None,
2200 default=None,
2202 )
2201 )
2203 coreconfigitem(
2202 coreconfigitem(
2204 b'ui',
2203 b'ui',
2205 b'forcecwd',
2204 b'forcecwd',
2206 default=None,
2205 default=None,
2207 )
2206 )
2208 coreconfigitem(
2207 coreconfigitem(
2209 b'ui',
2208 b'ui',
2210 b'forcemerge',
2209 b'forcemerge',
2211 default=None,
2210 default=None,
2212 )
2211 )
2213 coreconfigitem(
2212 coreconfigitem(
2214 b'ui',
2213 b'ui',
2215 b'formatdebug',
2214 b'formatdebug',
2216 default=False,
2215 default=False,
2217 )
2216 )
2218 coreconfigitem(
2217 coreconfigitem(
2219 b'ui',
2218 b'ui',
2220 b'formatjson',
2219 b'formatjson',
2221 default=False,
2220 default=False,
2222 )
2221 )
2223 coreconfigitem(
2222 coreconfigitem(
2224 b'ui',
2223 b'ui',
2225 b'formatted',
2224 b'formatted',
2226 default=None,
2225 default=None,
2227 )
2226 )
2228 coreconfigitem(
2227 coreconfigitem(
2229 b'ui',
2228 b'ui',
2230 b'interactive',
2229 b'interactive',
2231 default=None,
2230 default=None,
2232 )
2231 )
2233 coreconfigitem(
2232 coreconfigitem(
2234 b'ui',
2233 b'ui',
2235 b'interface',
2234 b'interface',
2236 default=None,
2235 default=None,
2237 )
2236 )
2238 coreconfigitem(
2237 coreconfigitem(
2239 b'ui',
2238 b'ui',
2240 b'interface.chunkselector',
2239 b'interface.chunkselector',
2241 default=None,
2240 default=None,
2242 )
2241 )
2243 coreconfigitem(
2242 coreconfigitem(
2244 b'ui',
2243 b'ui',
2245 b'large-file-limit',
2244 b'large-file-limit',
2246 default=10000000,
2245 default=10000000,
2247 )
2246 )
2248 coreconfigitem(
2247 coreconfigitem(
2249 b'ui',
2248 b'ui',
2250 b'logblockedtimes',
2249 b'logblockedtimes',
2251 default=False,
2250 default=False,
2252 )
2251 )
2253 coreconfigitem(
2252 coreconfigitem(
2254 b'ui',
2253 b'ui',
2255 b'merge',
2254 b'merge',
2256 default=None,
2255 default=None,
2257 )
2256 )
2258 coreconfigitem(
2257 coreconfigitem(
2259 b'ui',
2258 b'ui',
2260 b'mergemarkers',
2259 b'mergemarkers',
2261 default=b'basic',
2260 default=b'basic',
2262 )
2261 )
2263 coreconfigitem(
2262 coreconfigitem(
2264 b'ui',
2263 b'ui',
2265 b'message-output',
2264 b'message-output',
2266 default=b'stdio',
2265 default=b'stdio',
2267 )
2266 )
2268 coreconfigitem(
2267 coreconfigitem(
2269 b'ui',
2268 b'ui',
2270 b'nontty',
2269 b'nontty',
2271 default=False,
2270 default=False,
2272 )
2271 )
2273 coreconfigitem(
2272 coreconfigitem(
2274 b'ui',
2273 b'ui',
2275 b'origbackuppath',
2274 b'origbackuppath',
2276 default=None,
2275 default=None,
2277 )
2276 )
2278 coreconfigitem(
2277 coreconfigitem(
2279 b'ui',
2278 b'ui',
2280 b'paginate',
2279 b'paginate',
2281 default=True,
2280 default=True,
2282 )
2281 )
2283 coreconfigitem(
2282 coreconfigitem(
2284 b'ui',
2283 b'ui',
2285 b'patch',
2284 b'patch',
2286 default=None,
2285 default=None,
2287 )
2286 )
2288 coreconfigitem(
2287 coreconfigitem(
2289 b'ui',
2288 b'ui',
2290 b'portablefilenames',
2289 b'portablefilenames',
2291 default=b'warn',
2290 default=b'warn',
2292 )
2291 )
2293 coreconfigitem(
2292 coreconfigitem(
2294 b'ui',
2293 b'ui',
2295 b'promptecho',
2294 b'promptecho',
2296 default=False,
2295 default=False,
2297 )
2296 )
2298 coreconfigitem(
2297 coreconfigitem(
2299 b'ui',
2298 b'ui',
2300 b'quiet',
2299 b'quiet',
2301 default=False,
2300 default=False,
2302 )
2301 )
2303 coreconfigitem(
2302 coreconfigitem(
2304 b'ui',
2303 b'ui',
2305 b'quietbookmarkmove',
2304 b'quietbookmarkmove',
2306 default=False,
2305 default=False,
2307 )
2306 )
2308 coreconfigitem(
2307 coreconfigitem(
2309 b'ui',
2308 b'ui',
2310 b'relative-paths',
2309 b'relative-paths',
2311 default=b'legacy',
2310 default=b'legacy',
2312 )
2311 )
2313 coreconfigitem(
2312 coreconfigitem(
2314 b'ui',
2313 b'ui',
2315 b'remotecmd',
2314 b'remotecmd',
2316 default=b'hg',
2315 default=b'hg',
2317 )
2316 )
2318 coreconfigitem(
2317 coreconfigitem(
2319 b'ui',
2318 b'ui',
2320 b'report_untrusted',
2319 b'report_untrusted',
2321 default=True,
2320 default=True,
2322 )
2321 )
2323 coreconfigitem(
2322 coreconfigitem(
2324 b'ui',
2323 b'ui',
2325 b'rollback',
2324 b'rollback',
2326 default=True,
2325 default=True,
2327 )
2326 )
2328 coreconfigitem(
2327 coreconfigitem(
2329 b'ui',
2328 b'ui',
2330 b'signal-safe-lock',
2329 b'signal-safe-lock',
2331 default=True,
2330 default=True,
2332 )
2331 )
2333 coreconfigitem(
2332 coreconfigitem(
2334 b'ui',
2333 b'ui',
2335 b'slash',
2334 b'slash',
2336 default=False,
2335 default=False,
2337 )
2336 )
2338 coreconfigitem(
2337 coreconfigitem(
2339 b'ui',
2338 b'ui',
2340 b'ssh',
2339 b'ssh',
2341 default=b'ssh',
2340 default=b'ssh',
2342 )
2341 )
2343 coreconfigitem(
2342 coreconfigitem(
2344 b'ui',
2343 b'ui',
2345 b'ssherrorhint',
2344 b'ssherrorhint',
2346 default=None,
2345 default=None,
2347 )
2346 )
2348 coreconfigitem(
2347 coreconfigitem(
2349 b'ui',
2348 b'ui',
2350 b'statuscopies',
2349 b'statuscopies',
2351 default=False,
2350 default=False,
2352 )
2351 )
2353 coreconfigitem(
2352 coreconfigitem(
2354 b'ui',
2353 b'ui',
2355 b'strict',
2354 b'strict',
2356 default=False,
2355 default=False,
2357 )
2356 )
2358 coreconfigitem(
2357 coreconfigitem(
2359 b'ui',
2358 b'ui',
2360 b'style',
2359 b'style',
2361 default=b'',
2360 default=b'',
2362 )
2361 )
2363 coreconfigitem(
2362 coreconfigitem(
2364 b'ui',
2363 b'ui',
2365 b'supportcontact',
2364 b'supportcontact',
2366 default=None,
2365 default=None,
2367 )
2366 )
2368 coreconfigitem(
2367 coreconfigitem(
2369 b'ui',
2368 b'ui',
2370 b'textwidth',
2369 b'textwidth',
2371 default=78,
2370 default=78,
2372 )
2371 )
2373 coreconfigitem(
2372 coreconfigitem(
2374 b'ui',
2373 b'ui',
2375 b'timeout',
2374 b'timeout',
2376 default=b'600',
2375 default=b'600',
2377 )
2376 )
2378 coreconfigitem(
2377 coreconfigitem(
2379 b'ui',
2378 b'ui',
2380 b'timeout.warn',
2379 b'timeout.warn',
2381 default=0,
2380 default=0,
2382 )
2381 )
2383 coreconfigitem(
2382 coreconfigitem(
2384 b'ui',
2383 b'ui',
2385 b'timestamp-output',
2384 b'timestamp-output',
2386 default=False,
2385 default=False,
2387 )
2386 )
2388 coreconfigitem(
2387 coreconfigitem(
2389 b'ui',
2388 b'ui',
2390 b'traceback',
2389 b'traceback',
2391 default=False,
2390 default=False,
2392 )
2391 )
2393 coreconfigitem(
2392 coreconfigitem(
2394 b'ui',
2393 b'ui',
2395 b'tweakdefaults',
2394 b'tweakdefaults',
2396 default=False,
2395 default=False,
2397 )
2396 )
2398 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2397 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2399 coreconfigitem(
2398 coreconfigitem(
2400 b'ui',
2399 b'ui',
2401 b'verbose',
2400 b'verbose',
2402 default=False,
2401 default=False,
2403 )
2402 )
2404 coreconfigitem(
2403 coreconfigitem(
2405 b'verify',
2404 b'verify',
2406 b'skipflags',
2405 b'skipflags',
2407 default=None,
2406 default=None,
2408 )
2407 )
2409 coreconfigitem(
2408 coreconfigitem(
2410 b'web',
2409 b'web',
2411 b'allowbz2',
2410 b'allowbz2',
2412 default=False,
2411 default=False,
2413 )
2412 )
2414 coreconfigitem(
2413 coreconfigitem(
2415 b'web',
2414 b'web',
2416 b'allowgz',
2415 b'allowgz',
2417 default=False,
2416 default=False,
2418 )
2417 )
2419 coreconfigitem(
2418 coreconfigitem(
2420 b'web',
2419 b'web',
2421 b'allow-pull',
2420 b'allow-pull',
2422 alias=[(b'web', b'allowpull')],
2421 alias=[(b'web', b'allowpull')],
2423 default=True,
2422 default=True,
2424 )
2423 )
2425 coreconfigitem(
2424 coreconfigitem(
2426 b'web',
2425 b'web',
2427 b'allow-push',
2426 b'allow-push',
2428 alias=[(b'web', b'allow_push')],
2427 alias=[(b'web', b'allow_push')],
2429 default=list,
2428 default=list,
2430 )
2429 )
2431 coreconfigitem(
2430 coreconfigitem(
2432 b'web',
2431 b'web',
2433 b'allowzip',
2432 b'allowzip',
2434 default=False,
2433 default=False,
2435 )
2434 )
2436 coreconfigitem(
2435 coreconfigitem(
2437 b'web',
2436 b'web',
2438 b'archivesubrepos',
2437 b'archivesubrepos',
2439 default=False,
2438 default=False,
2440 )
2439 )
2441 coreconfigitem(
2440 coreconfigitem(
2442 b'web',
2441 b'web',
2443 b'cache',
2442 b'cache',
2444 default=True,
2443 default=True,
2445 )
2444 )
2446 coreconfigitem(
2445 coreconfigitem(
2447 b'web',
2446 b'web',
2448 b'comparisoncontext',
2447 b'comparisoncontext',
2449 default=5,
2448 default=5,
2450 )
2449 )
2451 coreconfigitem(
2450 coreconfigitem(
2452 b'web',
2451 b'web',
2453 b'contact',
2452 b'contact',
2454 default=None,
2453 default=None,
2455 )
2454 )
2456 coreconfigitem(
2455 coreconfigitem(
2457 b'web',
2456 b'web',
2458 b'deny_push',
2457 b'deny_push',
2459 default=list,
2458 default=list,
2460 )
2459 )
2461 coreconfigitem(
2460 coreconfigitem(
2462 b'web',
2461 b'web',
2463 b'guessmime',
2462 b'guessmime',
2464 default=False,
2463 default=False,
2465 )
2464 )
2466 coreconfigitem(
2465 coreconfigitem(
2467 b'web',
2466 b'web',
2468 b'hidden',
2467 b'hidden',
2469 default=False,
2468 default=False,
2470 )
2469 )
2471 coreconfigitem(
2470 coreconfigitem(
2472 b'web',
2471 b'web',
2473 b'labels',
2472 b'labels',
2474 default=list,
2473 default=list,
2475 )
2474 )
2476 coreconfigitem(
2475 coreconfigitem(
2477 b'web',
2476 b'web',
2478 b'logoimg',
2477 b'logoimg',
2479 default=b'hglogo.png',
2478 default=b'hglogo.png',
2480 )
2479 )
2481 coreconfigitem(
2480 coreconfigitem(
2482 b'web',
2481 b'web',
2483 b'logourl',
2482 b'logourl',
2484 default=b'https://mercurial-scm.org/',
2483 default=b'https://mercurial-scm.org/',
2485 )
2484 )
2486 coreconfigitem(
2485 coreconfigitem(
2487 b'web',
2486 b'web',
2488 b'accesslog',
2487 b'accesslog',
2489 default=b'-',
2488 default=b'-',
2490 )
2489 )
2491 coreconfigitem(
2490 coreconfigitem(
2492 b'web',
2491 b'web',
2493 b'address',
2492 b'address',
2494 default=b'',
2493 default=b'',
2495 )
2494 )
2496 coreconfigitem(
2495 coreconfigitem(
2497 b'web',
2496 b'web',
2498 b'allow-archive',
2497 b'allow-archive',
2499 alias=[(b'web', b'allow_archive')],
2498 alias=[(b'web', b'allow_archive')],
2500 default=list,
2499 default=list,
2501 )
2500 )
2502 coreconfigitem(
2501 coreconfigitem(
2503 b'web',
2502 b'web',
2504 b'allow_read',
2503 b'allow_read',
2505 default=list,
2504 default=list,
2506 )
2505 )
2507 coreconfigitem(
2506 coreconfigitem(
2508 b'web',
2507 b'web',
2509 b'baseurl',
2508 b'baseurl',
2510 default=None,
2509 default=None,
2511 )
2510 )
2512 coreconfigitem(
2511 coreconfigitem(
2513 b'web',
2512 b'web',
2514 b'cacerts',
2513 b'cacerts',
2515 default=None,
2514 default=None,
2516 )
2515 )
2517 coreconfigitem(
2516 coreconfigitem(
2518 b'web',
2517 b'web',
2519 b'certificate',
2518 b'certificate',
2520 default=None,
2519 default=None,
2521 )
2520 )
2522 coreconfigitem(
2521 coreconfigitem(
2523 b'web',
2522 b'web',
2524 b'collapse',
2523 b'collapse',
2525 default=False,
2524 default=False,
2526 )
2525 )
2527 coreconfigitem(
2526 coreconfigitem(
2528 b'web',
2527 b'web',
2529 b'csp',
2528 b'csp',
2530 default=None,
2529 default=None,
2531 )
2530 )
2532 coreconfigitem(
2531 coreconfigitem(
2533 b'web',
2532 b'web',
2534 b'deny_read',
2533 b'deny_read',
2535 default=list,
2534 default=list,
2536 )
2535 )
2537 coreconfigitem(
2536 coreconfigitem(
2538 b'web',
2537 b'web',
2539 b'descend',
2538 b'descend',
2540 default=True,
2539 default=True,
2541 )
2540 )
2542 coreconfigitem(
2541 coreconfigitem(
2543 b'web',
2542 b'web',
2544 b'description',
2543 b'description',
2545 default=b"",
2544 default=b"",
2546 )
2545 )
2547 coreconfigitem(
2546 coreconfigitem(
2548 b'web',
2547 b'web',
2549 b'encoding',
2548 b'encoding',
2550 default=lambda: encoding.encoding,
2549 default=lambda: encoding.encoding,
2551 )
2550 )
2552 coreconfigitem(
2551 coreconfigitem(
2553 b'web',
2552 b'web',
2554 b'errorlog',
2553 b'errorlog',
2555 default=b'-',
2554 default=b'-',
2556 )
2555 )
2557 coreconfigitem(
2556 coreconfigitem(
2558 b'web',
2557 b'web',
2559 b'ipv6',
2558 b'ipv6',
2560 default=False,
2559 default=False,
2561 )
2560 )
2562 coreconfigitem(
2561 coreconfigitem(
2563 b'web',
2562 b'web',
2564 b'maxchanges',
2563 b'maxchanges',
2565 default=10,
2564 default=10,
2566 )
2565 )
2567 coreconfigitem(
2566 coreconfigitem(
2568 b'web',
2567 b'web',
2569 b'maxfiles',
2568 b'maxfiles',
2570 default=10,
2569 default=10,
2571 )
2570 )
2572 coreconfigitem(
2571 coreconfigitem(
2573 b'web',
2572 b'web',
2574 b'maxshortchanges',
2573 b'maxshortchanges',
2575 default=60,
2574 default=60,
2576 )
2575 )
2577 coreconfigitem(
2576 coreconfigitem(
2578 b'web',
2577 b'web',
2579 b'motd',
2578 b'motd',
2580 default=b'',
2579 default=b'',
2581 )
2580 )
2582 coreconfigitem(
2581 coreconfigitem(
2583 b'web',
2582 b'web',
2584 b'name',
2583 b'name',
2585 default=dynamicdefault,
2584 default=dynamicdefault,
2586 )
2585 )
2587 coreconfigitem(
2586 coreconfigitem(
2588 b'web',
2587 b'web',
2589 b'port',
2588 b'port',
2590 default=8000,
2589 default=8000,
2591 )
2590 )
2592 coreconfigitem(
2591 coreconfigitem(
2593 b'web',
2592 b'web',
2594 b'prefix',
2593 b'prefix',
2595 default=b'',
2594 default=b'',
2596 )
2595 )
2597 coreconfigitem(
2596 coreconfigitem(
2598 b'web',
2597 b'web',
2599 b'push_ssl',
2598 b'push_ssl',
2600 default=True,
2599 default=True,
2601 )
2600 )
2602 coreconfigitem(
2601 coreconfigitem(
2603 b'web',
2602 b'web',
2604 b'refreshinterval',
2603 b'refreshinterval',
2605 default=20,
2604 default=20,
2606 )
2605 )
2607 coreconfigitem(
2606 coreconfigitem(
2608 b'web',
2607 b'web',
2609 b'server-header',
2608 b'server-header',
2610 default=None,
2609 default=None,
2611 )
2610 )
2612 coreconfigitem(
2611 coreconfigitem(
2613 b'web',
2612 b'web',
2614 b'static',
2613 b'static',
2615 default=None,
2614 default=None,
2616 )
2615 )
2617 coreconfigitem(
2616 coreconfigitem(
2618 b'web',
2617 b'web',
2619 b'staticurl',
2618 b'staticurl',
2620 default=None,
2619 default=None,
2621 )
2620 )
2622 coreconfigitem(
2621 coreconfigitem(
2623 b'web',
2622 b'web',
2624 b'stripes',
2623 b'stripes',
2625 default=1,
2624 default=1,
2626 )
2625 )
2627 coreconfigitem(
2626 coreconfigitem(
2628 b'web',
2627 b'web',
2629 b'style',
2628 b'style',
2630 default=b'paper',
2629 default=b'paper',
2631 )
2630 )
2632 coreconfigitem(
2631 coreconfigitem(
2633 b'web',
2632 b'web',
2634 b'templates',
2633 b'templates',
2635 default=None,
2634 default=None,
2636 )
2635 )
2637 coreconfigitem(
2636 coreconfigitem(
2638 b'web',
2637 b'web',
2639 b'view',
2638 b'view',
2640 default=b'served',
2639 default=b'served',
2641 experimental=True,
2640 experimental=True,
2642 )
2641 )
2643 coreconfigitem(
2642 coreconfigitem(
2644 b'worker',
2643 b'worker',
2645 b'backgroundclose',
2644 b'backgroundclose',
2646 default=dynamicdefault,
2645 default=dynamicdefault,
2647 )
2646 )
2648 # Windows defaults to a limit of 512 open files. A buffer of 128
2647 # Windows defaults to a limit of 512 open files. A buffer of 128
2649 # should give us enough headway.
2648 # should give us enough headway.
2650 coreconfigitem(
2649 coreconfigitem(
2651 b'worker',
2650 b'worker',
2652 b'backgroundclosemaxqueue',
2651 b'backgroundclosemaxqueue',
2653 default=384,
2652 default=384,
2654 )
2653 )
2655 coreconfigitem(
2654 coreconfigitem(
2656 b'worker',
2655 b'worker',
2657 b'backgroundcloseminfilecount',
2656 b'backgroundcloseminfilecount',
2658 default=2048,
2657 default=2048,
2659 )
2658 )
2660 coreconfigitem(
2659 coreconfigitem(
2661 b'worker',
2660 b'worker',
2662 b'backgroundclosethreadcount',
2661 b'backgroundclosethreadcount',
2663 default=4,
2662 default=4,
2664 )
2663 )
2665 coreconfigitem(
2664 coreconfigitem(
2666 b'worker',
2665 b'worker',
2667 b'enabled',
2666 b'enabled',
2668 default=True,
2667 default=True,
2669 )
2668 )
2670 coreconfigitem(
2669 coreconfigitem(
2671 b'worker',
2670 b'worker',
2672 b'numcpus',
2671 b'numcpus',
2673 default=None,
2672 default=None,
2674 )
2673 )
2675
2674
2676 # Rebase related configuration moved to core because other extension are doing
2675 # Rebase related configuration moved to core because other extension are doing
2677 # strange things. For example, shelve import the extensions to reuse some bit
2676 # strange things. For example, shelve import the extensions to reuse some bit
2678 # without formally loading it.
2677 # without formally loading it.
2679 coreconfigitem(
2678 coreconfigitem(
2680 b'commands',
2679 b'commands',
2681 b'rebase.requiredest',
2680 b'rebase.requiredest',
2682 default=False,
2681 default=False,
2683 )
2682 )
2684 coreconfigitem(
2683 coreconfigitem(
2685 b'experimental',
2684 b'experimental',
2686 b'rebaseskipobsolete',
2685 b'rebaseskipobsolete',
2687 default=True,
2686 default=True,
2688 )
2687 )
2689 coreconfigitem(
2688 coreconfigitem(
2690 b'rebase',
2689 b'rebase',
2691 b'singletransaction',
2690 b'singletransaction',
2692 default=False,
2691 default=False,
2693 )
2692 )
2694 coreconfigitem(
2693 coreconfigitem(
2695 b'rebase',
2694 b'rebase',
2696 b'experimental.inmemory',
2695 b'experimental.inmemory',
2697 default=False,
2696 default=False,
2698 )
2697 )
@@ -1,3254 +1,3256 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 FEATURES_BY_VERSION,
38 FEATURES_BY_VERSION,
39 FLAG_GENERALDELTA,
39 FLAG_GENERALDELTA,
40 FLAG_INLINE_DATA,
40 FLAG_INLINE_DATA,
41 INDEX_HEADER,
41 INDEX_HEADER,
42 REVLOGV0,
42 REVLOGV0,
43 REVLOGV1,
43 REVLOGV1,
44 REVLOGV1_FLAGS,
44 REVLOGV1_FLAGS,
45 REVLOGV2,
45 REVLOGV2,
46 REVLOGV2_FLAGS,
46 REVLOGV2_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
48 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_FORMAT,
49 REVLOG_DEFAULT_VERSION,
49 REVLOG_DEFAULT_VERSION,
50 SUPPORTED_FLAGS,
50 SUPPORTED_FLAGS,
51 )
51 )
52 from .revlogutils.flagutil import (
52 from .revlogutils.flagutil import (
53 REVIDX_DEFAULT_FLAGS,
53 REVIDX_DEFAULT_FLAGS,
54 REVIDX_ELLIPSIS,
54 REVIDX_ELLIPSIS,
55 REVIDX_EXTSTORED,
55 REVIDX_EXTSTORED,
56 REVIDX_FLAGS_ORDER,
56 REVIDX_FLAGS_ORDER,
57 REVIDX_HASCOPIESINFO,
57 REVIDX_HASCOPIESINFO,
58 REVIDX_ISCENSORED,
58 REVIDX_ISCENSORED,
59 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 REVIDX_RAWTEXT_CHANGING_FLAGS,
60 )
60 )
61 from .thirdparty import attr
61 from .thirdparty import attr
62 from . import (
62 from . import (
63 ancestor,
63 ancestor,
64 dagop,
64 dagop,
65 error,
65 error,
66 mdiff,
66 mdiff,
67 policy,
67 policy,
68 pycompat,
68 pycompat,
69 templatefilters,
69 templatefilters,
70 util,
70 util,
71 )
71 )
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76 from .revlogutils import (
76 from .revlogutils import (
77 deltas as deltautil,
77 deltas as deltautil,
78 docket as docketutil,
78 docket as docketutil,
79 flagutil,
79 flagutil,
80 nodemap as nodemaputil,
80 nodemap as nodemaputil,
81 revlogv0,
81 revlogv0,
82 sidedata as sidedatautil,
82 sidedata as sidedatautil,
83 )
83 )
84 from .utils import (
84 from .utils import (
85 storageutil,
85 storageutil,
86 stringutil,
86 stringutil,
87 )
87 )
88
88
89 # blanked usage of all the name to prevent pyflakes constraints
89 # blanked usage of all the name to prevent pyflakes constraints
90 # We need these name available in the module for extensions.
90 # We need these name available in the module for extensions.
91
91
92 REVLOGV0
92 REVLOGV0
93 REVLOGV1
93 REVLOGV1
94 REVLOGV2
94 REVLOGV2
95 FLAG_INLINE_DATA
95 FLAG_INLINE_DATA
96 FLAG_GENERALDELTA
96 FLAG_GENERALDELTA
97 REVLOG_DEFAULT_FLAGS
97 REVLOG_DEFAULT_FLAGS
98 REVLOG_DEFAULT_FORMAT
98 REVLOG_DEFAULT_FORMAT
99 REVLOG_DEFAULT_VERSION
99 REVLOG_DEFAULT_VERSION
100 REVLOGV1_FLAGS
100 REVLOGV1_FLAGS
101 REVLOGV2_FLAGS
101 REVLOGV2_FLAGS
102 REVIDX_ISCENSORED
102 REVIDX_ISCENSORED
103 REVIDX_ELLIPSIS
103 REVIDX_ELLIPSIS
104 REVIDX_HASCOPIESINFO
104 REVIDX_HASCOPIESINFO
105 REVIDX_EXTSTORED
105 REVIDX_EXTSTORED
106 REVIDX_DEFAULT_FLAGS
106 REVIDX_DEFAULT_FLAGS
107 REVIDX_FLAGS_ORDER
107 REVIDX_FLAGS_ORDER
108 REVIDX_RAWTEXT_CHANGING_FLAGS
108 REVIDX_RAWTEXT_CHANGING_FLAGS
109
109
110 parsers = policy.importmod('parsers')
110 parsers = policy.importmod('parsers')
111 rustancestor = policy.importrust('ancestor')
111 rustancestor = policy.importrust('ancestor')
112 rustdagop = policy.importrust('dagop')
112 rustdagop = policy.importrust('dagop')
113 rustrevlog = policy.importrust('revlog')
113 rustrevlog = policy.importrust('revlog')
114
114
115 # Aliased for performance.
115 # Aliased for performance.
116 _zlibdecompress = zlib.decompress
116 _zlibdecompress = zlib.decompress
117
117
118 # max size of revlog with inline data
118 # max size of revlog with inline data
119 _maxinline = 131072
119 _maxinline = 131072
120 _chunksize = 1048576
120 _chunksize = 1048576
121
121
122 # Flag processors for REVIDX_ELLIPSIS.
122 # Flag processors for REVIDX_ELLIPSIS.
123 def ellipsisreadprocessor(rl, text):
123 def ellipsisreadprocessor(rl, text):
124 return text, False
124 return text, False
125
125
126
126
127 def ellipsiswriteprocessor(rl, text):
127 def ellipsiswriteprocessor(rl, text):
128 return text, False
128 return text, False
129
129
130
130
131 def ellipsisrawprocessor(rl, text):
131 def ellipsisrawprocessor(rl, text):
132 return False
132 return False
133
133
134
134
135 ellipsisprocessor = (
135 ellipsisprocessor = (
136 ellipsisreadprocessor,
136 ellipsisreadprocessor,
137 ellipsiswriteprocessor,
137 ellipsiswriteprocessor,
138 ellipsisrawprocessor,
138 ellipsisrawprocessor,
139 )
139 )
140
140
141
141
142 def offset_type(offset, type):
142 def offset_type(offset, type):
143 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
143 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
144 raise ValueError(b'unknown revlog index flags')
144 raise ValueError(b'unknown revlog index flags')
145 return int(int(offset) << 16 | type)
145 return int(int(offset) << 16 | type)
146
146
147
147
148 def _verify_revision(rl, skipflags, state, node):
148 def _verify_revision(rl, skipflags, state, node):
149 """Verify the integrity of the given revlog ``node`` while providing a hook
149 """Verify the integrity of the given revlog ``node`` while providing a hook
150 point for extensions to influence the operation."""
150 point for extensions to influence the operation."""
151 if skipflags:
151 if skipflags:
152 state[b'skipread'].add(node)
152 state[b'skipread'].add(node)
153 else:
153 else:
154 # Side-effect: read content and verify hash.
154 # Side-effect: read content and verify hash.
155 rl.revision(node)
155 rl.revision(node)
156
156
157
157
158 # True if a fast implementation for persistent-nodemap is available
158 # True if a fast implementation for persistent-nodemap is available
159 #
159 #
160 # We also consider we have a "fast" implementation in "pure" python because
160 # We also consider we have a "fast" implementation in "pure" python because
161 # people using pure don't really have performance consideration (and a
161 # people using pure don't really have performance consideration (and a
162 # wheelbarrow of other slowness source)
162 # wheelbarrow of other slowness source)
163 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
163 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
164 parsers, 'BaseIndexObject'
164 parsers, 'BaseIndexObject'
165 )
165 )
166
166
167
167
168 @attr.s(slots=True, frozen=True)
168 @attr.s(slots=True, frozen=True)
169 class _revisioninfo(object):
169 class _revisioninfo(object):
170 """Information about a revision that allows building its fulltext
170 """Information about a revision that allows building its fulltext
171 node: expected hash of the revision
171 node: expected hash of the revision
172 p1, p2: parent revs of the revision
172 p1, p2: parent revs of the revision
173 btext: built text cache consisting of a one-element list
173 btext: built text cache consisting of a one-element list
174 cachedelta: (baserev, uncompressed_delta) or None
174 cachedelta: (baserev, uncompressed_delta) or None
175 flags: flags associated to the revision storage
175 flags: flags associated to the revision storage
176
176
177 One of btext[0] or cachedelta must be set.
177 One of btext[0] or cachedelta must be set.
178 """
178 """
179
179
180 node = attr.ib()
180 node = attr.ib()
181 p1 = attr.ib()
181 p1 = attr.ib()
182 p2 = attr.ib()
182 p2 = attr.ib()
183 btext = attr.ib()
183 btext = attr.ib()
184 textlen = attr.ib()
184 textlen = attr.ib()
185 cachedelta = attr.ib()
185 cachedelta = attr.ib()
186 flags = attr.ib()
186 flags = attr.ib()
187
187
188
188
189 @interfaceutil.implementer(repository.irevisiondelta)
189 @interfaceutil.implementer(repository.irevisiondelta)
190 @attr.s(slots=True)
190 @attr.s(slots=True)
191 class revlogrevisiondelta(object):
191 class revlogrevisiondelta(object):
192 node = attr.ib()
192 node = attr.ib()
193 p1node = attr.ib()
193 p1node = attr.ib()
194 p2node = attr.ib()
194 p2node = attr.ib()
195 basenode = attr.ib()
195 basenode = attr.ib()
196 flags = attr.ib()
196 flags = attr.ib()
197 baserevisionsize = attr.ib()
197 baserevisionsize = attr.ib()
198 revision = attr.ib()
198 revision = attr.ib()
199 delta = attr.ib()
199 delta = attr.ib()
200 sidedata = attr.ib()
200 sidedata = attr.ib()
201 protocol_flags = attr.ib()
201 protocol_flags = attr.ib()
202 linknode = attr.ib(default=None)
202 linknode = attr.ib(default=None)
203
203
204
204
205 @interfaceutil.implementer(repository.iverifyproblem)
205 @interfaceutil.implementer(repository.iverifyproblem)
206 @attr.s(frozen=True)
206 @attr.s(frozen=True)
207 class revlogproblem(object):
207 class revlogproblem(object):
208 warning = attr.ib(default=None)
208 warning = attr.ib(default=None)
209 error = attr.ib(default=None)
209 error = attr.ib(default=None)
210 node = attr.ib(default=None)
210 node = attr.ib(default=None)
211
211
212
212
213 def parse_index_v1(data, inline):
213 def parse_index_v1(data, inline):
214 # call the C implementation to parse the index data
214 # call the C implementation to parse the index data
215 index, cache = parsers.parse_index2(data, inline)
215 index, cache = parsers.parse_index2(data, inline)
216 return index, cache
216 return index, cache
217
217
218
218
219 def parse_index_v2(data, inline):
219 def parse_index_v2(data, inline):
220 # call the C implementation to parse the index data
220 # call the C implementation to parse the index data
221 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
221 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
222 return index, cache
222 return index, cache
223
223
224
224
225 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
225 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
226
226
227 def parse_index_v1_nodemap(data, inline):
227 def parse_index_v1_nodemap(data, inline):
228 index, cache = parsers.parse_index_devel_nodemap(data, inline)
228 index, cache = parsers.parse_index_devel_nodemap(data, inline)
229 return index, cache
229 return index, cache
230
230
231
231
232 else:
232 else:
233 parse_index_v1_nodemap = None
233 parse_index_v1_nodemap = None
234
234
235
235
236 def parse_index_v1_mixed(data, inline):
236 def parse_index_v1_mixed(data, inline):
237 index, cache = parse_index_v1(data, inline)
237 index, cache = parse_index_v1(data, inline)
238 return rustrevlog.MixedIndex(index), cache
238 return rustrevlog.MixedIndex(index), cache
239
239
240
240
241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
242 # signed integer)
242 # signed integer)
243 _maxentrysize = 0x7FFFFFFF
243 _maxentrysize = 0x7FFFFFFF
244
244
245
245
246 class revlog(object):
246 class revlog(object):
247 """
247 """
248 the underlying revision storage object
248 the underlying revision storage object
249
249
250 A revlog consists of two parts, an index and the revision data.
250 A revlog consists of two parts, an index and the revision data.
251
251
252 The index is a file with a fixed record size containing
252 The index is a file with a fixed record size containing
253 information on each revision, including its nodeid (hash), the
253 information on each revision, including its nodeid (hash), the
254 nodeids of its parents, the position and offset of its data within
254 nodeids of its parents, the position and offset of its data within
255 the data file, and the revision it's based on. Finally, each entry
255 the data file, and the revision it's based on. Finally, each entry
256 contains a linkrev entry that can serve as a pointer to external
256 contains a linkrev entry that can serve as a pointer to external
257 data.
257 data.
258
258
259 The revision data itself is a linear collection of data chunks.
259 The revision data itself is a linear collection of data chunks.
260 Each chunk represents a revision and is usually represented as a
260 Each chunk represents a revision and is usually represented as a
261 delta against the previous chunk. To bound lookup time, runs of
261 delta against the previous chunk. To bound lookup time, runs of
262 deltas are limited to about 2 times the length of the original
262 deltas are limited to about 2 times the length of the original
263 version data. This makes retrieval of a version proportional to
263 version data. This makes retrieval of a version proportional to
264 its size, or O(1) relative to the number of revisions.
264 its size, or O(1) relative to the number of revisions.
265
265
266 Both pieces of the revlog are written to in an append-only
266 Both pieces of the revlog are written to in an append-only
267 fashion, which means we never need to rewrite a file to insert or
267 fashion, which means we never need to rewrite a file to insert or
268 remove data, and can use some simple techniques to avoid the need
268 remove data, and can use some simple techniques to avoid the need
269 for locking while reading.
269 for locking while reading.
270
270
271 If checkambig, indexfile is opened with checkambig=True at
271 If checkambig, indexfile is opened with checkambig=True at
272 writing, to avoid file stat ambiguity.
272 writing, to avoid file stat ambiguity.
273
273
274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
275 index will be mmapped rather than read if it is larger than the
275 index will be mmapped rather than read if it is larger than the
276 configured threshold.
276 configured threshold.
277
277
278 If censorable is True, the revlog can have censored revisions.
278 If censorable is True, the revlog can have censored revisions.
279
279
280 If `upperboundcomp` is not None, this is the expected maximal gain from
280 If `upperboundcomp` is not None, this is the expected maximal gain from
281 compression for the data content.
281 compression for the data content.
282
282
283 `concurrencychecker` is an optional function that receives 3 arguments: a
283 `concurrencychecker` is an optional function that receives 3 arguments: a
284 file handle, a filename, and an expected position. It should check whether
284 file handle, a filename, and an expected position. It should check whether
285 the current position in the file handle is valid, and log/warn/fail (by
285 the current position in the file handle is valid, and log/warn/fail (by
286 raising).
286 raising).
287 """
287 """
288
288
289 _flagserrorclass = error.RevlogError
289 _flagserrorclass = error.RevlogError
290
290
291 def __init__(
291 def __init__(
292 self,
292 self,
293 opener,
293 opener,
294 target,
294 target,
295 radix,
295 radix,
296 postfix=None, # only exist for `tmpcensored` now
296 postfix=None, # only exist for `tmpcensored` now
297 checkambig=False,
297 checkambig=False,
298 mmaplargeindex=False,
298 mmaplargeindex=False,
299 censorable=False,
299 censorable=False,
300 upperboundcomp=None,
300 upperboundcomp=None,
301 persistentnodemap=False,
301 persistentnodemap=False,
302 concurrencychecker=None,
302 concurrencychecker=None,
303 trypending=False,
303 trypending=False,
304 ):
304 ):
305 """
305 """
306 create a revlog object
306 create a revlog object
307
307
308 opener is a function that abstracts the file opening operation
308 opener is a function that abstracts the file opening operation
309 and can be used to implement COW semantics or the like.
309 and can be used to implement COW semantics or the like.
310
310
311 `target`: a (KIND, ID) tuple that identify the content stored in
311 `target`: a (KIND, ID) tuple that identify the content stored in
312 this revlog. It help the rest of the code to understand what the revlog
312 this revlog. It help the rest of the code to understand what the revlog
313 is about without having to resort to heuristic and index filename
313 is about without having to resort to heuristic and index filename
314 analysis. Note: that this must be reliably be set by normal code, but
314 analysis. Note: that this must be reliably be set by normal code, but
315 that test, debug, or performance measurement code might not set this to
315 that test, debug, or performance measurement code might not set this to
316 accurate value.
316 accurate value.
317 """
317 """
318 self.upperboundcomp = upperboundcomp
318 self.upperboundcomp = upperboundcomp
319
319
320 self.radix = radix
320 self.radix = radix
321
321
322 self._docket_file = None
322 self._docket_file = None
323 self._indexfile = None
323 self._indexfile = None
324 self._datafile = None
324 self._datafile = None
325 self._nodemap_file = None
325 self._nodemap_file = None
326 self.postfix = postfix
326 self.postfix = postfix
327 self._trypending = trypending
327 self._trypending = trypending
328 self.opener = opener
328 self.opener = opener
329 if persistentnodemap:
329 if persistentnodemap:
330 self._nodemap_file = nodemaputil.get_nodemap_file(self)
330 self._nodemap_file = nodemaputil.get_nodemap_file(self)
331
331
332 assert target[0] in ALL_KINDS
332 assert target[0] in ALL_KINDS
333 assert len(target) == 2
333 assert len(target) == 2
334 self.target = target
334 self.target = target
335 # When True, indexfile is opened with checkambig=True at writing, to
335 # When True, indexfile is opened with checkambig=True at writing, to
336 # avoid file stat ambiguity.
336 # avoid file stat ambiguity.
337 self._checkambig = checkambig
337 self._checkambig = checkambig
338 self._mmaplargeindex = mmaplargeindex
338 self._mmaplargeindex = mmaplargeindex
339 self._censorable = censorable
339 self._censorable = censorable
340 # 3-tuple of (node, rev, text) for a raw revision.
340 # 3-tuple of (node, rev, text) for a raw revision.
341 self._revisioncache = None
341 self._revisioncache = None
342 # Maps rev to chain base rev.
342 # Maps rev to chain base rev.
343 self._chainbasecache = util.lrucachedict(100)
343 self._chainbasecache = util.lrucachedict(100)
344 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
344 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
345 self._chunkcache = (0, b'')
345 self._chunkcache = (0, b'')
346 # How much data to read and cache into the raw revlog data cache.
346 # How much data to read and cache into the raw revlog data cache.
347 self._chunkcachesize = 65536
347 self._chunkcachesize = 65536
348 self._maxchainlen = None
348 self._maxchainlen = None
349 self._deltabothparents = True
349 self._deltabothparents = True
350 self.index = None
350 self.index = None
351 self._docket = None
351 self._docket = None
352 self._nodemap_docket = None
352 self._nodemap_docket = None
353 # Mapping of partial identifiers to full nodes.
353 # Mapping of partial identifiers to full nodes.
354 self._pcache = {}
354 self._pcache = {}
355 # Mapping of revision integer to full node.
355 # Mapping of revision integer to full node.
356 self._compengine = b'zlib'
356 self._compengine = b'zlib'
357 self._compengineopts = {}
357 self._compengineopts = {}
358 self._maxdeltachainspan = -1
358 self._maxdeltachainspan = -1
359 self._withsparseread = False
359 self._withsparseread = False
360 self._sparserevlog = False
360 self._sparserevlog = False
361 self.hassidedata = False
361 self.hassidedata = False
362 self._srdensitythreshold = 0.50
362 self._srdensitythreshold = 0.50
363 self._srmingapsize = 262144
363 self._srmingapsize = 262144
364
364
365 # Make copy of flag processors so each revlog instance can support
365 # Make copy of flag processors so each revlog instance can support
366 # custom flags.
366 # custom flags.
367 self._flagprocessors = dict(flagutil.flagprocessors)
367 self._flagprocessors = dict(flagutil.flagprocessors)
368
368
369 # 2-tuple of file handles being used for active writing.
369 # 2-tuple of file handles being used for active writing.
370 self._writinghandles = None
370 self._writinghandles = None
371 # prevent nesting of addgroup
371 # prevent nesting of addgroup
372 self._adding_group = None
372 self._adding_group = None
373
373
374 self._loadindex()
374 self._loadindex()
375
375
376 self._concurrencychecker = concurrencychecker
376 self._concurrencychecker = concurrencychecker
377
377
378 def _init_opts(self):
378 def _init_opts(self):
379 """process options (from above/config) to setup associated default revlog mode
379 """process options (from above/config) to setup associated default revlog mode
380
380
381 These values might be affected when actually reading on disk information.
381 These values might be affected when actually reading on disk information.
382
382
383 The relevant values are returned for use in _loadindex().
383 The relevant values are returned for use in _loadindex().
384
384
385 * newversionflags:
385 * newversionflags:
386 version header to use if we need to create a new revlog
386 version header to use if we need to create a new revlog
387
387
388 * mmapindexthreshold:
388 * mmapindexthreshold:
389 minimal index size for start to use mmap
389 minimal index size for start to use mmap
390
390
391 * force_nodemap:
391 * force_nodemap:
392 force the usage of a "development" version of the nodemap code
392 force the usage of a "development" version of the nodemap code
393 """
393 """
394 mmapindexthreshold = None
394 mmapindexthreshold = None
395 opts = self.opener.options
395 opts = self.opener.options
396
396
397 if b'revlogv2' in opts:
397 if b'revlogv2' in opts:
398 new_header = REVLOGV2 | FLAG_INLINE_DATA
398 new_header = REVLOGV2 | FLAG_INLINE_DATA
399 elif b'revlogv1' in opts:
399 elif b'revlogv1' in opts:
400 new_header = REVLOGV1 | FLAG_INLINE_DATA
400 new_header = REVLOGV1 | FLAG_INLINE_DATA
401 if b'generaldelta' in opts:
401 if b'generaldelta' in opts:
402 new_header |= FLAG_GENERALDELTA
402 new_header |= FLAG_GENERALDELTA
403 elif b'revlogv0' in self.opener.options:
403 elif b'revlogv0' in self.opener.options:
404 new_header = REVLOGV0
404 new_header = REVLOGV0
405 else:
405 else:
406 new_header = REVLOG_DEFAULT_VERSION
406 new_header = REVLOG_DEFAULT_VERSION
407
407
408 if b'chunkcachesize' in opts:
408 if b'chunkcachesize' in opts:
409 self._chunkcachesize = opts[b'chunkcachesize']
409 self._chunkcachesize = opts[b'chunkcachesize']
410 if b'maxchainlen' in opts:
410 if b'maxchainlen' in opts:
411 self._maxchainlen = opts[b'maxchainlen']
411 self._maxchainlen = opts[b'maxchainlen']
412 if b'deltabothparents' in opts:
412 if b'deltabothparents' in opts:
413 self._deltabothparents = opts[b'deltabothparents']
413 self._deltabothparents = opts[b'deltabothparents']
414 self._lazydelta = bool(opts.get(b'lazydelta', True))
414 self._lazydelta = bool(opts.get(b'lazydelta', True))
415 self._lazydeltabase = False
415 self._lazydeltabase = False
416 if self._lazydelta:
416 if self._lazydelta:
417 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
417 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
418 if b'compengine' in opts:
418 if b'compengine' in opts:
419 self._compengine = opts[b'compengine']
419 self._compengine = opts[b'compengine']
420 if b'zlib.level' in opts:
420 if b'zlib.level' in opts:
421 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
421 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
422 if b'zstd.level' in opts:
422 if b'zstd.level' in opts:
423 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
423 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
424 if b'maxdeltachainspan' in opts:
424 if b'maxdeltachainspan' in opts:
425 self._maxdeltachainspan = opts[b'maxdeltachainspan']
425 self._maxdeltachainspan = opts[b'maxdeltachainspan']
426 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
426 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
427 mmapindexthreshold = opts[b'mmapindexthreshold']
427 mmapindexthreshold = opts[b'mmapindexthreshold']
428 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
428 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
429 withsparseread = bool(opts.get(b'with-sparse-read', False))
429 withsparseread = bool(opts.get(b'with-sparse-read', False))
430 # sparse-revlog forces sparse-read
430 # sparse-revlog forces sparse-read
431 self._withsparseread = self._sparserevlog or withsparseread
431 self._withsparseread = self._sparserevlog or withsparseread
432 if b'sparse-read-density-threshold' in opts:
432 if b'sparse-read-density-threshold' in opts:
433 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
433 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
434 if b'sparse-read-min-gap-size' in opts:
434 if b'sparse-read-min-gap-size' in opts:
435 self._srmingapsize = opts[b'sparse-read-min-gap-size']
435 self._srmingapsize = opts[b'sparse-read-min-gap-size']
436 if opts.get(b'enableellipsis'):
436 if opts.get(b'enableellipsis'):
437 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
437 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
438
438
439 # revlog v0 doesn't have flag processors
439 # revlog v0 doesn't have flag processors
440 for flag, processor in pycompat.iteritems(
440 for flag, processor in pycompat.iteritems(
441 opts.get(b'flagprocessors', {})
441 opts.get(b'flagprocessors', {})
442 ):
442 ):
443 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
443 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
444
444
445 if self._chunkcachesize <= 0:
445 if self._chunkcachesize <= 0:
446 raise error.RevlogError(
446 raise error.RevlogError(
447 _(b'revlog chunk cache size %r is not greater than 0')
447 _(b'revlog chunk cache size %r is not greater than 0')
448 % self._chunkcachesize
448 % self._chunkcachesize
449 )
449 )
450 elif self._chunkcachesize & (self._chunkcachesize - 1):
450 elif self._chunkcachesize & (self._chunkcachesize - 1):
451 raise error.RevlogError(
451 raise error.RevlogError(
452 _(b'revlog chunk cache size %r is not a power of 2')
452 _(b'revlog chunk cache size %r is not a power of 2')
453 % self._chunkcachesize
453 % self._chunkcachesize
454 )
454 )
455 force_nodemap = opts.get(b'devel-force-nodemap', False)
455 force_nodemap = opts.get(b'devel-force-nodemap', False)
456 return new_header, mmapindexthreshold, force_nodemap
456 return new_header, mmapindexthreshold, force_nodemap
457
457
458 def _get_data(self, filepath, mmap_threshold, size=None):
458 def _get_data(self, filepath, mmap_threshold, size=None):
459 """return a file content with or without mmap
459 """return a file content with or without mmap
460
460
461 If the file is missing return the empty string"""
461 If the file is missing return the empty string"""
462 try:
462 try:
463 with self.opener(filepath) as fp:
463 with self.opener(filepath) as fp:
464 if mmap_threshold is not None:
464 if mmap_threshold is not None:
465 file_size = self.opener.fstat(fp).st_size
465 file_size = self.opener.fstat(fp).st_size
466 if file_size >= mmap_threshold:
466 if file_size >= mmap_threshold:
467 if size is not None:
467 if size is not None:
468 # avoid potentiel mmap crash
468 # avoid potentiel mmap crash
469 size = min(file_size, size)
469 size = min(file_size, size)
470 # TODO: should .close() to release resources without
470 # TODO: should .close() to release resources without
471 # relying on Python GC
471 # relying on Python GC
472 if size is None:
472 if size is None:
473 return util.buffer(util.mmapread(fp))
473 return util.buffer(util.mmapread(fp))
474 else:
474 else:
475 return util.buffer(util.mmapread(fp, size))
475 return util.buffer(util.mmapread(fp, size))
476 if size is None:
476 if size is None:
477 return fp.read()
477 return fp.read()
478 else:
478 else:
479 return fp.read(size)
479 return fp.read(size)
480 except IOError as inst:
480 except IOError as inst:
481 if inst.errno != errno.ENOENT:
481 if inst.errno != errno.ENOENT:
482 raise
482 raise
483 return b''
483 return b''
484
484
485 def _loadindex(self):
485 def _loadindex(self):
486
486
487 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
487 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
488
488
489 if self.postfix is not None:
489 if self.postfix is not None:
490 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
490 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
491 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
491 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
492 entry_point = b'%s.i.a' % self.radix
492 entry_point = b'%s.i.a' % self.radix
493 else:
493 else:
494 entry_point = b'%s.i' % self.radix
494 entry_point = b'%s.i' % self.radix
495
495
496 entry_data = b''
496 entry_data = b''
497 self._initempty = True
497 self._initempty = True
498 entry_data = self._get_data(entry_point, mmapindexthreshold)
498 entry_data = self._get_data(entry_point, mmapindexthreshold)
499 if len(entry_data) > 0:
499 if len(entry_data) > 0:
500 header = INDEX_HEADER.unpack(entry_data[:4])[0]
500 header = INDEX_HEADER.unpack(entry_data[:4])[0]
501 self._initempty = False
501 self._initempty = False
502 else:
502 else:
503 header = new_header
503 header = new_header
504
504
505 self._format_flags = header & ~0xFFFF
505 self._format_flags = header & ~0xFFFF
506 self._format_version = header & 0xFFFF
506 self._format_version = header & 0xFFFF
507
507
508 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
508 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
509 if supported_flags is None:
509 if supported_flags is None:
510 msg = _(b'unknown version (%d) in revlog %s')
510 msg = _(b'unknown version (%d) in revlog %s')
511 msg %= (self._format_version, self.display_id)
511 msg %= (self._format_version, self.display_id)
512 raise error.RevlogError(msg)
512 raise error.RevlogError(msg)
513 elif self._format_flags & ~supported_flags:
513 elif self._format_flags & ~supported_flags:
514 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
514 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
515 display_flag = self._format_flags >> 16
515 display_flag = self._format_flags >> 16
516 msg %= (display_flag, self._format_version, self.display_id)
516 msg %= (display_flag, self._format_version, self.display_id)
517 raise error.RevlogError(msg)
517 raise error.RevlogError(msg)
518
518
519 features = FEATURES_BY_VERSION[self._format_version]
519 features = FEATURES_BY_VERSION[self._format_version]
520 self._inline = features[b'inline'](self._format_flags)
520 self._inline = features[b'inline'](self._format_flags)
521 self._generaldelta = features[b'generaldelta'](self._format_flags)
521 self._generaldelta = features[b'generaldelta'](self._format_flags)
522 self.hassidedata = features[b'sidedata']
522 self.hassidedata = features[b'sidedata']
523
523
524 if not features[b'docket']:
524 if not features[b'docket']:
525 self._indexfile = entry_point
525 self._indexfile = entry_point
526 index_data = entry_data
526 index_data = entry_data
527 else:
527 else:
528 self._docket_file = entry_point
528 self._docket_file = entry_point
529 if self._initempty:
529 if self._initempty:
530 self._docket = docketutil.default_docket(self, header)
530 self._docket = docketutil.default_docket(self, header)
531 else:
531 else:
532 self._docket = docketutil.parse_docket(self, entry_data)
532 self._docket = docketutil.parse_docket(
533 self, entry_data, use_pending=self._trypending
534 )
533 self._indexfile = self._docket.index_filepath()
535 self._indexfile = self._docket.index_filepath()
534 index_data = b''
536 index_data = b''
535 index_size = self._docket.index_end
537 index_size = self._docket.index_end
536 if index_size > 0:
538 if index_size > 0:
537 index_data = self._get_data(
539 index_data = self._get_data(
538 self._indexfile, mmapindexthreshold, size=index_size
540 self._indexfile, mmapindexthreshold, size=index_size
539 )
541 )
540 if len(index_data) < index_size:
542 if len(index_data) < index_size:
541 msg = _(b'too few index data for %s: got %d, expected %d')
543 msg = _(b'too few index data for %s: got %d, expected %d')
542 msg %= (self.display_id, len(index_data), index_size)
544 msg %= (self.display_id, len(index_data), index_size)
543 raise error.RevlogError(msg)
545 raise error.RevlogError(msg)
544
546
545 self._inline = False
547 self._inline = False
546 # generaldelta implied by version 2 revlogs.
548 # generaldelta implied by version 2 revlogs.
547 self._generaldelta = True
549 self._generaldelta = True
548 # the logic for persistent nodemap will be dealt with within the
550 # the logic for persistent nodemap will be dealt with within the
549 # main docket, so disable it for now.
551 # main docket, so disable it for now.
550 self._nodemap_file = None
552 self._nodemap_file = None
551
553
552 if self.postfix is None:
554 if self.postfix is None:
553 self._datafile = b'%s.d' % self.radix
555 self._datafile = b'%s.d' % self.radix
554 else:
556 else:
555 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
557 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
556
558
557 self.nodeconstants = sha1nodeconstants
559 self.nodeconstants = sha1nodeconstants
558 self.nullid = self.nodeconstants.nullid
560 self.nullid = self.nodeconstants.nullid
559
561
560 # sparse-revlog can't be on without general-delta (issue6056)
562 # sparse-revlog can't be on without general-delta (issue6056)
561 if not self._generaldelta:
563 if not self._generaldelta:
562 self._sparserevlog = False
564 self._sparserevlog = False
563
565
564 self._storedeltachains = True
566 self._storedeltachains = True
565
567
566 devel_nodemap = (
568 devel_nodemap = (
567 self._nodemap_file
569 self._nodemap_file
568 and force_nodemap
570 and force_nodemap
569 and parse_index_v1_nodemap is not None
571 and parse_index_v1_nodemap is not None
570 )
572 )
571
573
572 use_rust_index = False
574 use_rust_index = False
573 if rustrevlog is not None:
575 if rustrevlog is not None:
574 if self._nodemap_file is not None:
576 if self._nodemap_file is not None:
575 use_rust_index = True
577 use_rust_index = True
576 else:
578 else:
577 use_rust_index = self.opener.options.get(b'rust.index')
579 use_rust_index = self.opener.options.get(b'rust.index')
578
580
579 self._parse_index = parse_index_v1
581 self._parse_index = parse_index_v1
580 if self._format_version == REVLOGV0:
582 if self._format_version == REVLOGV0:
581 self._parse_index = revlogv0.parse_index_v0
583 self._parse_index = revlogv0.parse_index_v0
582 elif self._format_version == REVLOGV2:
584 elif self._format_version == REVLOGV2:
583 self._parse_index = parse_index_v2
585 self._parse_index = parse_index_v2
584 elif devel_nodemap:
586 elif devel_nodemap:
585 self._parse_index = parse_index_v1_nodemap
587 self._parse_index = parse_index_v1_nodemap
586 elif use_rust_index:
588 elif use_rust_index:
587 self._parse_index = parse_index_v1_mixed
589 self._parse_index = parse_index_v1_mixed
588 try:
590 try:
589 d = self._parse_index(index_data, self._inline)
591 d = self._parse_index(index_data, self._inline)
590 index, _chunkcache = d
592 index, _chunkcache = d
591 use_nodemap = (
593 use_nodemap = (
592 not self._inline
594 not self._inline
593 and self._nodemap_file is not None
595 and self._nodemap_file is not None
594 and util.safehasattr(index, 'update_nodemap_data')
596 and util.safehasattr(index, 'update_nodemap_data')
595 )
597 )
596 if use_nodemap:
598 if use_nodemap:
597 nodemap_data = nodemaputil.persisted_data(self)
599 nodemap_data = nodemaputil.persisted_data(self)
598 if nodemap_data is not None:
600 if nodemap_data is not None:
599 docket = nodemap_data[0]
601 docket = nodemap_data[0]
600 if (
602 if (
601 len(d[0]) > docket.tip_rev
603 len(d[0]) > docket.tip_rev
602 and d[0][docket.tip_rev][7] == docket.tip_node
604 and d[0][docket.tip_rev][7] == docket.tip_node
603 ):
605 ):
604 # no changelog tampering
606 # no changelog tampering
605 self._nodemap_docket = docket
607 self._nodemap_docket = docket
606 index.update_nodemap_data(*nodemap_data)
608 index.update_nodemap_data(*nodemap_data)
607 except (ValueError, IndexError):
609 except (ValueError, IndexError):
608 raise error.RevlogError(
610 raise error.RevlogError(
609 _(b"index %s is corrupted") % self.display_id
611 _(b"index %s is corrupted") % self.display_id
610 )
612 )
611 self.index, self._chunkcache = d
613 self.index, self._chunkcache = d
612 if not self._chunkcache:
614 if not self._chunkcache:
613 self._chunkclear()
615 self._chunkclear()
614 # revnum -> (chain-length, sum-delta-length)
616 # revnum -> (chain-length, sum-delta-length)
615 self._chaininfocache = util.lrucachedict(500)
617 self._chaininfocache = util.lrucachedict(500)
616 # revlog header -> revlog compressor
618 # revlog header -> revlog compressor
617 self._decompressors = {}
619 self._decompressors = {}
618
620
619 @util.propertycache
621 @util.propertycache
620 def revlog_kind(self):
622 def revlog_kind(self):
621 return self.target[0]
623 return self.target[0]
622
624
623 @util.propertycache
625 @util.propertycache
624 def display_id(self):
626 def display_id(self):
625 """The public facing "ID" of the revlog that we use in message"""
627 """The public facing "ID" of the revlog that we use in message"""
626 # Maybe we should build a user facing representation of
628 # Maybe we should build a user facing representation of
627 # revlog.target instead of using `self.radix`
629 # revlog.target instead of using `self.radix`
628 return self.radix
630 return self.radix
629
631
630 @util.propertycache
632 @util.propertycache
631 def _compressor(self):
633 def _compressor(self):
632 engine = util.compengines[self._compengine]
634 engine = util.compengines[self._compengine]
633 return engine.revlogcompressor(self._compengineopts)
635 return engine.revlogcompressor(self._compengineopts)
634
636
635 def _indexfp(self):
637 def _indexfp(self):
636 """file object for the revlog's index file"""
638 """file object for the revlog's index file"""
637 return self.opener(self._indexfile, mode=b"r")
639 return self.opener(self._indexfile, mode=b"r")
638
640
639 def __index_write_fp(self):
641 def __index_write_fp(self):
640 # You should not use this directly and use `_writing` instead
642 # You should not use this directly and use `_writing` instead
641 try:
643 try:
642 f = self.opener(
644 f = self.opener(
643 self._indexfile, mode=b"r+", checkambig=self._checkambig
645 self._indexfile, mode=b"r+", checkambig=self._checkambig
644 )
646 )
645 if self._docket is None:
647 if self._docket is None:
646 f.seek(0, os.SEEK_END)
648 f.seek(0, os.SEEK_END)
647 else:
649 else:
648 f.seek(self._docket.index_end, os.SEEK_SET)
650 f.seek(self._docket.index_end, os.SEEK_SET)
649 return f
651 return f
650 except IOError as inst:
652 except IOError as inst:
651 if inst.errno != errno.ENOENT:
653 if inst.errno != errno.ENOENT:
652 raise
654 raise
653 return self.opener(
655 return self.opener(
654 self._indexfile, mode=b"w+", checkambig=self._checkambig
656 self._indexfile, mode=b"w+", checkambig=self._checkambig
655 )
657 )
656
658
657 def __index_new_fp(self):
659 def __index_new_fp(self):
658 # You should not use this unless you are upgrading from inline revlog
660 # You should not use this unless you are upgrading from inline revlog
659 return self.opener(
661 return self.opener(
660 self._indexfile,
662 self._indexfile,
661 mode=b"w",
663 mode=b"w",
662 checkambig=self._checkambig,
664 checkambig=self._checkambig,
663 atomictemp=True,
665 atomictemp=True,
664 )
666 )
665
667
666 def _datafp(self, mode=b'r'):
668 def _datafp(self, mode=b'r'):
667 """file object for the revlog's data file"""
669 """file object for the revlog's data file"""
668 return self.opener(self._datafile, mode=mode)
670 return self.opener(self._datafile, mode=mode)
669
671
670 @contextlib.contextmanager
672 @contextlib.contextmanager
671 def _datareadfp(self, existingfp=None):
673 def _datareadfp(self, existingfp=None):
672 """file object suitable to read data"""
674 """file object suitable to read data"""
673 # Use explicit file handle, if given.
675 # Use explicit file handle, if given.
674 if existingfp is not None:
676 if existingfp is not None:
675 yield existingfp
677 yield existingfp
676
678
677 # Use a file handle being actively used for writes, if available.
679 # Use a file handle being actively used for writes, if available.
678 # There is some danger to doing this because reads will seek the
680 # There is some danger to doing this because reads will seek the
679 # file. However, _writeentry() performs a SEEK_END before all writes,
681 # file. However, _writeentry() performs a SEEK_END before all writes,
680 # so we should be safe.
682 # so we should be safe.
681 elif self._writinghandles:
683 elif self._writinghandles:
682 if self._inline:
684 if self._inline:
683 yield self._writinghandles[0]
685 yield self._writinghandles[0]
684 else:
686 else:
685 yield self._writinghandles[1]
687 yield self._writinghandles[1]
686
688
687 # Otherwise open a new file handle.
689 # Otherwise open a new file handle.
688 else:
690 else:
689 if self._inline:
691 if self._inline:
690 func = self._indexfp
692 func = self._indexfp
691 else:
693 else:
692 func = self._datafp
694 func = self._datafp
693 with func() as fp:
695 with func() as fp:
694 yield fp
696 yield fp
695
697
696 def tiprev(self):
698 def tiprev(self):
697 return len(self.index) - 1
699 return len(self.index) - 1
698
700
699 def tip(self):
701 def tip(self):
700 return self.node(self.tiprev())
702 return self.node(self.tiprev())
701
703
702 def __contains__(self, rev):
704 def __contains__(self, rev):
703 return 0 <= rev < len(self)
705 return 0 <= rev < len(self)
704
706
705 def __len__(self):
707 def __len__(self):
706 return len(self.index)
708 return len(self.index)
707
709
708 def __iter__(self):
710 def __iter__(self):
709 return iter(pycompat.xrange(len(self)))
711 return iter(pycompat.xrange(len(self)))
710
712
711 def revs(self, start=0, stop=None):
713 def revs(self, start=0, stop=None):
712 """iterate over all rev in this revlog (from start to stop)"""
714 """iterate over all rev in this revlog (from start to stop)"""
713 return storageutil.iterrevs(len(self), start=start, stop=stop)
715 return storageutil.iterrevs(len(self), start=start, stop=stop)
714
716
715 @property
717 @property
716 def nodemap(self):
718 def nodemap(self):
717 msg = (
719 msg = (
718 b"revlog.nodemap is deprecated, "
720 b"revlog.nodemap is deprecated, "
719 b"use revlog.index.[has_node|rev|get_rev]"
721 b"use revlog.index.[has_node|rev|get_rev]"
720 )
722 )
721 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
723 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
722 return self.index.nodemap
724 return self.index.nodemap
723
725
724 @property
726 @property
725 def _nodecache(self):
727 def _nodecache(self):
726 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
728 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
727 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
729 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
728 return self.index.nodemap
730 return self.index.nodemap
729
731
730 def hasnode(self, node):
732 def hasnode(self, node):
731 try:
733 try:
732 self.rev(node)
734 self.rev(node)
733 return True
735 return True
734 except KeyError:
736 except KeyError:
735 return False
737 return False
736
738
737 def candelta(self, baserev, rev):
739 def candelta(self, baserev, rev):
738 """whether two revisions (baserev, rev) can be delta-ed or not"""
740 """whether two revisions (baserev, rev) can be delta-ed or not"""
739 # Disable delta if either rev requires a content-changing flag
741 # Disable delta if either rev requires a content-changing flag
740 # processor (ex. LFS). This is because such flag processor can alter
742 # processor (ex. LFS). This is because such flag processor can alter
741 # the rawtext content that the delta will be based on, and two clients
743 # the rawtext content that the delta will be based on, and two clients
742 # could have a same revlog node with different flags (i.e. different
744 # could have a same revlog node with different flags (i.e. different
743 # rawtext contents) and the delta could be incompatible.
745 # rawtext contents) and the delta could be incompatible.
744 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
746 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
745 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
747 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
746 ):
748 ):
747 return False
749 return False
748 return True
750 return True
749
751
750 def update_caches(self, transaction):
752 def update_caches(self, transaction):
751 if self._nodemap_file is not None:
753 if self._nodemap_file is not None:
752 if transaction is None:
754 if transaction is None:
753 nodemaputil.update_persistent_nodemap(self)
755 nodemaputil.update_persistent_nodemap(self)
754 else:
756 else:
755 nodemaputil.setup_persistent_nodemap(transaction, self)
757 nodemaputil.setup_persistent_nodemap(transaction, self)
756
758
757 def clearcaches(self):
759 def clearcaches(self):
758 self._revisioncache = None
760 self._revisioncache = None
759 self._chainbasecache.clear()
761 self._chainbasecache.clear()
760 self._chunkcache = (0, b'')
762 self._chunkcache = (0, b'')
761 self._pcache = {}
763 self._pcache = {}
762 self._nodemap_docket = None
764 self._nodemap_docket = None
763 self.index.clearcaches()
765 self.index.clearcaches()
764 # The python code is the one responsible for validating the docket, we
766 # The python code is the one responsible for validating the docket, we
765 # end up having to refresh it here.
767 # end up having to refresh it here.
766 use_nodemap = (
768 use_nodemap = (
767 not self._inline
769 not self._inline
768 and self._nodemap_file is not None
770 and self._nodemap_file is not None
769 and util.safehasattr(self.index, 'update_nodemap_data')
771 and util.safehasattr(self.index, 'update_nodemap_data')
770 )
772 )
771 if use_nodemap:
773 if use_nodemap:
772 nodemap_data = nodemaputil.persisted_data(self)
774 nodemap_data = nodemaputil.persisted_data(self)
773 if nodemap_data is not None:
775 if nodemap_data is not None:
774 self._nodemap_docket = nodemap_data[0]
776 self._nodemap_docket = nodemap_data[0]
775 self.index.update_nodemap_data(*nodemap_data)
777 self.index.update_nodemap_data(*nodemap_data)
776
778
777 def rev(self, node):
779 def rev(self, node):
778 try:
780 try:
779 return self.index.rev(node)
781 return self.index.rev(node)
780 except TypeError:
782 except TypeError:
781 raise
783 raise
782 except error.RevlogError:
784 except error.RevlogError:
783 # parsers.c radix tree lookup failed
785 # parsers.c radix tree lookup failed
784 if (
786 if (
785 node == self.nodeconstants.wdirid
787 node == self.nodeconstants.wdirid
786 or node in self.nodeconstants.wdirfilenodeids
788 or node in self.nodeconstants.wdirfilenodeids
787 ):
789 ):
788 raise error.WdirUnsupported
790 raise error.WdirUnsupported
789 raise error.LookupError(node, self.display_id, _(b'no node'))
791 raise error.LookupError(node, self.display_id, _(b'no node'))
790
792
791 # Accessors for index entries.
793 # Accessors for index entries.
792
794
793 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
795 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
794 # are flags.
796 # are flags.
795 def start(self, rev):
797 def start(self, rev):
796 return int(self.index[rev][0] >> 16)
798 return int(self.index[rev][0] >> 16)
797
799
798 def flags(self, rev):
800 def flags(self, rev):
799 return self.index[rev][0] & 0xFFFF
801 return self.index[rev][0] & 0xFFFF
800
802
801 def length(self, rev):
803 def length(self, rev):
802 return self.index[rev][1]
804 return self.index[rev][1]
803
805
804 def sidedata_length(self, rev):
806 def sidedata_length(self, rev):
805 if not self.hassidedata:
807 if not self.hassidedata:
806 return 0
808 return 0
807 return self.index[rev][9]
809 return self.index[rev][9]
808
810
809 def rawsize(self, rev):
811 def rawsize(self, rev):
810 """return the length of the uncompressed text for a given revision"""
812 """return the length of the uncompressed text for a given revision"""
811 l = self.index[rev][2]
813 l = self.index[rev][2]
812 if l >= 0:
814 if l >= 0:
813 return l
815 return l
814
816
815 t = self.rawdata(rev)
817 t = self.rawdata(rev)
816 return len(t)
818 return len(t)
817
819
818 def size(self, rev):
820 def size(self, rev):
819 """length of non-raw text (processed by a "read" flag processor)"""
821 """length of non-raw text (processed by a "read" flag processor)"""
820 # fast path: if no "read" flag processor could change the content,
822 # fast path: if no "read" flag processor could change the content,
821 # size is rawsize. note: ELLIPSIS is known to not change the content.
823 # size is rawsize. note: ELLIPSIS is known to not change the content.
822 flags = self.flags(rev)
824 flags = self.flags(rev)
823 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
825 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
824 return self.rawsize(rev)
826 return self.rawsize(rev)
825
827
826 return len(self.revision(rev, raw=False))
828 return len(self.revision(rev, raw=False))
827
829
828 def chainbase(self, rev):
830 def chainbase(self, rev):
829 base = self._chainbasecache.get(rev)
831 base = self._chainbasecache.get(rev)
830 if base is not None:
832 if base is not None:
831 return base
833 return base
832
834
833 index = self.index
835 index = self.index
834 iterrev = rev
836 iterrev = rev
835 base = index[iterrev][3]
837 base = index[iterrev][3]
836 while base != iterrev:
838 while base != iterrev:
837 iterrev = base
839 iterrev = base
838 base = index[iterrev][3]
840 base = index[iterrev][3]
839
841
840 self._chainbasecache[rev] = base
842 self._chainbasecache[rev] = base
841 return base
843 return base
842
844
843 def linkrev(self, rev):
845 def linkrev(self, rev):
844 return self.index[rev][4]
846 return self.index[rev][4]
845
847
846 def parentrevs(self, rev):
848 def parentrevs(self, rev):
847 try:
849 try:
848 entry = self.index[rev]
850 entry = self.index[rev]
849 except IndexError:
851 except IndexError:
850 if rev == wdirrev:
852 if rev == wdirrev:
851 raise error.WdirUnsupported
853 raise error.WdirUnsupported
852 raise
854 raise
853 if entry[5] == nullrev:
855 if entry[5] == nullrev:
854 return entry[6], entry[5]
856 return entry[6], entry[5]
855 else:
857 else:
856 return entry[5], entry[6]
858 return entry[5], entry[6]
857
859
858 # fast parentrevs(rev) where rev isn't filtered
860 # fast parentrevs(rev) where rev isn't filtered
859 _uncheckedparentrevs = parentrevs
861 _uncheckedparentrevs = parentrevs
860
862
861 def node(self, rev):
863 def node(self, rev):
862 try:
864 try:
863 return self.index[rev][7]
865 return self.index[rev][7]
864 except IndexError:
866 except IndexError:
865 if rev == wdirrev:
867 if rev == wdirrev:
866 raise error.WdirUnsupported
868 raise error.WdirUnsupported
867 raise
869 raise
868
870
869 # Derived from index values.
871 # Derived from index values.
870
872
871 def end(self, rev):
873 def end(self, rev):
872 return self.start(rev) + self.length(rev)
874 return self.start(rev) + self.length(rev)
873
875
874 def parents(self, node):
876 def parents(self, node):
875 i = self.index
877 i = self.index
876 d = i[self.rev(node)]
878 d = i[self.rev(node)]
877 # inline node() to avoid function call overhead
879 # inline node() to avoid function call overhead
878 if d[5] == self.nullid:
880 if d[5] == self.nullid:
879 return i[d[6]][7], i[d[5]][7]
881 return i[d[6]][7], i[d[5]][7]
880 else:
882 else:
881 return i[d[5]][7], i[d[6]][7]
883 return i[d[5]][7], i[d[6]][7]
882
884
883 def chainlen(self, rev):
885 def chainlen(self, rev):
884 return self._chaininfo(rev)[0]
886 return self._chaininfo(rev)[0]
885
887
886 def _chaininfo(self, rev):
888 def _chaininfo(self, rev):
887 chaininfocache = self._chaininfocache
889 chaininfocache = self._chaininfocache
888 if rev in chaininfocache:
890 if rev in chaininfocache:
889 return chaininfocache[rev]
891 return chaininfocache[rev]
890 index = self.index
892 index = self.index
891 generaldelta = self._generaldelta
893 generaldelta = self._generaldelta
892 iterrev = rev
894 iterrev = rev
893 e = index[iterrev]
895 e = index[iterrev]
894 clen = 0
896 clen = 0
895 compresseddeltalen = 0
897 compresseddeltalen = 0
896 while iterrev != e[3]:
898 while iterrev != e[3]:
897 clen += 1
899 clen += 1
898 compresseddeltalen += e[1]
900 compresseddeltalen += e[1]
899 if generaldelta:
901 if generaldelta:
900 iterrev = e[3]
902 iterrev = e[3]
901 else:
903 else:
902 iterrev -= 1
904 iterrev -= 1
903 if iterrev in chaininfocache:
905 if iterrev in chaininfocache:
904 t = chaininfocache[iterrev]
906 t = chaininfocache[iterrev]
905 clen += t[0]
907 clen += t[0]
906 compresseddeltalen += t[1]
908 compresseddeltalen += t[1]
907 break
909 break
908 e = index[iterrev]
910 e = index[iterrev]
909 else:
911 else:
910 # Add text length of base since decompressing that also takes
912 # Add text length of base since decompressing that also takes
911 # work. For cache hits the length is already included.
913 # work. For cache hits the length is already included.
912 compresseddeltalen += e[1]
914 compresseddeltalen += e[1]
913 r = (clen, compresseddeltalen)
915 r = (clen, compresseddeltalen)
914 chaininfocache[rev] = r
916 chaininfocache[rev] = r
915 return r
917 return r
916
918
def _deltachain(self, rev, stoprev=None):
    """Obtain the delta chain for a revision.

    ``stoprev`` specifies a revision to stop at. If not specified, we
    stop at the base of the chain.

    Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
    revs in ascending order and ``stopped`` is a bool indicating whether
    ``stoprev`` was hit.
    """
    # Try C implementation.
    try:
        return self.index.deltachain(rev, stoprev, self._generaldelta)
    except AttributeError:
        pass

    chain = []

    # Alias to prevent attribute lookup in tight loop.
    index = self.index
    generaldelta = self._generaldelta

    iterrev = rev
    e = index[iterrev]
    # A revision whose delta base (e[3]) is itself is a full snapshot and
    # terminates the chain.
    while iterrev != e[3] and iterrev != stoprev:
        chain.append(iterrev)
        if generaldelta:
            iterrev = e[3]  # follow the stored delta base
        else:
            iterrev -= 1  # legacy layout: base is the previous rev
        e = index[iterrev]

    if iterrev == stoprev:
        # ``stoprev`` itself is deliberately excluded from the chain.
        stopped = True
    else:
        chain.append(iterrev)
        stopped = False

    # The walk collected revs from newest to oldest; callers expect
    # ascending order.
    chain.reverse()
    return chain, stopped
957
959
def ancestors(self, revs, stoprev=0, inclusive=False):
    """Generate the ancestors of 'revs' in reverse revision order.
    Does not generate revs lower than stoprev.

    See the documentation for ancestor.lazyancestors for more details."""
    # Materialize and validate the starting revisions up front:
    # self.node() raises on filtered revisions, so after this loop no
    # start point is filtered (and hence no ancestor of one is either).
    revs = list(revs)
    to_node = self.node
    for startrev in revs:
        to_node(startrev)

    # Prefer the Rust implementation when it is available.
    if rustancestor is None:
        factory = ancestor.lazyancestors
        source = self._uncheckedparentrevs
    else:
        factory = rustancestor.LazyAncestors
        source = self.index
    return factory(source, revs, stoprev=stoprev, inclusive=inclusive)
978
980
def descendants(self, revs):
    """Generate the descendant revisions of ``revs``."""
    revsfn = self.revs
    parentrevsfn = self.parentrevs
    return dagop.descendantrevs(revs, revsfn, parentrevsfn)
981
983
def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common. In revset terminology, we return the
    tuple:

      ::common, (::heads) - (::common)

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [self.nullid]
    if heads is None:
        heads = self.heads()

    # Work in revision numbers from here on.
    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    class lazyset(object):
        # Set-like wrapper that unions an explicit set of added values
        # with a lazily-evaluated iterable (the ancestors generator),
        # so we never force the full ancestor set unless needed.
        def __init__(self, lazyvalues):
            self.addedvalues = set()
            self.lazyvalues = lazyvalues

        def __contains__(self, value):
            return value in self.addedvalues or value in self.lazyvalues

        def __iter__(self):
            added = self.addedvalues
            for r in added:
                yield r
            for r in self.lazyvalues:
                if not r in added:
                    yield r

        def add(self, value):
            self.addedvalues.add(value)

        def update(self, values):
            self.addedvalues.update(values)

    # ``has`` ends up representing ::common inclusively (plus nullrev).
    has = lazyset(self.ancestors(common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    # (breadth-first walk over parents, pruned at ``has``)
    missing = set()
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        else:
            missing.add(r)
            for p in self.parentrevs(r):
                if p not in has:
                    visit.append(p)
    # Sorting by rev number yields a topological order.
    missing = list(missing)
    missing.sort()
    return has, [self.node(miss) for miss in missing]
1045
1047
def incrementalmissingrevs(self, common=None):
    """Return an object that can be used to incrementally compute the
    revision numbers of the ancestors of arbitrary sets that are not
    ancestors of common. This is an ancestor.incrementalmissingancestors
    object.

    'common' is a list of revision numbers. If common is not supplied, uses
    nullrev.
    """
    bases = [nullrev] if common is None else common

    # Use the Rust implementation when available.
    if rustancestor is not None:
        return rustancestor.MissingAncestors(self.index, bases)
    return ancestor.incrementalmissingancestors(self.parentrevs, bases)
1061
1063
def findmissingrevs(self, common=None, heads=None):
    """Return the revision numbers of the ancestors of heads that
    are not ancestors of common.

    More specifically, return a list of revision numbers corresponding to
    nodes N such that every N satisfies the following constraints:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of revision numbers. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    bases = [nullrev] if common is None else common
    targets = self.headrevs() if heads is None else heads

    missing = self.incrementalmissingrevs(common=bases)
    return missing.missingancestors(targets)
1085
1087
def findmissing(self, common=None, heads=None):
    """Return the ancestors of heads that are not ancestors of common.

    More specifically, return a list of nodes N such that every N
    satisfies the following constraints:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    commonnodes = [self.nullid] if common is None else common
    headnodes = self.heads() if heads is None else heads

    # Translate node IDs to revision numbers for the ancestor machinery.
    commonrevs = [self.rev(n) for n in commonnodes]
    headrevs = [self.rev(n) for n in headnodes]

    missing = self.incrementalmissingrevs(common=commonrevs)
    return [self.node(r) for r in missing.missingancestors(headrevs)]
1111
1113
def nodesbetween(self, roots=None, heads=None):
    """Return a topological path from 'roots' to 'heads'.

    Return a tuple (nodes, outroots, outheads) where 'nodes' is a
    topologically sorted list of all nodes N that satisfy both of
    these constraints:

      1. N is a descendant of some node in 'roots'
      2. N is an ancestor of some node in 'heads'

    Every node is considered to be both a descendant and an ancestor
    of itself, so every reachable node in 'roots' and 'heads' will be
    included in 'nodes'.

    'outroots' is the list of reachable nodes in 'roots', i.e., the
    subset of 'roots' that is returned in 'nodes'. Likewise,
    'outheads' is the subset of 'heads' that is also in 'nodes'.

    'roots' and 'heads' are both lists of node IDs. If 'roots' is
    unspecified, uses nullid as the only root. If 'heads' is
    unspecified, uses list of all of the revlog's heads."""
    # Shared "nothing reachable" result.
    nonodes = ([], [], [])
    if roots is not None:
        roots = list(roots)
        if not roots:
            return nonodes
        lowestrev = min([self.rev(n) for n in roots])
    else:
        roots = [self.nullid]  # Everybody's a descendant of nullid
        lowestrev = nullrev
    if (lowestrev == nullrev) and (heads is None):
        # We want _all_ the nodes!
        return (
            [self.node(r) for r in self],
            [self.nullid],
            list(self.heads()),
        )
    if heads is None:
        # All nodes are ancestors, so the latest ancestor is the last
        # node.
        highestrev = len(self) - 1
        # Set ancestors to None to signal that every node is an ancestor.
        ancestors = None
        # Set heads to an empty dictionary for later discovery of heads
        heads = {}
    else:
        heads = list(heads)
        if not heads:
            return nonodes
        ancestors = set()
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, False)
        # Start at the top and keep marking parents until we're done.
        nodestotag = set(heads)
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == self.nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendant of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors.add(n)  # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.update(
                        [p for p in self.parents(n) if p != self.nullid]
                    )
                elif n in heads:  # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
        if not ancestors:
            return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [root for root in roots if root in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(root) for root in roots])
            else:
                # No more roots?  Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [self.nullid]
    # Transform our roots list into a set.
    descendants = set(roots)
    # Also, keep the original roots so we can filter out roots that aren't
    # 'real' roots (i.e. are descended from other roots).
    roots = descendants.copy()
    # Our topologically sorted list of output nodes.
    orderedout = []
    # Don't start at nullid since we don't want nullid in our output list,
    # and if nullid shows up in descendants, empty parents will look like
    # they're descendants.
    for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
        n = self.node(r)
        isdescendant = False
        if lowestrev == nullrev:  # Everybody is a descendant of nullid
            isdescendant = True
        elif n in descendants:
            # n is already a descendant
            isdescendant = True
            # This check only needs to be done here because all the roots
            # will start being marked is descendants before the loop.
            if n in roots:
                # If n was a root, check if it's a 'real' root.
                p = tuple(self.parents(n))
                # If any of its parents are descendants, it's not a root.
                if (p[0] in descendants) or (p[1] in descendants):
                    roots.remove(n)
        else:
            p = tuple(self.parents(n))
            # A node is a descendant if either of its parents are
            # descendants. (We seeded the dependents list with the roots
            # up there, remember?)
            if (p[0] in descendants) or (p[1] in descendants):
                descendants.add(n)
                isdescendant = True
        if isdescendant and ((ancestors is None) or (n in ancestors)):
            # Only include nodes that are both descendants and ancestors.
            orderedout.append(n)
            if (ancestors is not None) and (n in heads):
                # We're trying to figure out which heads are reachable
                # from roots.
                # Mark this head as having been reached
                heads[n] = True
            elif ancestors is None:
                # Otherwise, we're trying to discover the heads.
                # Assume this is a head because if it isn't, the next step
                # will eventually remove it.
                heads[n] = True
                # But, obviously its parents aren't.
                for p in self.parents(n):
                    heads.pop(p, None)
    # Keep only the heads that were actually reached from the roots.
    heads = [head for head, flag in pycompat.iteritems(heads) if flag]
    roots = list(roots)
    # All early-exit paths returned ``nonodes``; reaching here means each
    # of the three result components must be non-empty.
    assert orderedout
    assert roots
    assert heads
    return (orderedout, roots, heads)
1271
1273
def headrevs(self, revs=None):
    """Return the head revisions of ``revs`` (all heads when ``revs``
    is None)."""
    if revs is not None:
        # Subset query: prefer the Rust implementation when available.
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    # Whole-log query: the C index may provide a fast path.
    try:
        return self.index.headrevs()
    except AttributeError:
        return self._headrevs()
1281
1283
def computephases(self, roots):
    """Delegate phase computation for ``roots`` to the C index."""
    compute = self.index.computephasesmapsets
    return compute(roots)
1284
1286
def _headrevs(self):
    """Pure-Python fallback computing all head revisions."""
    total = len(self)
    if not total:
        return [nullrev]
    # We won't iterate over filtered revs, so nothing is a head at start.
    # One extra slot so index -1 (nullrev) aliases the unused last entry.
    ishead = [0] * (total + 1)
    index = self.index
    for rev in self:
        ishead[rev] = 1  # candidate head until some child is seen
        entry = index[rev]
        # entry[5]/entry[6] are this rev's parents: they have a child,
        # so they cannot be heads.
        ishead[entry[5]] = 0
        ishead[entry[6]] = 0
    return [rev for rev, flag in enumerate(ishead) if flag]
1297
1299
def heads(self, start=None, stop=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned
    if stop is specified, it will consider all the revs from stop
    as if they had no children
    """
    # Fast path: no constraints means "all heads of the whole log".
    if start is None and stop is None:
        if not len(self):
            return [self.nullid]
        return [self.node(r) for r in self.headrevs()]

    startrev = nullrev if start is None else self.rev(start)
    stoprevs = {self.rev(n) for n in stop or []}

    revs = dagop.headrevssubset(
        self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
    )

    return [self.node(rev) for rev in revs]
1323
1325
def children(self, node):
    """find the children of a given node"""
    kids = []
    parentrev = self.rev(node)
    # Children always have a higher revision number than their parent,
    # so only scan revs after ``parentrev``.
    for rev in self.revs(start=parentrev + 1):
        realparents = [p for p in self.parentrevs(rev) if p != nullrev]
        if not realparents:
            # A rev with no real parents is a child of nullid only.
            if parentrev == nullrev:
                kids.append(self.node(rev))
        else:
            for p in realparents:
                if p == parentrev:
                    kids.append(self.node(rev))
    return kids
1337
1339
def commonancestorsheads(self, a, b):
    """calculate all the heads of the common ancestors of nodes a and b"""
    reva = self.rev(a)
    revb = self.rev(b)
    ancheads = self._commonancestorsheads(reva, revb)
    return pycompat.maplist(self.node, ancheads)
1343
1345
def _commonancestorsheads(self, *revs):
    """calculate all the heads of the common ancestors of revs"""
    try:
        # Fast path: the C index implementation.
        ancs = self.index.commonancestorsheads(*revs)
    except (AttributeError, OverflowError):  # C implementation failed
        # Pure-Python fallback (also used when revs overflow the C type).
        ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
    return ancs
1351
1353
def isancestor(self, a, b):
    """return True if node a is an ancestor of node b

    A revision is considered an ancestor of itself."""
    reva = self.rev(a)
    revb = self.rev(b)
    return self.isancestorrev(reva, revb)
1358
1360
def isancestorrev(self, a, b):
    """return True if revision a is an ancestor of revision b

    A revision is considered an ancestor of itself.

    The implementation of this is trivial but the use of
    reachableroots is not."""
    # nullrev is an ancestor of everything, and every rev is its own
    # ancestor.
    if a == nullrev or a == b:
        return True
    # Ancestors always have smaller revision numbers than descendants.
    if a > b:
        return False
    return bool(self.reachableroots(a, [b], [a], includepath=False))
1373
1375
def reachableroots(self, minroot, heads, roots, includepath=False):
    """return (heads(::(<roots> and <roots>::<heads>)))

    If includepath is True, return (<roots>::<heads>)."""
    # Prefer the C index implementation; note that the two
    # implementations take heads/roots in a different order.
    try:
        return self.index.reachableroots2(
            minroot, heads, roots, includepath
        )
    except AttributeError:
        # Pure-Python fallback when the index lacks reachableroots2.
        return dagop._reachablerootspure(
            self.parentrevs, minroot, roots, heads, includepath
        )
1386
1388
def ancestor(self, a, b):
    """calculate the "best" common ancestor of nodes a and b"""
    reva = self.rev(a)
    revb = self.rev(b)
    try:
        # Fast path via the C index; may fail on missing attribute or
        # revision numbers too large for the C type.
        ancs = self.index.ancestors(reva, revb)
    except (AttributeError, OverflowError):
        ancs = ancestor.ancestors(self.parentrevs, reva, revb)
    if not ancs:
        return self.nullid
    # choose a consistent winner when there's a tie
    return min(map(self.node, ancs))
1399
1401
1400 def _match(self, id):
1402 def _match(self, id):
1401 if isinstance(id, int):
1403 if isinstance(id, int):
1402 # rev
1404 # rev
1403 return self.node(id)
1405 return self.node(id)
1404 if len(id) == self.nodeconstants.nodelen:
1406 if len(id) == self.nodeconstants.nodelen:
1405 # possibly a binary node
1407 # possibly a binary node
1406 # odds of a binary node being all hex in ASCII are 1 in 10**25
1408 # odds of a binary node being all hex in ASCII are 1 in 10**25
1407 try:
1409 try:
1408 node = id
1410 node = id
1409 self.rev(node) # quick search the index
1411 self.rev(node) # quick search the index
1410 return node
1412 return node
1411 except error.LookupError:
1413 except error.LookupError:
1412 pass # may be partial hex id
1414 pass # may be partial hex id
1413 try:
1415 try:
1414 # str(rev)
1416 # str(rev)
1415 rev = int(id)
1417 rev = int(id)
1416 if b"%d" % rev != id:
1418 if b"%d" % rev != id:
1417 raise ValueError
1419 raise ValueError
1418 if rev < 0:
1420 if rev < 0:
1419 rev = len(self) + rev
1421 rev = len(self) + rev
1420 if rev < 0 or rev >= len(self):
1422 if rev < 0 or rev >= len(self):
1421 raise ValueError
1423 raise ValueError
1422 return self.node(rev)
1424 return self.node(rev)
1423 except (ValueError, OverflowError):
1425 except (ValueError, OverflowError):
1424 pass
1426 pass
1425 if len(id) == 2 * self.nodeconstants.nodelen:
1427 if len(id) == 2 * self.nodeconstants.nodelen:
1426 try:
1428 try:
1427 # a full hex nodeid?
1429 # a full hex nodeid?
1428 node = bin(id)
1430 node = bin(id)
1429 self.rev(node)
1431 self.rev(node)
1430 return node
1432 return node
1431 except (TypeError, error.LookupError):
1433 except (TypeError, error.LookupError):
1432 pass
1434 pass
1433
1435
1434 def _partialmatch(self, id):
1436 def _partialmatch(self, id):
1435 # we don't care wdirfilenodeids as they should be always full hash
1437 # we don't care wdirfilenodeids as they should be always full hash
1436 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1438 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1437 try:
1439 try:
1438 partial = self.index.partialmatch(id)
1440 partial = self.index.partialmatch(id)
1439 if partial and self.hasnode(partial):
1441 if partial and self.hasnode(partial):
1440 if maybewdir:
1442 if maybewdir:
1441 # single 'ff...' match in radix tree, ambiguous with wdir
1443 # single 'ff...' match in radix tree, ambiguous with wdir
1442 raise error.RevlogError
1444 raise error.RevlogError
1443 return partial
1445 return partial
1444 if maybewdir:
1446 if maybewdir:
1445 # no 'ff...' match in radix tree, wdir identified
1447 # no 'ff...' match in radix tree, wdir identified
1446 raise error.WdirUnsupported
1448 raise error.WdirUnsupported
1447 return None
1449 return None
1448 except error.RevlogError:
1450 except error.RevlogError:
1449 # parsers.c radix tree lookup gave multiple matches
1451 # parsers.c radix tree lookup gave multiple matches
1450 # fast path: for unfiltered changelog, radix tree is accurate
1452 # fast path: for unfiltered changelog, radix tree is accurate
1451 if not getattr(self, 'filteredrevs', None):
1453 if not getattr(self, 'filteredrevs', None):
1452 raise error.AmbiguousPrefixLookupError(
1454 raise error.AmbiguousPrefixLookupError(
1453 id, self.display_id, _(b'ambiguous identifier')
1455 id, self.display_id, _(b'ambiguous identifier')
1454 )
1456 )
1455 # fall through to slow path that filters hidden revisions
1457 # fall through to slow path that filters hidden revisions
1456 except (AttributeError, ValueError):
1458 except (AttributeError, ValueError):
1457 # we are pure python, or key was too short to search radix tree
1459 # we are pure python, or key was too short to search radix tree
1458 pass
1460 pass
1459
1461
1460 if id in self._pcache:
1462 if id in self._pcache:
1461 return self._pcache[id]
1463 return self._pcache[id]
1462
1464
1463 if len(id) <= 40:
1465 if len(id) <= 40:
1464 try:
1466 try:
1465 # hex(node)[:...]
1467 # hex(node)[:...]
1466 l = len(id) // 2 # grab an even number of digits
1468 l = len(id) // 2 # grab an even number of digits
1467 prefix = bin(id[: l * 2])
1469 prefix = bin(id[: l * 2])
1468 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1470 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1469 nl = [
1471 nl = [
1470 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1472 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1471 ]
1473 ]
1472 if self.nodeconstants.nullhex.startswith(id):
1474 if self.nodeconstants.nullhex.startswith(id):
1473 nl.append(self.nullid)
1475 nl.append(self.nullid)
1474 if len(nl) > 0:
1476 if len(nl) > 0:
1475 if len(nl) == 1 and not maybewdir:
1477 if len(nl) == 1 and not maybewdir:
1476 self._pcache[id] = nl[0]
1478 self._pcache[id] = nl[0]
1477 return nl[0]
1479 return nl[0]
1478 raise error.AmbiguousPrefixLookupError(
1480 raise error.AmbiguousPrefixLookupError(
1479 id, self.display_id, _(b'ambiguous identifier')
1481 id, self.display_id, _(b'ambiguous identifier')
1480 )
1482 )
1481 if maybewdir:
1483 if maybewdir:
1482 raise error.WdirUnsupported
1484 raise error.WdirUnsupported
1483 return None
1485 return None
1484 except TypeError:
1486 except TypeError:
1485 pass
1487 pass
1486
1488
1487 def lookup(self, id):
1489 def lookup(self, id):
1488 """locate a node based on:
1490 """locate a node based on:
1489 - revision number or str(revision number)
1491 - revision number or str(revision number)
1490 - nodeid or subset of hex nodeid
1492 - nodeid or subset of hex nodeid
1491 """
1493 """
1492 n = self._match(id)
1494 n = self._match(id)
1493 if n is not None:
1495 if n is not None:
1494 return n
1496 return n
1495 n = self._partialmatch(id)
1497 n = self._partialmatch(id)
1496 if n:
1498 if n:
1497 return n
1499 return n
1498
1500
1499 raise error.LookupError(id, self.display_id, _(b'no match found'))
1501 raise error.LookupError(id, self.display_id, _(b'no match found'))
1500
1502
1501 def shortest(self, node, minlength=1):
1503 def shortest(self, node, minlength=1):
1502 """Find the shortest unambiguous prefix that matches node."""
1504 """Find the shortest unambiguous prefix that matches node."""
1503
1505
1504 def isvalid(prefix):
1506 def isvalid(prefix):
1505 try:
1507 try:
1506 matchednode = self._partialmatch(prefix)
1508 matchednode = self._partialmatch(prefix)
1507 except error.AmbiguousPrefixLookupError:
1509 except error.AmbiguousPrefixLookupError:
1508 return False
1510 return False
1509 except error.WdirUnsupported:
1511 except error.WdirUnsupported:
1510 # single 'ff...' match
1512 # single 'ff...' match
1511 return True
1513 return True
1512 if matchednode is None:
1514 if matchednode is None:
1513 raise error.LookupError(node, self.display_id, _(b'no node'))
1515 raise error.LookupError(node, self.display_id, _(b'no node'))
1514 return True
1516 return True
1515
1517
1516 def maybewdir(prefix):
1518 def maybewdir(prefix):
1517 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1519 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1518
1520
1519 hexnode = hex(node)
1521 hexnode = hex(node)
1520
1522
1521 def disambiguate(hexnode, minlength):
1523 def disambiguate(hexnode, minlength):
1522 """Disambiguate against wdirid."""
1524 """Disambiguate against wdirid."""
1523 for length in range(minlength, len(hexnode) + 1):
1525 for length in range(minlength, len(hexnode) + 1):
1524 prefix = hexnode[:length]
1526 prefix = hexnode[:length]
1525 if not maybewdir(prefix):
1527 if not maybewdir(prefix):
1526 return prefix
1528 return prefix
1527
1529
1528 if not getattr(self, 'filteredrevs', None):
1530 if not getattr(self, 'filteredrevs', None):
1529 try:
1531 try:
1530 length = max(self.index.shortest(node), minlength)
1532 length = max(self.index.shortest(node), minlength)
1531 return disambiguate(hexnode, length)
1533 return disambiguate(hexnode, length)
1532 except error.RevlogError:
1534 except error.RevlogError:
1533 if node != self.nodeconstants.wdirid:
1535 if node != self.nodeconstants.wdirid:
1534 raise error.LookupError(
1536 raise error.LookupError(
1535 node, self.display_id, _(b'no node')
1537 node, self.display_id, _(b'no node')
1536 )
1538 )
1537 except AttributeError:
1539 except AttributeError:
1538 # Fall through to pure code
1540 # Fall through to pure code
1539 pass
1541 pass
1540
1542
1541 if node == self.nodeconstants.wdirid:
1543 if node == self.nodeconstants.wdirid:
1542 for length in range(minlength, len(hexnode) + 1):
1544 for length in range(minlength, len(hexnode) + 1):
1543 prefix = hexnode[:length]
1545 prefix = hexnode[:length]
1544 if isvalid(prefix):
1546 if isvalid(prefix):
1545 return prefix
1547 return prefix
1546
1548
1547 for length in range(minlength, len(hexnode) + 1):
1549 for length in range(minlength, len(hexnode) + 1):
1548 prefix = hexnode[:length]
1550 prefix = hexnode[:length]
1549 if isvalid(prefix):
1551 if isvalid(prefix):
1550 return disambiguate(hexnode, length)
1552 return disambiguate(hexnode, length)
1551
1553
1552 def cmp(self, node, text):
1554 def cmp(self, node, text):
1553 """compare text with a given file revision
1555 """compare text with a given file revision
1554
1556
1555 returns True if text is different than what is stored.
1557 returns True if text is different than what is stored.
1556 """
1558 """
1557 p1, p2 = self.parents(node)
1559 p1, p2 = self.parents(node)
1558 return storageutil.hashrevisionsha1(text, p1, p2) != node
1560 return storageutil.hashrevisionsha1(text, p1, p2) != node
1559
1561
1560 def _cachesegment(self, offset, data):
1562 def _cachesegment(self, offset, data):
1561 """Add a segment to the revlog cache.
1563 """Add a segment to the revlog cache.
1562
1564
1563 Accepts an absolute offset and the data that is at that location.
1565 Accepts an absolute offset and the data that is at that location.
1564 """
1566 """
1565 o, d = self._chunkcache
1567 o, d = self._chunkcache
1566 # try to add to existing cache
1568 # try to add to existing cache
1567 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1569 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1568 self._chunkcache = o, d + data
1570 self._chunkcache = o, d + data
1569 else:
1571 else:
1570 self._chunkcache = offset, data
1572 self._chunkcache = offset, data
1571
1573
1572 def _readsegment(self, offset, length, df=None):
1574 def _readsegment(self, offset, length, df=None):
1573 """Load a segment of raw data from the revlog.
1575 """Load a segment of raw data from the revlog.
1574
1576
1575 Accepts an absolute offset, length to read, and an optional existing
1577 Accepts an absolute offset, length to read, and an optional existing
1576 file handle to read from.
1578 file handle to read from.
1577
1579
1578 If an existing file handle is passed, it will be seeked and the
1580 If an existing file handle is passed, it will be seeked and the
1579 original seek position will NOT be restored.
1581 original seek position will NOT be restored.
1580
1582
1581 Returns a str or buffer of raw byte data.
1583 Returns a str or buffer of raw byte data.
1582
1584
1583 Raises if the requested number of bytes could not be read.
1585 Raises if the requested number of bytes could not be read.
1584 """
1586 """
1585 # Cache data both forward and backward around the requested
1587 # Cache data both forward and backward around the requested
1586 # data, in a fixed size window. This helps speed up operations
1588 # data, in a fixed size window. This helps speed up operations
1587 # involving reading the revlog backwards.
1589 # involving reading the revlog backwards.
1588 cachesize = self._chunkcachesize
1590 cachesize = self._chunkcachesize
1589 realoffset = offset & ~(cachesize - 1)
1591 realoffset = offset & ~(cachesize - 1)
1590 reallength = (
1592 reallength = (
1591 (offset + length + cachesize) & ~(cachesize - 1)
1593 (offset + length + cachesize) & ~(cachesize - 1)
1592 ) - realoffset
1594 ) - realoffset
1593 with self._datareadfp(df) as df:
1595 with self._datareadfp(df) as df:
1594 df.seek(realoffset)
1596 df.seek(realoffset)
1595 d = df.read(reallength)
1597 d = df.read(reallength)
1596
1598
1597 self._cachesegment(realoffset, d)
1599 self._cachesegment(realoffset, d)
1598 if offset != realoffset or reallength != length:
1600 if offset != realoffset or reallength != length:
1599 startoffset = offset - realoffset
1601 startoffset = offset - realoffset
1600 if len(d) - startoffset < length:
1602 if len(d) - startoffset < length:
1601 raise error.RevlogError(
1603 raise error.RevlogError(
1602 _(
1604 _(
1603 b'partial read of revlog %s; expected %d bytes from '
1605 b'partial read of revlog %s; expected %d bytes from '
1604 b'offset %d, got %d'
1606 b'offset %d, got %d'
1605 )
1607 )
1606 % (
1608 % (
1607 self._indexfile if self._inline else self._datafile,
1609 self._indexfile if self._inline else self._datafile,
1608 length,
1610 length,
1609 offset,
1611 offset,
1610 len(d) - startoffset,
1612 len(d) - startoffset,
1611 )
1613 )
1612 )
1614 )
1613
1615
1614 return util.buffer(d, startoffset, length)
1616 return util.buffer(d, startoffset, length)
1615
1617
1616 if len(d) < length:
1618 if len(d) < length:
1617 raise error.RevlogError(
1619 raise error.RevlogError(
1618 _(
1620 _(
1619 b'partial read of revlog %s; expected %d bytes from offset '
1621 b'partial read of revlog %s; expected %d bytes from offset '
1620 b'%d, got %d'
1622 b'%d, got %d'
1621 )
1623 )
1622 % (
1624 % (
1623 self._indexfile if self._inline else self._datafile,
1625 self._indexfile if self._inline else self._datafile,
1624 length,
1626 length,
1625 offset,
1627 offset,
1626 len(d),
1628 len(d),
1627 )
1629 )
1628 )
1630 )
1629
1631
1630 return d
1632 return d
1631
1633
1632 def _getsegment(self, offset, length, df=None):
1634 def _getsegment(self, offset, length, df=None):
1633 """Obtain a segment of raw data from the revlog.
1635 """Obtain a segment of raw data from the revlog.
1634
1636
1635 Accepts an absolute offset, length of bytes to obtain, and an
1637 Accepts an absolute offset, length of bytes to obtain, and an
1636 optional file handle to the already-opened revlog. If the file
1638 optional file handle to the already-opened revlog. If the file
1637 handle is used, it's original seek position will not be preserved.
1639 handle is used, it's original seek position will not be preserved.
1638
1640
1639 Requests for data may be returned from a cache.
1641 Requests for data may be returned from a cache.
1640
1642
1641 Returns a str or a buffer instance of raw byte data.
1643 Returns a str or a buffer instance of raw byte data.
1642 """
1644 """
1643 o, d = self._chunkcache
1645 o, d = self._chunkcache
1644 l = len(d)
1646 l = len(d)
1645
1647
1646 # is it in the cache?
1648 # is it in the cache?
1647 cachestart = offset - o
1649 cachestart = offset - o
1648 cacheend = cachestart + length
1650 cacheend = cachestart + length
1649 if cachestart >= 0 and cacheend <= l:
1651 if cachestart >= 0 and cacheend <= l:
1650 if cachestart == 0 and cacheend == l:
1652 if cachestart == 0 and cacheend == l:
1651 return d # avoid a copy
1653 return d # avoid a copy
1652 return util.buffer(d, cachestart, cacheend - cachestart)
1654 return util.buffer(d, cachestart, cacheend - cachestart)
1653
1655
1654 return self._readsegment(offset, length, df=df)
1656 return self._readsegment(offset, length, df=df)
1655
1657
1656 def _getsegmentforrevs(self, startrev, endrev, df=None):
1658 def _getsegmentforrevs(self, startrev, endrev, df=None):
1657 """Obtain a segment of raw data corresponding to a range of revisions.
1659 """Obtain a segment of raw data corresponding to a range of revisions.
1658
1660
1659 Accepts the start and end revisions and an optional already-open
1661 Accepts the start and end revisions and an optional already-open
1660 file handle to be used for reading. If the file handle is read, its
1662 file handle to be used for reading. If the file handle is read, its
1661 seek position will not be preserved.
1663 seek position will not be preserved.
1662
1664
1663 Requests for data may be satisfied by a cache.
1665 Requests for data may be satisfied by a cache.
1664
1666
1665 Returns a 2-tuple of (offset, data) for the requested range of
1667 Returns a 2-tuple of (offset, data) for the requested range of
1666 revisions. Offset is the integer offset from the beginning of the
1668 revisions. Offset is the integer offset from the beginning of the
1667 revlog and data is a str or buffer of the raw byte data.
1669 revlog and data is a str or buffer of the raw byte data.
1668
1670
1669 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1671 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1670 to determine where each revision's data begins and ends.
1672 to determine where each revision's data begins and ends.
1671 """
1673 """
1672 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1674 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1673 # (functions are expensive).
1675 # (functions are expensive).
1674 index = self.index
1676 index = self.index
1675 istart = index[startrev]
1677 istart = index[startrev]
1676 start = int(istart[0] >> 16)
1678 start = int(istart[0] >> 16)
1677 if startrev == endrev:
1679 if startrev == endrev:
1678 end = start + istart[1]
1680 end = start + istart[1]
1679 else:
1681 else:
1680 iend = index[endrev]
1682 iend = index[endrev]
1681 end = int(iend[0] >> 16) + iend[1]
1683 end = int(iend[0] >> 16) + iend[1]
1682
1684
1683 if self._inline:
1685 if self._inline:
1684 start += (startrev + 1) * self.index.entry_size
1686 start += (startrev + 1) * self.index.entry_size
1685 end += (endrev + 1) * self.index.entry_size
1687 end += (endrev + 1) * self.index.entry_size
1686 length = end - start
1688 length = end - start
1687
1689
1688 return start, self._getsegment(start, length, df=df)
1690 return start, self._getsegment(start, length, df=df)
1689
1691
1690 def _chunk(self, rev, df=None):
1692 def _chunk(self, rev, df=None):
1691 """Obtain a single decompressed chunk for a revision.
1693 """Obtain a single decompressed chunk for a revision.
1692
1694
1693 Accepts an integer revision and an optional already-open file handle
1695 Accepts an integer revision and an optional already-open file handle
1694 to be used for reading. If used, the seek position of the file will not
1696 to be used for reading. If used, the seek position of the file will not
1695 be preserved.
1697 be preserved.
1696
1698
1697 Returns a str holding uncompressed data for the requested revision.
1699 Returns a str holding uncompressed data for the requested revision.
1698 """
1700 """
1699 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1701 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1700
1702
1701 def _chunks(self, revs, df=None, targetsize=None):
1703 def _chunks(self, revs, df=None, targetsize=None):
1702 """Obtain decompressed chunks for the specified revisions.
1704 """Obtain decompressed chunks for the specified revisions.
1703
1705
1704 Accepts an iterable of numeric revisions that are assumed to be in
1706 Accepts an iterable of numeric revisions that are assumed to be in
1705 ascending order. Also accepts an optional already-open file handle
1707 ascending order. Also accepts an optional already-open file handle
1706 to be used for reading. If used, the seek position of the file will
1708 to be used for reading. If used, the seek position of the file will
1707 not be preserved.
1709 not be preserved.
1708
1710
1709 This function is similar to calling ``self._chunk()`` multiple times,
1711 This function is similar to calling ``self._chunk()`` multiple times,
1710 but is faster.
1712 but is faster.
1711
1713
1712 Returns a list with decompressed data for each requested revision.
1714 Returns a list with decompressed data for each requested revision.
1713 """
1715 """
1714 if not revs:
1716 if not revs:
1715 return []
1717 return []
1716 start = self.start
1718 start = self.start
1717 length = self.length
1719 length = self.length
1718 inline = self._inline
1720 inline = self._inline
1719 iosize = self.index.entry_size
1721 iosize = self.index.entry_size
1720 buffer = util.buffer
1722 buffer = util.buffer
1721
1723
1722 l = []
1724 l = []
1723 ladd = l.append
1725 ladd = l.append
1724
1726
1725 if not self._withsparseread:
1727 if not self._withsparseread:
1726 slicedchunks = (revs,)
1728 slicedchunks = (revs,)
1727 else:
1729 else:
1728 slicedchunks = deltautil.slicechunk(
1730 slicedchunks = deltautil.slicechunk(
1729 self, revs, targetsize=targetsize
1731 self, revs, targetsize=targetsize
1730 )
1732 )
1731
1733
1732 for revschunk in slicedchunks:
1734 for revschunk in slicedchunks:
1733 firstrev = revschunk[0]
1735 firstrev = revschunk[0]
1734 # Skip trailing revisions with empty diff
1736 # Skip trailing revisions with empty diff
1735 for lastrev in revschunk[::-1]:
1737 for lastrev in revschunk[::-1]:
1736 if length(lastrev) != 0:
1738 if length(lastrev) != 0:
1737 break
1739 break
1738
1740
1739 try:
1741 try:
1740 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1742 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1741 except OverflowError:
1743 except OverflowError:
1742 # issue4215 - we can't cache a run of chunks greater than
1744 # issue4215 - we can't cache a run of chunks greater than
1743 # 2G on Windows
1745 # 2G on Windows
1744 return [self._chunk(rev, df=df) for rev in revschunk]
1746 return [self._chunk(rev, df=df) for rev in revschunk]
1745
1747
1746 decomp = self.decompress
1748 decomp = self.decompress
1747 for rev in revschunk:
1749 for rev in revschunk:
1748 chunkstart = start(rev)
1750 chunkstart = start(rev)
1749 if inline:
1751 if inline:
1750 chunkstart += (rev + 1) * iosize
1752 chunkstart += (rev + 1) * iosize
1751 chunklength = length(rev)
1753 chunklength = length(rev)
1752 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1754 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1753
1755
1754 return l
1756 return l
1755
1757
1756 def _chunkclear(self):
1758 def _chunkclear(self):
1757 """Clear the raw chunk cache."""
1759 """Clear the raw chunk cache."""
1758 self._chunkcache = (0, b'')
1760 self._chunkcache = (0, b'')
1759
1761
1760 def deltaparent(self, rev):
1762 def deltaparent(self, rev):
1761 """return deltaparent of the given revision"""
1763 """return deltaparent of the given revision"""
1762 base = self.index[rev][3]
1764 base = self.index[rev][3]
1763 if base == rev:
1765 if base == rev:
1764 return nullrev
1766 return nullrev
1765 elif self._generaldelta:
1767 elif self._generaldelta:
1766 return base
1768 return base
1767 else:
1769 else:
1768 return rev - 1
1770 return rev - 1
1769
1771
1770 def issnapshot(self, rev):
1772 def issnapshot(self, rev):
1771 """tells whether rev is a snapshot"""
1773 """tells whether rev is a snapshot"""
1772 if not self._sparserevlog:
1774 if not self._sparserevlog:
1773 return self.deltaparent(rev) == nullrev
1775 return self.deltaparent(rev) == nullrev
1774 elif util.safehasattr(self.index, b'issnapshot'):
1776 elif util.safehasattr(self.index, b'issnapshot'):
1775 # directly assign the method to cache the testing and access
1777 # directly assign the method to cache the testing and access
1776 self.issnapshot = self.index.issnapshot
1778 self.issnapshot = self.index.issnapshot
1777 return self.issnapshot(rev)
1779 return self.issnapshot(rev)
1778 if rev == nullrev:
1780 if rev == nullrev:
1779 return True
1781 return True
1780 entry = self.index[rev]
1782 entry = self.index[rev]
1781 base = entry[3]
1783 base = entry[3]
1782 if base == rev:
1784 if base == rev:
1783 return True
1785 return True
1784 if base == nullrev:
1786 if base == nullrev:
1785 return True
1787 return True
1786 p1 = entry[5]
1788 p1 = entry[5]
1787 p2 = entry[6]
1789 p2 = entry[6]
1788 if base == p1 or base == p2:
1790 if base == p1 or base == p2:
1789 return False
1791 return False
1790 return self.issnapshot(base)
1792 return self.issnapshot(base)
1791
1793
1792 def snapshotdepth(self, rev):
1794 def snapshotdepth(self, rev):
1793 """number of snapshot in the chain before this one"""
1795 """number of snapshot in the chain before this one"""
1794 if not self.issnapshot(rev):
1796 if not self.issnapshot(rev):
1795 raise error.ProgrammingError(b'revision %d not a snapshot')
1797 raise error.ProgrammingError(b'revision %d not a snapshot')
1796 return len(self._deltachain(rev)[0]) - 1
1798 return len(self._deltachain(rev)[0]) - 1
1797
1799
1798 def revdiff(self, rev1, rev2):
1800 def revdiff(self, rev1, rev2):
1799 """return or calculate a delta between two revisions
1801 """return or calculate a delta between two revisions
1800
1802
1801 The delta calculated is in binary form and is intended to be written to
1803 The delta calculated is in binary form and is intended to be written to
1802 revlog data directly. So this function needs raw revision data.
1804 revlog data directly. So this function needs raw revision data.
1803 """
1805 """
1804 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1806 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1805 return bytes(self._chunk(rev2))
1807 return bytes(self._chunk(rev2))
1806
1808
1807 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1809 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1808
1810
1809 def _processflags(self, text, flags, operation, raw=False):
1811 def _processflags(self, text, flags, operation, raw=False):
1810 """deprecated entry point to access flag processors"""
1812 """deprecated entry point to access flag processors"""
1811 msg = b'_processflag(...) use the specialized variant'
1813 msg = b'_processflag(...) use the specialized variant'
1812 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1814 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1813 if raw:
1815 if raw:
1814 return text, flagutil.processflagsraw(self, text, flags)
1816 return text, flagutil.processflagsraw(self, text, flags)
1815 elif operation == b'read':
1817 elif operation == b'read':
1816 return flagutil.processflagsread(self, text, flags)
1818 return flagutil.processflagsread(self, text, flags)
1817 else: # write operation
1819 else: # write operation
1818 return flagutil.processflagswrite(self, text, flags)
1820 return flagutil.processflagswrite(self, text, flags)
1819
1821
1820 def revision(self, nodeorrev, _df=None, raw=False):
1822 def revision(self, nodeorrev, _df=None, raw=False):
1821 """return an uncompressed revision of a given node or revision
1823 """return an uncompressed revision of a given node or revision
1822 number.
1824 number.
1823
1825
1824 _df - an existing file handle to read from. (internal-only)
1826 _df - an existing file handle to read from. (internal-only)
1825 raw - an optional argument specifying if the revision data is to be
1827 raw - an optional argument specifying if the revision data is to be
1826 treated as raw data when applying flag transforms. 'raw' should be set
1828 treated as raw data when applying flag transforms. 'raw' should be set
1827 to True when generating changegroups or in debug commands.
1829 to True when generating changegroups or in debug commands.
1828 """
1830 """
1829 if raw:
1831 if raw:
1830 msg = (
1832 msg = (
1831 b'revlog.revision(..., raw=True) is deprecated, '
1833 b'revlog.revision(..., raw=True) is deprecated, '
1832 b'use revlog.rawdata(...)'
1834 b'use revlog.rawdata(...)'
1833 )
1835 )
1834 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1836 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1835 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1837 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1836
1838
1837 def sidedata(self, nodeorrev, _df=None):
1839 def sidedata(self, nodeorrev, _df=None):
1838 """a map of extra data related to the changeset but not part of the hash
1840 """a map of extra data related to the changeset but not part of the hash
1839
1841
1840 This function currently return a dictionary. However, more advanced
1842 This function currently return a dictionary. However, more advanced
1841 mapping object will likely be used in the future for a more
1843 mapping object will likely be used in the future for a more
1842 efficient/lazy code.
1844 efficient/lazy code.
1843 """
1845 """
1844 return self._revisiondata(nodeorrev, _df)[1]
1846 return self._revisiondata(nodeorrev, _df)[1]
1845
1847
1846 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1848 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1847 # deal with <nodeorrev> argument type
1849 # deal with <nodeorrev> argument type
1848 if isinstance(nodeorrev, int):
1850 if isinstance(nodeorrev, int):
1849 rev = nodeorrev
1851 rev = nodeorrev
1850 node = self.node(rev)
1852 node = self.node(rev)
1851 else:
1853 else:
1852 node = nodeorrev
1854 node = nodeorrev
1853 rev = None
1855 rev = None
1854
1856
1855 # fast path the special `nullid` rev
1857 # fast path the special `nullid` rev
1856 if node == self.nullid:
1858 if node == self.nullid:
1857 return b"", {}
1859 return b"", {}
1858
1860
1859 # ``rawtext`` is the text as stored inside the revlog. Might be the
1861 # ``rawtext`` is the text as stored inside the revlog. Might be the
1860 # revision or might need to be processed to retrieve the revision.
1862 # revision or might need to be processed to retrieve the revision.
1861 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1863 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1862
1864
1863 if self.hassidedata:
1865 if self.hassidedata:
1864 if rev is None:
1866 if rev is None:
1865 rev = self.rev(node)
1867 rev = self.rev(node)
1866 sidedata = self._sidedata(rev)
1868 sidedata = self._sidedata(rev)
1867 else:
1869 else:
1868 sidedata = {}
1870 sidedata = {}
1869
1871
1870 if raw and validated:
1872 if raw and validated:
1871 # if we don't want to process the raw text and that raw
1873 # if we don't want to process the raw text and that raw
1872 # text is cached, we can exit early.
1874 # text is cached, we can exit early.
1873 return rawtext, sidedata
1875 return rawtext, sidedata
1874 if rev is None:
1876 if rev is None:
1875 rev = self.rev(node)
1877 rev = self.rev(node)
1876 # the revlog's flag for this revision
1878 # the revlog's flag for this revision
1877 # (usually alter its state or content)
1879 # (usually alter its state or content)
1878 flags = self.flags(rev)
1880 flags = self.flags(rev)
1879
1881
1880 if validated and flags == REVIDX_DEFAULT_FLAGS:
1882 if validated and flags == REVIDX_DEFAULT_FLAGS:
1881 # no extra flags set, no flag processor runs, text = rawtext
1883 # no extra flags set, no flag processor runs, text = rawtext
1882 return rawtext, sidedata
1884 return rawtext, sidedata
1883
1885
1884 if raw:
1886 if raw:
1885 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1887 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1886 text = rawtext
1888 text = rawtext
1887 else:
1889 else:
1888 r = flagutil.processflagsread(self, rawtext, flags)
1890 r = flagutil.processflagsread(self, rawtext, flags)
1889 text, validatehash = r
1891 text, validatehash = r
1890 if validatehash:
1892 if validatehash:
1891 self.checkhash(text, node, rev=rev)
1893 self.checkhash(text, node, rev=rev)
1892 if not validated:
1894 if not validated:
1893 self._revisioncache = (node, rev, rawtext)
1895 self._revisioncache = (node, rev, rawtext)
1894
1896
1895 return text, sidedata
1897 return text, sidedata
1896
1898
1897 def _rawtext(self, node, rev, _df=None):
1899 def _rawtext(self, node, rev, _df=None):
1898 """return the possibly unvalidated rawtext for a revision
1900 """return the possibly unvalidated rawtext for a revision
1899
1901
1900 returns (rev, rawtext, validated)
1902 returns (rev, rawtext, validated)
1901 """
1903 """
1902
1904
1903 # revision in the cache (could be useful to apply delta)
1905 # revision in the cache (could be useful to apply delta)
1904 cachedrev = None
1906 cachedrev = None
1905 # An intermediate text to apply deltas to
1907 # An intermediate text to apply deltas to
1906 basetext = None
1908 basetext = None
1907
1909
1908 # Check if we have the entry in cache
1910 # Check if we have the entry in cache
1909 # The cache entry looks like (node, rev, rawtext)
1911 # The cache entry looks like (node, rev, rawtext)
1910 if self._revisioncache:
1912 if self._revisioncache:
1911 if self._revisioncache[0] == node:
1913 if self._revisioncache[0] == node:
1912 return (rev, self._revisioncache[2], True)
1914 return (rev, self._revisioncache[2], True)
1913 cachedrev = self._revisioncache[1]
1915 cachedrev = self._revisioncache[1]
1914
1916
1915 if rev is None:
1917 if rev is None:
1916 rev = self.rev(node)
1918 rev = self.rev(node)
1917
1919
1918 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1920 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1919 if stopped:
1921 if stopped:
1920 basetext = self._revisioncache[2]
1922 basetext = self._revisioncache[2]
1921
1923
1922 # drop cache to save memory, the caller is expected to
1924 # drop cache to save memory, the caller is expected to
1923 # update self._revisioncache after validating the text
1925 # update self._revisioncache after validating the text
1924 self._revisioncache = None
1926 self._revisioncache = None
1925
1927
1926 targetsize = None
1928 targetsize = None
1927 rawsize = self.index[rev][2]
1929 rawsize = self.index[rev][2]
1928 if 0 <= rawsize:
1930 if 0 <= rawsize:
1929 targetsize = 4 * rawsize
1931 targetsize = 4 * rawsize
1930
1932
1931 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1933 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1932 if basetext is None:
1934 if basetext is None:
1933 basetext = bytes(bins[0])
1935 basetext = bytes(bins[0])
1934 bins = bins[1:]
1936 bins = bins[1:]
1935
1937
1936 rawtext = mdiff.patches(basetext, bins)
1938 rawtext = mdiff.patches(basetext, bins)
1937 del basetext # let us have a chance to free memory early
1939 del basetext # let us have a chance to free memory early
1938 return (rev, rawtext, False)
1940 return (rev, rawtext, False)
1939
1941
1940 def _sidedata(self, rev):
1942 def _sidedata(self, rev):
1941 """Return the sidedata for a given revision number."""
1943 """Return the sidedata for a given revision number."""
1942 index_entry = self.index[rev]
1944 index_entry = self.index[rev]
1943 sidedata_offset = index_entry[8]
1945 sidedata_offset = index_entry[8]
1944 sidedata_size = index_entry[9]
1946 sidedata_size = index_entry[9]
1945
1947
1946 if self._inline:
1948 if self._inline:
1947 sidedata_offset += self.index.entry_size * (1 + rev)
1949 sidedata_offset += self.index.entry_size * (1 + rev)
1948 if sidedata_size == 0:
1950 if sidedata_size == 0:
1949 return {}
1951 return {}
1950
1952
1951 segment = self._getsegment(sidedata_offset, sidedata_size)
1953 segment = self._getsegment(sidedata_offset, sidedata_size)
1952 sidedata = sidedatautil.deserialize_sidedata(segment)
1954 sidedata = sidedatautil.deserialize_sidedata(segment)
1953 return sidedata
1955 return sidedata
1954
1956
1955 def rawdata(self, nodeorrev, _df=None):
1957 def rawdata(self, nodeorrev, _df=None):
1956 """return an uncompressed raw data of a given node or revision number.
1958 """return an uncompressed raw data of a given node or revision number.
1957
1959
1958 _df - an existing file handle to read from. (internal-only)
1960 _df - an existing file handle to read from. (internal-only)
1959 """
1961 """
1960 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1962 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1961
1963
1962 def hash(self, text, p1, p2):
1964 def hash(self, text, p1, p2):
1963 """Compute a node hash.
1965 """Compute a node hash.
1964
1966
1965 Available as a function so that subclasses can replace the hash
1967 Available as a function so that subclasses can replace the hash
1966 as needed.
1968 as needed.
1967 """
1969 """
1968 return storageutil.hashrevisionsha1(text, p1, p2)
1970 return storageutil.hashrevisionsha1(text, p1, p2)
1969
1971
1970 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1972 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1971 """Check node hash integrity.
1973 """Check node hash integrity.
1972
1974
1973 Available as a function so that subclasses can extend hash mismatch
1975 Available as a function so that subclasses can extend hash mismatch
1974 behaviors as needed.
1976 behaviors as needed.
1975 """
1977 """
1976 try:
1978 try:
1977 if p1 is None and p2 is None:
1979 if p1 is None and p2 is None:
1978 p1, p2 = self.parents(node)
1980 p1, p2 = self.parents(node)
1979 if node != self.hash(text, p1, p2):
1981 if node != self.hash(text, p1, p2):
1980 # Clear the revision cache on hash failure. The revision cache
1982 # Clear the revision cache on hash failure. The revision cache
1981 # only stores the raw revision and clearing the cache does have
1983 # only stores the raw revision and clearing the cache does have
1982 # the side-effect that we won't have a cache hit when the raw
1984 # the side-effect that we won't have a cache hit when the raw
1983 # revision data is accessed. But this case should be rare and
1985 # revision data is accessed. But this case should be rare and
1984 # it is extra work to teach the cache about the hash
1986 # it is extra work to teach the cache about the hash
1985 # verification state.
1987 # verification state.
1986 if self._revisioncache and self._revisioncache[0] == node:
1988 if self._revisioncache and self._revisioncache[0] == node:
1987 self._revisioncache = None
1989 self._revisioncache = None
1988
1990
1989 revornode = rev
1991 revornode = rev
1990 if revornode is None:
1992 if revornode is None:
1991 revornode = templatefilters.short(hex(node))
1993 revornode = templatefilters.short(hex(node))
1992 raise error.RevlogError(
1994 raise error.RevlogError(
1993 _(b"integrity check failed on %s:%s")
1995 _(b"integrity check failed on %s:%s")
1994 % (self.display_id, pycompat.bytestr(revornode))
1996 % (self.display_id, pycompat.bytestr(revornode))
1995 )
1997 )
1996 except error.RevlogError:
1998 except error.RevlogError:
1997 if self._censorable and storageutil.iscensoredtext(text):
1999 if self._censorable and storageutil.iscensoredtext(text):
1998 raise error.CensoredNodeError(self.display_id, node, text)
2000 raise error.CensoredNodeError(self.display_id, node, text)
1999 raise
2001 raise
2000
2002
2001 def _enforceinlinesize(self, tr):
2003 def _enforceinlinesize(self, tr):
2002 """Check if the revlog is too big for inline and convert if so.
2004 """Check if the revlog is too big for inline and convert if so.
2003
2005
2004 This should be called after revisions are added to the revlog. If the
2006 This should be called after revisions are added to the revlog. If the
2005 revlog has grown too large to be an inline revlog, it will convert it
2007 revlog has grown too large to be an inline revlog, it will convert it
2006 to use multiple index and data files.
2008 to use multiple index and data files.
2007 """
2009 """
2008 tiprev = len(self) - 1
2010 tiprev = len(self) - 1
2009 total_size = self.start(tiprev) + self.length(tiprev)
2011 total_size = self.start(tiprev) + self.length(tiprev)
2010 if not self._inline or total_size < _maxinline:
2012 if not self._inline or total_size < _maxinline:
2011 return
2013 return
2012
2014
2013 troffset = tr.findoffset(self._indexfile)
2015 troffset = tr.findoffset(self._indexfile)
2014 if troffset is None:
2016 if troffset is None:
2015 raise error.RevlogError(
2017 raise error.RevlogError(
2016 _(b"%s not found in the transaction") % self._indexfile
2018 _(b"%s not found in the transaction") % self._indexfile
2017 )
2019 )
2018 trindex = 0
2020 trindex = 0
2019 tr.add(self._datafile, 0)
2021 tr.add(self._datafile, 0)
2020
2022
2021 existing_handles = False
2023 existing_handles = False
2022 if self._writinghandles is not None:
2024 if self._writinghandles is not None:
2023 existing_handles = True
2025 existing_handles = True
2024 fp = self._writinghandles[0]
2026 fp = self._writinghandles[0]
2025 fp.flush()
2027 fp.flush()
2026 fp.close()
2028 fp.close()
2027 # We can't use the cached file handle after close(). So prevent
2029 # We can't use the cached file handle after close(). So prevent
2028 # its usage.
2030 # its usage.
2029 self._writinghandles = None
2031 self._writinghandles = None
2030
2032
2031 new_dfh = self._datafp(b'w+')
2033 new_dfh = self._datafp(b'w+')
2032 new_dfh.truncate(0) # drop any potentially existing data
2034 new_dfh.truncate(0) # drop any potentially existing data
2033 try:
2035 try:
2034 with self._indexfp() as read_ifh:
2036 with self._indexfp() as read_ifh:
2035 for r in self:
2037 for r in self:
2036 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2038 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2037 if troffset <= self.start(r):
2039 if troffset <= self.start(r):
2038 trindex = r
2040 trindex = r
2039 new_dfh.flush()
2041 new_dfh.flush()
2040
2042
2041 with self.__index_new_fp() as fp:
2043 with self.__index_new_fp() as fp:
2042 self._format_flags &= ~FLAG_INLINE_DATA
2044 self._format_flags &= ~FLAG_INLINE_DATA
2043 self._inline = False
2045 self._inline = False
2044 for i in self:
2046 for i in self:
2045 e = self.index.entry_binary(i)
2047 e = self.index.entry_binary(i)
2046 if i == 0 and self._docket is None:
2048 if i == 0 and self._docket is None:
2047 header = self._format_flags | self._format_version
2049 header = self._format_flags | self._format_version
2048 header = self.index.pack_header(header)
2050 header = self.index.pack_header(header)
2049 e = header + e
2051 e = header + e
2050 fp.write(e)
2052 fp.write(e)
2051 if self._docket is not None:
2053 if self._docket is not None:
2052 self._docket.index_end = fp.tell()
2054 self._docket.index_end = fp.tell()
2053 # the temp file replace the real index when we exit the context
2055 # the temp file replace the real index when we exit the context
2054 # manager
2056 # manager
2055
2057
2056 tr.replace(self._indexfile, trindex * self.index.entry_size)
2058 tr.replace(self._indexfile, trindex * self.index.entry_size)
2057 nodemaputil.setup_persistent_nodemap(tr, self)
2059 nodemaputil.setup_persistent_nodemap(tr, self)
2058 self._chunkclear()
2060 self._chunkclear()
2059
2061
2060 if existing_handles:
2062 if existing_handles:
2061 # switched from inline to conventional reopen the index
2063 # switched from inline to conventional reopen the index
2062 ifh = self.__index_write_fp()
2064 ifh = self.__index_write_fp()
2063 self._writinghandles = (ifh, new_dfh)
2065 self._writinghandles = (ifh, new_dfh)
2064 new_dfh = None
2066 new_dfh = None
2065 finally:
2067 finally:
2066 if new_dfh is not None:
2068 if new_dfh is not None:
2067 new_dfh.close()
2069 new_dfh.close()
2068
2070
2069 def _nodeduplicatecallback(self, transaction, node):
2071 def _nodeduplicatecallback(self, transaction, node):
2070 """called when trying to add a node already stored."""
2072 """called when trying to add a node already stored."""
2071
2073
2072 @contextlib.contextmanager
2074 @contextlib.contextmanager
2073 def _writing(self, transaction):
2075 def _writing(self, transaction):
2074 if self._trypending:
2076 if self._trypending:
2075 msg = b'try to write in a `trypending` revlog: %s'
2077 msg = b'try to write in a `trypending` revlog: %s'
2076 msg %= self.display_id
2078 msg %= self.display_id
2077 raise error.ProgrammingError(msg)
2079 raise error.ProgrammingError(msg)
2078 if self._writinghandles is not None:
2080 if self._writinghandles is not None:
2079 yield
2081 yield
2080 else:
2082 else:
2081 r = len(self)
2083 r = len(self)
2082 dsize = 0
2084 dsize = 0
2083 if r:
2085 if r:
2084 dsize = self.end(r - 1)
2086 dsize = self.end(r - 1)
2085 dfh = None
2087 dfh = None
2086 if not self._inline:
2088 if not self._inline:
2087 try:
2089 try:
2088 dfh = self._datafp(b"r+")
2090 dfh = self._datafp(b"r+")
2089 dfh.seek(0, os.SEEK_END)
2091 dfh.seek(0, os.SEEK_END)
2090 except IOError as inst:
2092 except IOError as inst:
2091 if inst.errno != errno.ENOENT:
2093 if inst.errno != errno.ENOENT:
2092 raise
2094 raise
2093 dfh = self._datafp(b"w+")
2095 dfh = self._datafp(b"w+")
2094 transaction.add(self._datafile, dsize)
2096 transaction.add(self._datafile, dsize)
2095 try:
2097 try:
2096 isize = r * self.index.entry_size
2098 isize = r * self.index.entry_size
2097 ifh = self.__index_write_fp()
2099 ifh = self.__index_write_fp()
2098 if self._inline:
2100 if self._inline:
2099 transaction.add(self._indexfile, dsize + isize)
2101 transaction.add(self._indexfile, dsize + isize)
2100 else:
2102 else:
2101 transaction.add(self._indexfile, isize)
2103 transaction.add(self._indexfile, isize)
2102 try:
2104 try:
2103 self._writinghandles = (ifh, dfh)
2105 self._writinghandles = (ifh, dfh)
2104 try:
2106 try:
2105 yield
2107 yield
2106 if self._docket is not None:
2108 if self._docket is not None:
2107 self._write_docket(transaction)
2109 self._write_docket(transaction)
2108 finally:
2110 finally:
2109 self._writinghandles = None
2111 self._writinghandles = None
2110 finally:
2112 finally:
2111 ifh.close()
2113 ifh.close()
2112 finally:
2114 finally:
2113 if dfh is not None:
2115 if dfh is not None:
2114 dfh.close()
2116 dfh.close()
2115
2117
2116 def _write_docket(self, transaction):
2118 def _write_docket(self, transaction):
2117 """write the current docket on disk
2119 """write the current docket on disk
2118
2120
2119 Exist as a method to help changelog to implement transaction logic
2121 Exist as a method to help changelog to implement transaction logic
2120
2122
2121 We could also imagine using the same transaction logic for all revlog
2123 We could also imagine using the same transaction logic for all revlog
2122 since docket are cheap."""
2124 since docket are cheap."""
2123 self._docket.write(transaction)
2125 self._docket.write(transaction)
2124
2126
2125 def addrevision(
2127 def addrevision(
2126 self,
2128 self,
2127 text,
2129 text,
2128 transaction,
2130 transaction,
2129 link,
2131 link,
2130 p1,
2132 p1,
2131 p2,
2133 p2,
2132 cachedelta=None,
2134 cachedelta=None,
2133 node=None,
2135 node=None,
2134 flags=REVIDX_DEFAULT_FLAGS,
2136 flags=REVIDX_DEFAULT_FLAGS,
2135 deltacomputer=None,
2137 deltacomputer=None,
2136 sidedata=None,
2138 sidedata=None,
2137 ):
2139 ):
2138 """add a revision to the log
2140 """add a revision to the log
2139
2141
2140 text - the revision data to add
2142 text - the revision data to add
2141 transaction - the transaction object used for rollback
2143 transaction - the transaction object used for rollback
2142 link - the linkrev data to add
2144 link - the linkrev data to add
2143 p1, p2 - the parent nodeids of the revision
2145 p1, p2 - the parent nodeids of the revision
2144 cachedelta - an optional precomputed delta
2146 cachedelta - an optional precomputed delta
2145 node - nodeid of revision; typically node is not specified, and it is
2147 node - nodeid of revision; typically node is not specified, and it is
2146 computed by default as hash(text, p1, p2), however subclasses might
2148 computed by default as hash(text, p1, p2), however subclasses might
2147 use different hashing method (and override checkhash() in such case)
2149 use different hashing method (and override checkhash() in such case)
2148 flags - the known flags to set on the revision
2150 flags - the known flags to set on the revision
2149 deltacomputer - an optional deltacomputer instance shared between
2151 deltacomputer - an optional deltacomputer instance shared between
2150 multiple calls
2152 multiple calls
2151 """
2153 """
2152 if link == nullrev:
2154 if link == nullrev:
2153 raise error.RevlogError(
2155 raise error.RevlogError(
2154 _(b"attempted to add linkrev -1 to %s") % self.display_id
2156 _(b"attempted to add linkrev -1 to %s") % self.display_id
2155 )
2157 )
2156
2158
2157 if sidedata is None:
2159 if sidedata is None:
2158 sidedata = {}
2160 sidedata = {}
2159 elif sidedata and not self.hassidedata:
2161 elif sidedata and not self.hassidedata:
2160 raise error.ProgrammingError(
2162 raise error.ProgrammingError(
2161 _(b"trying to add sidedata to a revlog who don't support them")
2163 _(b"trying to add sidedata to a revlog who don't support them")
2162 )
2164 )
2163
2165
2164 if flags:
2166 if flags:
2165 node = node or self.hash(text, p1, p2)
2167 node = node or self.hash(text, p1, p2)
2166
2168
2167 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2169 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2168
2170
2169 # If the flag processor modifies the revision data, ignore any provided
2171 # If the flag processor modifies the revision data, ignore any provided
2170 # cachedelta.
2172 # cachedelta.
2171 if rawtext != text:
2173 if rawtext != text:
2172 cachedelta = None
2174 cachedelta = None
2173
2175
2174 if len(rawtext) > _maxentrysize:
2176 if len(rawtext) > _maxentrysize:
2175 raise error.RevlogError(
2177 raise error.RevlogError(
2176 _(
2178 _(
2177 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2179 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2178 )
2180 )
2179 % (self.display_id, len(rawtext))
2181 % (self.display_id, len(rawtext))
2180 )
2182 )
2181
2183
2182 node = node or self.hash(rawtext, p1, p2)
2184 node = node or self.hash(rawtext, p1, p2)
2183 rev = self.index.get_rev(node)
2185 rev = self.index.get_rev(node)
2184 if rev is not None:
2186 if rev is not None:
2185 return rev
2187 return rev
2186
2188
2187 if validatehash:
2189 if validatehash:
2188 self.checkhash(rawtext, node, p1=p1, p2=p2)
2190 self.checkhash(rawtext, node, p1=p1, p2=p2)
2189
2191
2190 return self.addrawrevision(
2192 return self.addrawrevision(
2191 rawtext,
2193 rawtext,
2192 transaction,
2194 transaction,
2193 link,
2195 link,
2194 p1,
2196 p1,
2195 p2,
2197 p2,
2196 node,
2198 node,
2197 flags,
2199 flags,
2198 cachedelta=cachedelta,
2200 cachedelta=cachedelta,
2199 deltacomputer=deltacomputer,
2201 deltacomputer=deltacomputer,
2200 sidedata=sidedata,
2202 sidedata=sidedata,
2201 )
2203 )
2202
2204
2203 def addrawrevision(
2205 def addrawrevision(
2204 self,
2206 self,
2205 rawtext,
2207 rawtext,
2206 transaction,
2208 transaction,
2207 link,
2209 link,
2208 p1,
2210 p1,
2209 p2,
2211 p2,
2210 node,
2212 node,
2211 flags,
2213 flags,
2212 cachedelta=None,
2214 cachedelta=None,
2213 deltacomputer=None,
2215 deltacomputer=None,
2214 sidedata=None,
2216 sidedata=None,
2215 ):
2217 ):
2216 """add a raw revision with known flags, node and parents
2218 """add a raw revision with known flags, node and parents
2217 useful when reusing a revision not stored in this revlog (ex: received
2219 useful when reusing a revision not stored in this revlog (ex: received
2218 over wire, or read from an external bundle).
2220 over wire, or read from an external bundle).
2219 """
2221 """
2220 with self._writing(transaction):
2222 with self._writing(transaction):
2221 return self._addrevision(
2223 return self._addrevision(
2222 node,
2224 node,
2223 rawtext,
2225 rawtext,
2224 transaction,
2226 transaction,
2225 link,
2227 link,
2226 p1,
2228 p1,
2227 p2,
2229 p2,
2228 flags,
2230 flags,
2229 cachedelta,
2231 cachedelta,
2230 deltacomputer=deltacomputer,
2232 deltacomputer=deltacomputer,
2231 sidedata=sidedata,
2233 sidedata=sidedata,
2232 )
2234 )
2233
2235
2234 def compress(self, data):
2236 def compress(self, data):
2235 """Generate a possibly-compressed representation of data."""
2237 """Generate a possibly-compressed representation of data."""
2236 if not data:
2238 if not data:
2237 return b'', data
2239 return b'', data
2238
2240
2239 compressed = self._compressor.compress(data)
2241 compressed = self._compressor.compress(data)
2240
2242
2241 if compressed:
2243 if compressed:
2242 # The revlog compressor added the header in the returned data.
2244 # The revlog compressor added the header in the returned data.
2243 return b'', compressed
2245 return b'', compressed
2244
2246
2245 if data[0:1] == b'\0':
2247 if data[0:1] == b'\0':
2246 return b'', data
2248 return b'', data
2247 return b'u', data
2249 return b'u', data
2248
2250
2249 def decompress(self, data):
2251 def decompress(self, data):
2250 """Decompress a revlog chunk.
2252 """Decompress a revlog chunk.
2251
2253
2252 The chunk is expected to begin with a header identifying the
2254 The chunk is expected to begin with a header identifying the
2253 format type so it can be routed to an appropriate decompressor.
2255 format type so it can be routed to an appropriate decompressor.
2254 """
2256 """
2255 if not data:
2257 if not data:
2256 return data
2258 return data
2257
2259
2258 # Revlogs are read much more frequently than they are written and many
2260 # Revlogs are read much more frequently than they are written and many
2259 # chunks only take microseconds to decompress, so performance is
2261 # chunks only take microseconds to decompress, so performance is
2260 # important here.
2262 # important here.
2261 #
2263 #
2262 # We can make a few assumptions about revlogs:
2264 # We can make a few assumptions about revlogs:
2263 #
2265 #
2264 # 1) the majority of chunks will be compressed (as opposed to inline
2266 # 1) the majority of chunks will be compressed (as opposed to inline
2265 # raw data).
2267 # raw data).
2266 # 2) decompressing *any* data will likely by at least 10x slower than
2268 # 2) decompressing *any* data will likely by at least 10x slower than
2267 # returning raw inline data.
2269 # returning raw inline data.
2268 # 3) we want to prioritize common and officially supported compression
2270 # 3) we want to prioritize common and officially supported compression
2269 # engines
2271 # engines
2270 #
2272 #
2271 # It follows that we want to optimize for "decompress compressed data
2273 # It follows that we want to optimize for "decompress compressed data
2272 # when encoded with common and officially supported compression engines"
2274 # when encoded with common and officially supported compression engines"
2273 # case over "raw data" and "data encoded by less common or non-official
2275 # case over "raw data" and "data encoded by less common or non-official
2274 # compression engines." That is why we have the inline lookup first
2276 # compression engines." That is why we have the inline lookup first
2275 # followed by the compengines lookup.
2277 # followed by the compengines lookup.
2276 #
2278 #
2277 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2279 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2278 # compressed chunks. And this matters for changelog and manifest reads.
2280 # compressed chunks. And this matters for changelog and manifest reads.
2279 t = data[0:1]
2281 t = data[0:1]
2280
2282
2281 if t == b'x':
2283 if t == b'x':
2282 try:
2284 try:
2283 return _zlibdecompress(data)
2285 return _zlibdecompress(data)
2284 except zlib.error as e:
2286 except zlib.error as e:
2285 raise error.RevlogError(
2287 raise error.RevlogError(
2286 _(b'revlog decompress error: %s')
2288 _(b'revlog decompress error: %s')
2287 % stringutil.forcebytestr(e)
2289 % stringutil.forcebytestr(e)
2288 )
2290 )
2289 # '\0' is more common than 'u' so it goes first.
2291 # '\0' is more common than 'u' so it goes first.
2290 elif t == b'\0':
2292 elif t == b'\0':
2291 return data
2293 return data
2292 elif t == b'u':
2294 elif t == b'u':
2293 return util.buffer(data, 1)
2295 return util.buffer(data, 1)
2294
2296
2295 try:
2297 try:
2296 compressor = self._decompressors[t]
2298 compressor = self._decompressors[t]
2297 except KeyError:
2299 except KeyError:
2298 try:
2300 try:
2299 engine = util.compengines.forrevlogheader(t)
2301 engine = util.compengines.forrevlogheader(t)
2300 compressor = engine.revlogcompressor(self._compengineopts)
2302 compressor = engine.revlogcompressor(self._compengineopts)
2301 self._decompressors[t] = compressor
2303 self._decompressors[t] = compressor
2302 except KeyError:
2304 except KeyError:
2303 raise error.RevlogError(
2305 raise error.RevlogError(
2304 _(b'unknown compression type %s') % binascii.hexlify(t)
2306 _(b'unknown compression type %s') % binascii.hexlify(t)
2305 )
2307 )
2306
2308
2307 return compressor.decompress(data)
2309 return compressor.decompress(data)
2308
2310
2309 def _addrevision(
2311 def _addrevision(
2310 self,
2312 self,
2311 node,
2313 node,
2312 rawtext,
2314 rawtext,
2313 transaction,
2315 transaction,
2314 link,
2316 link,
2315 p1,
2317 p1,
2316 p2,
2318 p2,
2317 flags,
2319 flags,
2318 cachedelta,
2320 cachedelta,
2319 alwayscache=False,
2321 alwayscache=False,
2320 deltacomputer=None,
2322 deltacomputer=None,
2321 sidedata=None,
2323 sidedata=None,
2322 ):
2324 ):
2323 """internal function to add revisions to the log
2325 """internal function to add revisions to the log
2324
2326
2325 see addrevision for argument descriptions.
2327 see addrevision for argument descriptions.
2326
2328
2327 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2329 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2328
2330
2329 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2331 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2330 be used.
2332 be used.
2331
2333
2332 invariants:
2334 invariants:
2333 - rawtext is optional (can be None); if not set, cachedelta must be set.
2335 - rawtext is optional (can be None); if not set, cachedelta must be set.
2334 if both are set, they must correspond to each other.
2336 if both are set, they must correspond to each other.
2335 """
2337 """
2336 if node == self.nullid:
2338 if node == self.nullid:
2337 raise error.RevlogError(
2339 raise error.RevlogError(
2338 _(b"%s: attempt to add null revision") % self.display_id
2340 _(b"%s: attempt to add null revision") % self.display_id
2339 )
2341 )
2340 if (
2342 if (
2341 node == self.nodeconstants.wdirid
2343 node == self.nodeconstants.wdirid
2342 or node in self.nodeconstants.wdirfilenodeids
2344 or node in self.nodeconstants.wdirfilenodeids
2343 ):
2345 ):
2344 raise error.RevlogError(
2346 raise error.RevlogError(
2345 _(b"%s: attempt to add wdir revision") % self.display_id
2347 _(b"%s: attempt to add wdir revision") % self.display_id
2346 )
2348 )
2347 if self._writinghandles is None:
2349 if self._writinghandles is None:
2348 msg = b'adding revision outside `revlog._writing` context'
2350 msg = b'adding revision outside `revlog._writing` context'
2349 raise error.ProgrammingError(msg)
2351 raise error.ProgrammingError(msg)
2350
2352
2351 if self._inline:
2353 if self._inline:
2352 fh = self._writinghandles[0]
2354 fh = self._writinghandles[0]
2353 else:
2355 else:
2354 fh = self._writinghandles[1]
2356 fh = self._writinghandles[1]
2355
2357
2356 btext = [rawtext]
2358 btext = [rawtext]
2357
2359
2358 curr = len(self)
2360 curr = len(self)
2359 prev = curr - 1
2361 prev = curr - 1
2360
2362
2361 offset = self._get_data_offset(prev)
2363 offset = self._get_data_offset(prev)
2362
2364
2363 if self._concurrencychecker:
2365 if self._concurrencychecker:
2364 ifh, dfh = self._writinghandles
2366 ifh, dfh = self._writinghandles
2365 if self._inline:
2367 if self._inline:
2366 # offset is "as if" it were in the .d file, so we need to add on
2368 # offset is "as if" it were in the .d file, so we need to add on
2367 # the size of the entry metadata.
2369 # the size of the entry metadata.
2368 self._concurrencychecker(
2370 self._concurrencychecker(
2369 ifh, self._indexfile, offset + curr * self.index.entry_size
2371 ifh, self._indexfile, offset + curr * self.index.entry_size
2370 )
2372 )
2371 else:
2373 else:
2372 # Entries in the .i are a consistent size.
2374 # Entries in the .i are a consistent size.
2373 self._concurrencychecker(
2375 self._concurrencychecker(
2374 ifh, self._indexfile, curr * self.index.entry_size
2376 ifh, self._indexfile, curr * self.index.entry_size
2375 )
2377 )
2376 self._concurrencychecker(dfh, self._datafile, offset)
2378 self._concurrencychecker(dfh, self._datafile, offset)
2377
2379
2378 p1r, p2r = self.rev(p1), self.rev(p2)
2380 p1r, p2r = self.rev(p1), self.rev(p2)
2379
2381
2380 # full versions are inserted when the needed deltas
2382 # full versions are inserted when the needed deltas
2381 # become comparable to the uncompressed text
2383 # become comparable to the uncompressed text
2382 if rawtext is None:
2384 if rawtext is None:
2383 # need rawtext size, before changed by flag processors, which is
2385 # need rawtext size, before changed by flag processors, which is
2384 # the non-raw size. use revlog explicitly to avoid filelog's extra
2386 # the non-raw size. use revlog explicitly to avoid filelog's extra
2385 # logic that might remove metadata size.
2387 # logic that might remove metadata size.
2386 textlen = mdiff.patchedsize(
2388 textlen = mdiff.patchedsize(
2387 revlog.size(self, cachedelta[0]), cachedelta[1]
2389 revlog.size(self, cachedelta[0]), cachedelta[1]
2388 )
2390 )
2389 else:
2391 else:
2390 textlen = len(rawtext)
2392 textlen = len(rawtext)
2391
2393
2392 if deltacomputer is None:
2394 if deltacomputer is None:
2393 deltacomputer = deltautil.deltacomputer(self)
2395 deltacomputer = deltautil.deltacomputer(self)
2394
2396
2395 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2397 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2396
2398
2397 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2399 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2398
2400
2399 if sidedata and self.hassidedata:
2401 if sidedata and self.hassidedata:
2400 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2402 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2401 sidedata_offset = offset + deltainfo.deltalen
2403 sidedata_offset = offset + deltainfo.deltalen
2402 else:
2404 else:
2403 serialized_sidedata = b""
2405 serialized_sidedata = b""
2404 # Don't store the offset if the sidedata is empty, that way
2406 # Don't store the offset if the sidedata is empty, that way
2405 # we can easily detect empty sidedata and they will be no different
2407 # we can easily detect empty sidedata and they will be no different
2406 # than ones we manually add.
2408 # than ones we manually add.
2407 sidedata_offset = 0
2409 sidedata_offset = 0
2408
2410
2409 e = (
2411 e = (
2410 offset_type(offset, flags),
2412 offset_type(offset, flags),
2411 deltainfo.deltalen,
2413 deltainfo.deltalen,
2412 textlen,
2414 textlen,
2413 deltainfo.base,
2415 deltainfo.base,
2414 link,
2416 link,
2415 p1r,
2417 p1r,
2416 p2r,
2418 p2r,
2417 node,
2419 node,
2418 sidedata_offset,
2420 sidedata_offset,
2419 len(serialized_sidedata),
2421 len(serialized_sidedata),
2420 )
2422 )
2421
2423
2422 self.index.append(e)
2424 self.index.append(e)
2423 entry = self.index.entry_binary(curr)
2425 entry = self.index.entry_binary(curr)
2424 if curr == 0 and self._docket is None:
2426 if curr == 0 and self._docket is None:
2425 header = self._format_flags | self._format_version
2427 header = self._format_flags | self._format_version
2426 header = self.index.pack_header(header)
2428 header = self.index.pack_header(header)
2427 entry = header + entry
2429 entry = header + entry
2428 self._writeentry(
2430 self._writeentry(
2429 transaction,
2431 transaction,
2430 entry,
2432 entry,
2431 deltainfo.data,
2433 deltainfo.data,
2432 link,
2434 link,
2433 offset,
2435 offset,
2434 serialized_sidedata,
2436 serialized_sidedata,
2435 )
2437 )
2436
2438
2437 rawtext = btext[0]
2439 rawtext = btext[0]
2438
2440
2439 if alwayscache and rawtext is None:
2441 if alwayscache and rawtext is None:
2440 rawtext = deltacomputer.buildtext(revinfo, fh)
2442 rawtext = deltacomputer.buildtext(revinfo, fh)
2441
2443
2442 if type(rawtext) == bytes: # only accept immutable objects
2444 if type(rawtext) == bytes: # only accept immutable objects
2443 self._revisioncache = (node, curr, rawtext)
2445 self._revisioncache = (node, curr, rawtext)
2444 self._chainbasecache[curr] = deltainfo.chainbase
2446 self._chainbasecache[curr] = deltainfo.chainbase
2445 return curr
2447 return curr
2446
2448
    def _get_data_offset(self, prev):
        """Returns the current offset in the (in-transaction) data file.

        Versions < 2 of the revlog can get this O(1), revlog v2 needs a docket
        file to store that information: since sidedata can be rewritten to the
        end of the data file within a transaction, you can have cases where, for
        example, rev `n` does not have sidedata while rev `n - 1` does, leading
        to `n - 1`'s sidedata being written after `n`'s data.

        TODO cache this in a docket file before getting out of experimental."""
        if self._format_version != REVLOGV2:
            # pre-v2 revlogs: data ends exactly where the previous revision
            # ends, so no scan is necessary.
            return self.end(prev)

        # REVLOGV2: scan every index entry and keep the furthest byte reached
        # by either revision data or sidedata.
        offset = 0
        for rev, entry in enumerate(self.index):
            # entry[8] is the sidedata offset, entry[9] its length (see the
            # index tuple built in _addrevision).
            sidedata_end = entry[8] + entry[9]
            # Sidedata for a previous rev has potentially been written after
            # this rev's end, so take the max.
            offset = max(self.end(rev), offset, sidedata_end)
        return offset
2466
2468
    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
        """Write one revision (index entry + delta data + sidedata) to disk.

        ``entry`` is the packed binary index record, ``data`` a (header, body)
        pair of compressed delta chunks, ``offset`` the expected write offset
        in the data file.  Must run inside a ``_writing`` context.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            # with a docket, the authoritative index end is tracked there,
            # not by the on-disk file size.
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            # separate data file: register both files with the transaction so
            # they can be truncated back on rollback.
            transaction.add(self._datafile, offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                dfh.write(sidedata)
            ifh.write(entry)
        else:
            # inline revlog: data is interleaved in the index file.
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            if sidedata:
                ifh.write(sidedata)
            # may split the revlog out of inline mode once it grows too big
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            self._docket.index_end = self._writinghandles[0].tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)
2514
2516
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.

        Returns True if at least one revision was added or already present,
        False if the incoming group was empty.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        # ``empty`` stays True until we see at least one (possibly duplicate)
        # revision; it determines the return value.
        empty = True
        try:
            with self._writing(transaction):
                deltacomputer = deltautil.deltacomputer(self)
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            # always release the reentrancy guard, even on error
            self._adding_group = False
        return not empty
2623
2625
2624 def iscensored(self, rev):
2626 def iscensored(self, rev):
2625 """Check if a file revision is censored."""
2627 """Check if a file revision is censored."""
2626 if not self._censorable:
2628 if not self._censorable:
2627 return False
2629 return False
2628
2630
2629 return self.flags(rev) & REVIDX_ISCENSORED
2631 return self.flags(rev) & REVIDX_ISCENSORED
2630
2632
2631 def _peek_iscensored(self, baserev, delta):
2633 def _peek_iscensored(self, baserev, delta):
2632 """Quickly check if a delta produces a censored revision."""
2634 """Quickly check if a delta produces a censored revision."""
2633 if not self._censorable:
2635 if not self._censorable:
2634 return False
2636 return False
2635
2637
2636 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2638 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2637
2639
2638 def getstrippoint(self, minlink):
2640 def getstrippoint(self, minlink):
2639 """find the minimum rev that must be stripped to strip the linkrev
2641 """find the minimum rev that must be stripped to strip the linkrev
2640
2642
2641 Returns a tuple containing the minimum rev and a set of all revs that
2643 Returns a tuple containing the minimum rev and a set of all revs that
2642 have linkrevs that will be broken by this strip.
2644 have linkrevs that will be broken by this strip.
2643 """
2645 """
2644 return storageutil.resolvestripinfo(
2646 return storageutil.resolvestripinfo(
2645 minlink,
2647 minlink,
2646 len(self) - 1,
2648 len(self) - 1,
2647 self.headrevs(),
2649 self.headrevs(),
2648 self.linkrev,
2650 self.linkrev,
2649 self.parentrevs,
2651 self.parentrevs,
2650 )
2652 )
2651
2653
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            # nothing to strip: the first affected rev is past the tip
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            # separate data file: truncate data at ``end``, index at the
            # byte position of entry ``rev``.
            transaction.add(self._datafile, end)
            end = rev * self.index.entry_size
        else:
            # inline: index and data are interleaved in a single file
            end += rev * self.index.entry_size

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could, leverage the docket while stripping. However it is
            # not powerfull enough at the time of this comment
            self._docket.index_end = end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        # drop the in-memory index entries for the stripped revisions
        del self.index[rev:-1]
2694
2696
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            # a missing data file is fine (e.g. inline revlog); anything
            # else is a real error
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            # number of whole index entries actually present on disk
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                # inline revlogs interleave data with index entries, so
                # recompute both figures from per-revision lengths.
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
2738
2740
2739 def files(self):
2741 def files(self):
2740 res = [self._indexfile]
2742 res = [self._indexfile]
2741 if not self._inline:
2743 if not self._inline:
2742 res.append(self._datafile)
2744 res.append(self._datafile)
2743 return res
2745 return res
2744
2746
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        """Emit revision delta objects for ``nodes`` via the shared
        ``storageutil.emitrevisions`` helper, after normalizing the
        requested ordering and delta mode for this revlog's capabilities."""
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            # without generaldelta, storage order is the only sane default
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            # no delta chains stored: fall back to emitting full revisions
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
        )
2783
2785
    # Delta-reuse policies accepted by ``clone()``; the ``clone()`` docstring
    # documents the exact semantics of each value.
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    # The complete set of valid ``deltareuse`` values, used for validation.
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2791
2793
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument control how deltas from the existing revlog
        are preserved in the destination revlog. The argument can have the
        following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revision will be re-added as if their were new content. This is
          slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
          eg: large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            # temporarily tune the destination revlog's delta settings to
            # match the requested reuse policy
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            # always restore the destination revlog's original settings
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
2890
2892
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                # re-add through the full, high-level addrevision() path
                text, sidedata = self._revisiondata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        # reuse the stored delta chunk as-is
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext, sidedata = self._revisiondata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)
2973
2975
2974 def censorrevision(self, tr, censornode, tombstone=b''):
2976 def censorrevision(self, tr, censornode, tombstone=b''):
2975 if self._format_version == REVLOGV0:
2977 if self._format_version == REVLOGV0:
2976 raise error.RevlogError(
2978 raise error.RevlogError(
2977 _(b'cannot censor with version %d revlogs')
2979 _(b'cannot censor with version %d revlogs')
2978 % self._format_version
2980 % self._format_version
2979 )
2981 )
2980
2982
2981 censorrev = self.rev(censornode)
2983 censorrev = self.rev(censornode)
2982 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2984 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2983
2985
2984 if len(tombstone) > self.rawsize(censorrev):
2986 if len(tombstone) > self.rawsize(censorrev):
2985 raise error.Abort(
2987 raise error.Abort(
2986 _(b'censor tombstone must be no longer than censored data')
2988 _(b'censor tombstone must be no longer than censored data')
2987 )
2989 )
2988
2990
2989 # Rewriting the revlog in place is hard. Our strategy for censoring is
2991 # Rewriting the revlog in place is hard. Our strategy for censoring is
2990 # to create a new revlog, copy all revisions to it, then replace the
2992 # to create a new revlog, copy all revisions to it, then replace the
2991 # revlogs on transaction close.
2993 # revlogs on transaction close.
2992 #
2994 #
2993 # This is a bit dangerous. We could easily have a mismatch of state.
2995 # This is a bit dangerous. We could easily have a mismatch of state.
2994 newrl = revlog(
2996 newrl = revlog(
2995 self.opener,
2997 self.opener,
2996 target=self.target,
2998 target=self.target,
2997 radix=self.radix,
2999 radix=self.radix,
2998 postfix=b'tmpcensored',
3000 postfix=b'tmpcensored',
2999 censorable=True,
3001 censorable=True,
3000 )
3002 )
3001 newrl._format_version = self._format_version
3003 newrl._format_version = self._format_version
3002 newrl._format_flags = self._format_flags
3004 newrl._format_flags = self._format_flags
3003 newrl._generaldelta = self._generaldelta
3005 newrl._generaldelta = self._generaldelta
3004 newrl._parse_index = self._parse_index
3006 newrl._parse_index = self._parse_index
3005
3007
3006 for rev in self.revs():
3008 for rev in self.revs():
3007 node = self.node(rev)
3009 node = self.node(rev)
3008 p1, p2 = self.parents(node)
3010 p1, p2 = self.parents(node)
3009
3011
3010 if rev == censorrev:
3012 if rev == censorrev:
3011 newrl.addrawrevision(
3013 newrl.addrawrevision(
3012 tombstone,
3014 tombstone,
3013 tr,
3015 tr,
3014 self.linkrev(censorrev),
3016 self.linkrev(censorrev),
3015 p1,
3017 p1,
3016 p2,
3018 p2,
3017 censornode,
3019 censornode,
3018 REVIDX_ISCENSORED,
3020 REVIDX_ISCENSORED,
3019 )
3021 )
3020
3022
3021 if newrl.deltaparent(rev) != nullrev:
3023 if newrl.deltaparent(rev) != nullrev:
3022 raise error.Abort(
3024 raise error.Abort(
3023 _(
3025 _(
3024 b'censored revision stored as delta; '
3026 b'censored revision stored as delta; '
3025 b'cannot censor'
3027 b'cannot censor'
3026 ),
3028 ),
3027 hint=_(
3029 hint=_(
3028 b'censoring of revlogs is not '
3030 b'censoring of revlogs is not '
3029 b'fully implemented; please report '
3031 b'fully implemented; please report '
3030 b'this bug'
3032 b'this bug'
3031 ),
3033 ),
3032 )
3034 )
3033 continue
3035 continue
3034
3036
3035 if self.iscensored(rev):
3037 if self.iscensored(rev):
3036 if self.deltaparent(rev) != nullrev:
3038 if self.deltaparent(rev) != nullrev:
3037 raise error.Abort(
3039 raise error.Abort(
3038 _(
3040 _(
3039 b'cannot censor due to censored '
3041 b'cannot censor due to censored '
3040 b'revision having delta stored'
3042 b'revision having delta stored'
3041 )
3043 )
3042 )
3044 )
3043 rawtext = self._chunk(rev)
3045 rawtext = self._chunk(rev)
3044 else:
3046 else:
3045 rawtext = self.rawdata(rev)
3047 rawtext = self.rawdata(rev)
3046
3048
3047 newrl.addrawrevision(
3049 newrl.addrawrevision(
3048 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3050 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3049 )
3051 )
3050
3052
3051 tr.addbackup(self._indexfile, location=b'store')
3053 tr.addbackup(self._indexfile, location=b'store')
3052 if not self._inline:
3054 if not self._inline:
3053 tr.addbackup(self._datafile, location=b'store')
3055 tr.addbackup(self._datafile, location=b'store')
3054
3056
3055 self.opener.rename(newrl._indexfile, self._indexfile)
3057 self.opener.rename(newrl._indexfile, self._indexfile)
3056 if not self._inline:
3058 if not self._inline:
3057 self.opener.rename(newrl._datafile, self._datafile)
3059 self.opener.rename(newrl._datafile, self._datafile)
3058
3060
3059 self.clearcaches()
3061 self.clearcaches()
3060 self._loadindex()
3062 self._loadindex()
3061
3063
3062 def verifyintegrity(self, state):
3064 def verifyintegrity(self, state):
3063 """Verifies the integrity of the revlog.
3065 """Verifies the integrity of the revlog.
3064
3066
3065 Yields ``revlogproblem`` instances describing problems that are
3067 Yields ``revlogproblem`` instances describing problems that are
3066 found.
3068 found.
3067 """
3069 """
3068 dd, di = self.checksize()
3070 dd, di = self.checksize()
3069 if dd:
3071 if dd:
3070 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3072 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3071 if di:
3073 if di:
3072 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3074 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3073
3075
3074 version = self._format_version
3076 version = self._format_version
3075
3077
3076 # The verifier tells us what version revlog we should be.
3078 # The verifier tells us what version revlog we should be.
3077 if version != state[b'expectedversion']:
3079 if version != state[b'expectedversion']:
3078 yield revlogproblem(
3080 yield revlogproblem(
3079 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3081 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3080 % (self.display_id, version, state[b'expectedversion'])
3082 % (self.display_id, version, state[b'expectedversion'])
3081 )
3083 )
3082
3084
3083 state[b'skipread'] = set()
3085 state[b'skipread'] = set()
3084 state[b'safe_renamed'] = set()
3086 state[b'safe_renamed'] = set()
3085
3087
3086 for rev in self:
3088 for rev in self:
3087 node = self.node(rev)
3089 node = self.node(rev)
3088
3090
3089 # Verify contents. 4 cases to care about:
3091 # Verify contents. 4 cases to care about:
3090 #
3092 #
3091 # common: the most common case
3093 # common: the most common case
3092 # rename: with a rename
3094 # rename: with a rename
3093 # meta: file content starts with b'\1\n', the metadata
3095 # meta: file content starts with b'\1\n', the metadata
3094 # header defined in filelog.py, but without a rename
3096 # header defined in filelog.py, but without a rename
3095 # ext: content stored externally
3097 # ext: content stored externally
3096 #
3098 #
3097 # More formally, their differences are shown below:
3099 # More formally, their differences are shown below:
3098 #
3100 #
3099 # | common | rename | meta | ext
3101 # | common | rename | meta | ext
3100 # -------------------------------------------------------
3102 # -------------------------------------------------------
3101 # flags() | 0 | 0 | 0 | not 0
3103 # flags() | 0 | 0 | 0 | not 0
3102 # renamed() | False | True | False | ?
3104 # renamed() | False | True | False | ?
3103 # rawtext[0:2]=='\1\n'| False | True | True | ?
3105 # rawtext[0:2]=='\1\n'| False | True | True | ?
3104 #
3106 #
3105 # "rawtext" means the raw text stored in revlog data, which
3107 # "rawtext" means the raw text stored in revlog data, which
3106 # could be retrieved by "rawdata(rev)". "text"
3108 # could be retrieved by "rawdata(rev)". "text"
3107 # mentioned below is "revision(rev)".
3109 # mentioned below is "revision(rev)".
3108 #
3110 #
3109 # There are 3 different lengths stored physically:
3111 # There are 3 different lengths stored physically:
3110 # 1. L1: rawsize, stored in revlog index
3112 # 1. L1: rawsize, stored in revlog index
3111 # 2. L2: len(rawtext), stored in revlog data
3113 # 2. L2: len(rawtext), stored in revlog data
3112 # 3. L3: len(text), stored in revlog data if flags==0, or
3114 # 3. L3: len(text), stored in revlog data if flags==0, or
3113 # possibly somewhere else if flags!=0
3115 # possibly somewhere else if flags!=0
3114 #
3116 #
3115 # L1 should be equal to L2. L3 could be different from them.
3117 # L1 should be equal to L2. L3 could be different from them.
3116 # "text" may or may not affect commit hash depending on flag
3118 # "text" may or may not affect commit hash depending on flag
3117 # processors (see flagutil.addflagprocessor).
3119 # processors (see flagutil.addflagprocessor).
3118 #
3120 #
3119 # | common | rename | meta | ext
3121 # | common | rename | meta | ext
3120 # -------------------------------------------------
3122 # -------------------------------------------------
3121 # rawsize() | L1 | L1 | L1 | L1
3123 # rawsize() | L1 | L1 | L1 | L1
3122 # size() | L1 | L2-LM | L1(*) | L1 (?)
3124 # size() | L1 | L2-LM | L1(*) | L1 (?)
3123 # len(rawtext) | L2 | L2 | L2 | L2
3125 # len(rawtext) | L2 | L2 | L2 | L2
3124 # len(text) | L2 | L2 | L2 | L3
3126 # len(text) | L2 | L2 | L2 | L3
3125 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3127 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3126 #
3128 #
3127 # LM: length of metadata, depending on rawtext
3129 # LM: length of metadata, depending on rawtext
3128 # (*): not ideal, see comment in filelog.size
3130 # (*): not ideal, see comment in filelog.size
3129 # (?): could be "- len(meta)" if the resolved content has
3131 # (?): could be "- len(meta)" if the resolved content has
3130 # rename metadata
3132 # rename metadata
3131 #
3133 #
3132 # Checks needed to be done:
3134 # Checks needed to be done:
3133 # 1. length check: L1 == L2, in all cases.
3135 # 1. length check: L1 == L2, in all cases.
3134 # 2. hash check: depending on flag processor, we may need to
3136 # 2. hash check: depending on flag processor, we may need to
3135 # use either "text" (external), or "rawtext" (in revlog).
3137 # use either "text" (external), or "rawtext" (in revlog).
3136
3138
3137 try:
3139 try:
3138 skipflags = state.get(b'skipflags', 0)
3140 skipflags = state.get(b'skipflags', 0)
3139 if skipflags:
3141 if skipflags:
3140 skipflags &= self.flags(rev)
3142 skipflags &= self.flags(rev)
3141
3143
3142 _verify_revision(self, skipflags, state, node)
3144 _verify_revision(self, skipflags, state, node)
3143
3145
3144 l1 = self.rawsize(rev)
3146 l1 = self.rawsize(rev)
3145 l2 = len(self.rawdata(node))
3147 l2 = len(self.rawdata(node))
3146
3148
3147 if l1 != l2:
3149 if l1 != l2:
3148 yield revlogproblem(
3150 yield revlogproblem(
3149 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3151 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3150 node=node,
3152 node=node,
3151 )
3153 )
3152
3154
3153 except error.CensoredNodeError:
3155 except error.CensoredNodeError:
3154 if state[b'erroroncensored']:
3156 if state[b'erroroncensored']:
3155 yield revlogproblem(
3157 yield revlogproblem(
3156 error=_(b'censored file data'), node=node
3158 error=_(b'censored file data'), node=node
3157 )
3159 )
3158 state[b'skipread'].add(node)
3160 state[b'skipread'].add(node)
3159 except Exception as e:
3161 except Exception as e:
3160 yield revlogproblem(
3162 yield revlogproblem(
3161 error=_(b'unpacking %s: %s')
3163 error=_(b'unpacking %s: %s')
3162 % (short(node), stringutil.forcebytestr(e)),
3164 % (short(node), stringutil.forcebytestr(e)),
3163 node=node,
3165 node=node,
3164 )
3166 )
3165 state[b'skipread'].add(node)
3167 state[b'skipread'].add(node)
3166
3168
3167 def storageinfo(
3169 def storageinfo(
3168 self,
3170 self,
3169 exclusivefiles=False,
3171 exclusivefiles=False,
3170 sharedfiles=False,
3172 sharedfiles=False,
3171 revisionscount=False,
3173 revisionscount=False,
3172 trackedsize=False,
3174 trackedsize=False,
3173 storedsize=False,
3175 storedsize=False,
3174 ):
3176 ):
3175 d = {}
3177 d = {}
3176
3178
3177 if exclusivefiles:
3179 if exclusivefiles:
3178 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3180 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3179 if not self._inline:
3181 if not self._inline:
3180 d[b'exclusivefiles'].append((self.opener, self._datafile))
3182 d[b'exclusivefiles'].append((self.opener, self._datafile))
3181
3183
3182 if sharedfiles:
3184 if sharedfiles:
3183 d[b'sharedfiles'] = []
3185 d[b'sharedfiles'] = []
3184
3186
3185 if revisionscount:
3187 if revisionscount:
3186 d[b'revisionscount'] = len(self)
3188 d[b'revisionscount'] = len(self)
3187
3189
3188 if trackedsize:
3190 if trackedsize:
3189 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3191 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3190
3192
3191 if storedsize:
3193 if storedsize:
3192 d[b'storedsize'] = sum(
3194 d[b'storedsize'] = sum(
3193 self.opener.stat(path).st_size for path in self.files()
3195 self.opener.stat(path).st_size for path in self.files()
3194 )
3196 )
3195
3197
3196 return d
3198 return d
3197
3199
3198 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3200 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3199 if not self.hassidedata:
3201 if not self.hassidedata:
3200 return
3202 return
3201 # revlog formats with sidedata support does not support inline
3203 # revlog formats with sidedata support does not support inline
3202 assert not self._inline
3204 assert not self._inline
3203 if not helpers[1] and not helpers[2]:
3205 if not helpers[1] and not helpers[2]:
3204 # Nothing to generate or remove
3206 # Nothing to generate or remove
3205 return
3207 return
3206
3208
3207 new_entries = []
3209 new_entries = []
3208 # append the new sidedata
3210 # append the new sidedata
3209 with self._writing(transaction):
3211 with self._writing(transaction):
3210 ifh, dfh = self._writinghandles
3212 ifh, dfh = self._writinghandles
3211 dfh.seek(0, os.SEEK_END)
3213 dfh.seek(0, os.SEEK_END)
3212 current_offset = dfh.tell()
3214 current_offset = dfh.tell()
3213 for rev in range(startrev, endrev + 1):
3215 for rev in range(startrev, endrev + 1):
3214 entry = self.index[rev]
3216 entry = self.index[rev]
3215 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3217 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3216 store=self,
3218 store=self,
3217 sidedata_helpers=helpers,
3219 sidedata_helpers=helpers,
3218 sidedata={},
3220 sidedata={},
3219 rev=rev,
3221 rev=rev,
3220 )
3222 )
3221
3223
3222 serialized_sidedata = sidedatautil.serialize_sidedata(
3224 serialized_sidedata = sidedatautil.serialize_sidedata(
3223 new_sidedata
3225 new_sidedata
3224 )
3226 )
3225 if entry[8] != 0 or entry[9] != 0:
3227 if entry[8] != 0 or entry[9] != 0:
3226 # rewriting entries that already have sidedata is not
3228 # rewriting entries that already have sidedata is not
3227 # supported yet, because it introduces garbage data in the
3229 # supported yet, because it introduces garbage data in the
3228 # revlog.
3230 # revlog.
3229 msg = b"rewriting existing sidedata is not supported yet"
3231 msg = b"rewriting existing sidedata is not supported yet"
3230 raise error.Abort(msg)
3232 raise error.Abort(msg)
3231
3233
3232 # Apply (potential) flags to add and to remove after running
3234 # Apply (potential) flags to add and to remove after running
3233 # the sidedata helpers
3235 # the sidedata helpers
3234 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3236 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3235 entry = (new_offset_flags,) + entry[1:8]
3237 entry = (new_offset_flags,) + entry[1:8]
3236 entry += (current_offset, len(serialized_sidedata))
3238 entry += (current_offset, len(serialized_sidedata))
3237
3239
3238 # the sidedata computation might have move the file cursors around
3240 # the sidedata computation might have move the file cursors around
3239 dfh.seek(current_offset, os.SEEK_SET)
3241 dfh.seek(current_offset, os.SEEK_SET)
3240 dfh.write(serialized_sidedata)
3242 dfh.write(serialized_sidedata)
3241 new_entries.append(entry)
3243 new_entries.append(entry)
3242 current_offset += len(serialized_sidedata)
3244 current_offset += len(serialized_sidedata)
3243
3245
3244 # rewrite the new index entries
3246 # rewrite the new index entries
3245 ifh.seek(startrev * self.index.entry_size)
3247 ifh.seek(startrev * self.index.entry_size)
3246 for i, e in enumerate(new_entries):
3248 for i, e in enumerate(new_entries):
3247 rev = startrev + i
3249 rev = startrev + i
3248 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3250 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3249 packed = self.index.entry_binary(rev)
3251 packed = self.index.entry_binary(rev)
3250 if rev == 0 and self._docket is None:
3252 if rev == 0 and self._docket is None:
3251 header = self._format_flags | self._format_version
3253 header = self._format_flags | self._format_version
3252 header = self.index.pack_header(header)
3254 header = self.index.pack_header(header)
3253 packed = header + packed
3255 packed = header + packed
3254 ifh.write(packed)
3256 ifh.write(packed)
@@ -1,100 +1,138 b''
1 # docket - code related to revlog "docket"
1 # docket - code related to revlog "docket"
2 #
2 #
3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 ### Revlog docket file
8 ### Revlog docket file
9 #
9 #
10 # The revlog is stored on disk using multiple files:
10 # The revlog is stored on disk using multiple files:
11 #
11 #
12 # * a small docket file, containing metadata and a pointer,
12 # * a small docket file, containing metadata and a pointer,
13 #
13 #
14 # * an index file, containing fixed width information about revisions,
14 # * an index file, containing fixed width information about revisions,
15 #
15 #
16 # * a data file, containing variable width data for these revisions,
16 # * a data file, containing variable width data for these revisions,
17
17
18 from __future__ import absolute_import
18 from __future__ import absolute_import
19
19
20 import struct
20 import struct
21
21
22 from .. import (
23 error,
24 )
25
22 from . import (
26 from . import (
23 constants,
27 constants,
24 )
28 )
25
29
26 # Docket format
30 # Docket format
27 #
31 #
28 # * 4 bytes: revlog version
32 # * 4 bytes: revlog version
29 # | This is mandatory as docket must be compatible with the previous
33 # | This is mandatory as docket must be compatible with the previous
30 # | revlog index header.
34 # | revlog index header.
31 # * 8 bytes: size of index data
35 # * 8 bytes: size of index data
32 S_HEADER = struct.Struct(constants.INDEX_HEADER.format + 'L')
36 # * 8 bytes: pending size of index data
37 S_HEADER = struct.Struct(constants.INDEX_HEADER.format + 'LL')
33
38
34
39
35 class RevlogDocket(object):
40 class RevlogDocket(object):
36 """metadata associated with revlog"""
41 """metadata associated with revlog"""
37
42
38 def __init__(self, revlog, version_header=None, index_end=0):
43 def __init__(
44 self,
45 revlog,
46 use_pending=False,
47 version_header=None,
48 index_end=0,
49 pending_index_end=0,
50 ):
39 self._version_header = version_header
51 self._version_header = version_header
52 self._read_only = bool(use_pending)
40 self._dirty = False
53 self._dirty = False
41 self._radix = revlog.radix
54 self._radix = revlog.radix
42 self._path = revlog._docket_file
55 self._path = revlog._docket_file
43 self._opener = revlog.opener
56 self._opener = revlog.opener
44 self._index_end = index_end
57 # this assert should be True as long as we have a single index filename
58 assert index_end <= pending_index_end
59 self._initial_index_end = index_end
60 self._pending_index_end = pending_index_end
61 if use_pending:
62 self._index_end = self._pending_index_end
63 else:
64 self._index_end = self._initial_index_end
45
65
46 def index_filepath(self):
66 def index_filepath(self):
47 """file path to the current index file associated to this docket"""
67 """file path to the current index file associated to this docket"""
48 # very simplistic version at first
68 # very simplistic version at first
49 return b"%s.idx" % self._radix
69 return b"%s.idx" % self._radix
50
70
51 @property
71 @property
52 def index_end(self):
72 def index_end(self):
53 return self._index_end
73 return self._index_end
54
74
55 @index_end.setter
75 @index_end.setter
56 def index_end(self, new_size):
76 def index_end(self, new_size):
57 if new_size != self._index_end:
77 if new_size != self._index_end:
58 self._index_end = new_size
78 self._index_end = new_size
59 self._dirty = True
79 self._dirty = True
60
80
61 def write(self, transaction, stripping=False):
81 def write(self, transaction, pending=False, stripping=False):
62 """write the modification of disk if any
82 """write the modification of disk if any
63
83
64 This make the new content visible to all process"""
84 This make the new content visible to all process"""
65 if self._dirty:
85 if not self._dirty:
86 return False
87 else:
88 if self._read_only:
89 msg = b'writing read-only docket: %s'
90 msg %= self._path
91 raise error.ProgrammingError(msg)
66 if not stripping:
92 if not stripping:
67 # XXX we could, leverage the docket while stripping. However it
93 # XXX we could, leverage the docket while stripping. However it
68 # is not powerfull enough at the time of this comment
94 # is not powerfull enough at the time of this comment
69 transaction.addbackup(self._path, location=b'store')
95 transaction.addbackup(self._path, location=b'store')
70 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
96 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
71 f.write(self._serialize())
97 f.write(self._serialize(pending=pending))
72 self._dirty = False
98 # if pending we still need to the write final data eventually
99 self._dirty = pending
100 return True
73
101
74 def _serialize(self):
102 def _serialize(self, pending=False):
103 if pending:
104 official_index_end = self._initial_index_end
105 else:
106 official_index_end = self._index_end
107
108 # this assert should be True as long as we have a single index filename
109 assert official_index_end <= self._index_end
75 data = (
110 data = (
76 self._version_header,
111 self._version_header,
112 official_index_end,
77 self._index_end,
113 self._index_end,
78 )
114 )
79 return S_HEADER.pack(*data)
115 return S_HEADER.pack(*data)
80
116
81
117
82 def default_docket(revlog, version_header):
118 def default_docket(revlog, version_header):
83 """given a revlog version a new docket object for the given revlog"""
119 """given a revlog version a new docket object for the given revlog"""
84 if (version_header & 0xFFFF) != constants.REVLOGV2:
120 if (version_header & 0xFFFF) != constants.REVLOGV2:
85 return None
121 return None
86 docket = RevlogDocket(revlog, version_header=version_header)
122 docket = RevlogDocket(revlog, version_header=version_header)
87 docket._dirty = True
123 docket._dirty = True
88 return docket
124 return docket
89
125
90
126
91 def parse_docket(revlog, data):
127 def parse_docket(revlog, data, use_pending=False):
92 """given some docket data return a docket object for the given revlog"""
128 """given some docket data return a docket object for the given revlog"""
93 header = S_HEADER.unpack(data[: S_HEADER.size])
129 header = S_HEADER.unpack(data[: S_HEADER.size])
94 version_header, index_size = header
130 version_header, index_size, pending_index_size = header
95 docket = RevlogDocket(
131 docket = RevlogDocket(
96 revlog,
132 revlog,
133 use_pending=use_pending,
97 version_header=version_header,
134 version_header=version_header,
98 index_end=index_size,
135 index_end=index_size,
136 pending_index_end=pending_index_size,
99 )
137 )
100 return docket
138 return docket
@@ -1,268 +1,262 b''
1 Test transaction safety
1 Test transaction safety
2 =======================
2 =======================
3
3
4 #testcases revlogv1 revlogv2
4 #testcases revlogv1 revlogv2
5
5
6 #if revlogv1
6 #if revlogv1
7
7
8 $ cat << EOF >> $HGRCPATH
8 $ cat << EOF >> $HGRCPATH
9 > [experimental]
9 > [experimental]
10 > revlogv2=no
10 > revlogv2=no
11 > EOF
11 > EOF
12
12
13 #endif
13 #endif
14
14
15 #if revlogv2
15 #if revlogv2
16
16
17 $ cat << EOF >> $HGRCPATH
17 $ cat << EOF >> $HGRCPATH
18 > [experimental]
18 > [experimental]
19 > revlogv2=enable-unstable-format-and-corrupt-my-data
19 > revlogv2=enable-unstable-format-and-corrupt-my-data
20 > EOF
20 > EOF
21
21
22 #endif
22 #endif
23
23
24 This test basic case to make sure external process do not see transaction
24 This test basic case to make sure external process do not see transaction
25 content until it is committed.
25 content until it is committed.
26
26
27 # TODO: also add an external reader accessing revlog files while they are written
27 # TODO: also add an external reader accessing revlog files while they are written
28 # (instead of during transaction finalisation)
28 # (instead of during transaction finalisation)
29
29
30 # TODO: also add stream clone and hardlink clone happening during these transaction.
30 # TODO: also add stream clone and hardlink clone happening during these transaction.
31
31
32 setup
32 setup
33 -----
33 -----
34
34
35 synchronisation+output script:
35 synchronisation+output script:
36
36
37 $ mkdir sync
37 $ mkdir sync
38 $ mkdir output
38 $ mkdir output
39 $ mkdir script
39 $ mkdir script
40 $ HG_TEST_FILE_EXT_WAITING=$TESTTMP/sync/ext_waiting
40 $ HG_TEST_FILE_EXT_WAITING=$TESTTMP/sync/ext_waiting
41 $ export HG_TEST_FILE_EXT_WAITING
41 $ export HG_TEST_FILE_EXT_WAITING
42 $ HG_TEST_FILE_EXT_UNLOCK=$TESTTMP/sync/ext_unlock
42 $ HG_TEST_FILE_EXT_UNLOCK=$TESTTMP/sync/ext_unlock
43 $ export HG_TEST_FILE_EXT_UNLOCK
43 $ export HG_TEST_FILE_EXT_UNLOCK
44 $ HG_TEST_FILE_EXT_DONE=$TESTTMP/sync/ext_done
44 $ HG_TEST_FILE_EXT_DONE=$TESTTMP/sync/ext_done
45 $ export HG_TEST_FILE_EXT_DONE
45 $ export HG_TEST_FILE_EXT_DONE
46 $ cat << EOF > script/external.sh
46 $ cat << EOF > script/external.sh
47 > #!/bin/sh
47 > #!/bin/sh
48 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_UNLOCK $HG_TEST_FILE_EXT_WAITING
48 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_UNLOCK $HG_TEST_FILE_EXT_WAITING
49 > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > $TESTTMP/output/external.out 2>/dev/null
49 > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > $TESTTMP/output/external.out
50 > touch $HG_TEST_FILE_EXT_DONE
50 > touch $HG_TEST_FILE_EXT_DONE
51 > EOF
51 > EOF
52 $ chmod +x script/external.sh
52 $ chmod +x script/external.sh
53 $ cat << EOF > script/internal.sh
53 $ cat << EOF > script/internal.sh
54 > #!/bin/sh
54 > #!/bin/sh
55 > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > $TESTTMP/output/internal.out 2>/dev/null
55 > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > $TESTTMP/output/internal.out
56 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_DONE $HG_TEST_FILE_EXT_UNLOCK
56 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_DONE $HG_TEST_FILE_EXT_UNLOCK
57 > EOF
57 > EOF
58 $ chmod +x script/internal.sh
58 $ chmod +x script/internal.sh
59
59
60
60
61 Automated commands:
61 Automated commands:
62
62
63 $ make_one_commit() {
63 $ make_one_commit() {
64 > rm -f $TESTTMP/sync/*
64 > rm -f $TESTTMP/sync/*
65 > rm -f $TESTTMP/output/*
65 > rm -f $TESTTMP/output/*
66 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
66 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
67 > echo x >> a
67 > echo x >> a
68 > $TESTTMP/script/external.sh & hg commit -m "$1"
68 > $TESTTMP/script/external.sh & hg commit -m "$1"
69 > cat $TESTTMP/output/external.out
69 > cat $TESTTMP/output/external.out
70 > cat $TESTTMP/output/internal.out
70 > cat $TESTTMP/output/internal.out
71 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
71 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
72 > }
72 > }
73
73
74
74
75 $ make_one_pull() {
75 $ make_one_pull() {
76 > rm -f $TESTTMP/sync/*
76 > rm -f $TESTTMP/sync/*
77 > rm -f $TESTTMP/output/*
77 > rm -f $TESTTMP/output/*
78 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
78 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
79 > echo x >> a
79 > echo x >> a
80 > $TESTTMP/script/external.sh & hg pull ../other-repo/ --rev "$1" --force --quiet
80 > $TESTTMP/script/external.sh & hg pull ../other-repo/ --rev "$1" --force --quiet
81 > cat $TESTTMP/output/external.out
81 > cat $TESTTMP/output/external.out
82 > cat $TESTTMP/output/internal.out
82 > cat $TESTTMP/output/internal.out
83 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
83 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
84 > }
84 > }
85
85
86 prepare a large source to which to pull from:
86 prepare a large source to which to pull from:
87
87
88 The source is large to unsure we don't use inline more after the pull
88 The source is large to unsure we don't use inline more after the pull
89
89
90 $ hg init other-repo
90 $ hg init other-repo
91 $ hg -R other-repo debugbuilddag .+500
91 $ hg -R other-repo debugbuilddag .+500
92
92
93
93
94 prepare an empty repository where to make test:
94 prepare an empty repository where to make test:
95
95
96 $ hg init repo
96 $ hg init repo
97 $ cd repo
97 $ cd repo
98 $ touch a
98 $ touch a
99 $ hg add a
99 $ hg add a
100
100
101 prepare a small extension to controll inline size
101 prepare a small extension to controll inline size
102
102
103 $ mkdir $TESTTMP/ext
103 $ mkdir $TESTTMP/ext
104 $ cat << EOF > $TESTTMP/ext/small_inline.py
104 $ cat << EOF > $TESTTMP/ext/small_inline.py
105 > from mercurial import revlog
105 > from mercurial import revlog
106 > revlog._maxinline = 64 * 100
106 > revlog._maxinline = 64 * 100
107 > EOF
107 > EOF
108
108
109
109
110
110
111
111
112 $ cat << EOF >> $HGRCPATH
112 $ cat << EOF >> $HGRCPATH
113 > [extensions]
113 > [extensions]
114 > small_inline=$TESTTMP/ext/small_inline.py
114 > small_inline=$TESTTMP/ext/small_inline.py
115 > [hooks]
115 > [hooks]
116 > pretxnclose = $TESTTMP/script/internal.sh
116 > pretxnclose = $TESTTMP/script/internal.sh
117 > EOF
117 > EOF
118
118
119 check this is true for the initial commit (inline → inline)
119 check this is true for the initial commit (inline → inline)
120 -----------------------------------------------------------
120 -----------------------------------------------------------
121
121
122 the repository should still be inline (for relevant format)
122 the repository should still be inline (for relevant format)
123
123
124 $ make_one_commit first
124 $ make_one_commit first
125 pre-commit: -1
125 pre-commit: -1
126 external: -1
126 external: -1
127 internal: 0 first (revlogv1 !)
127 internal: 0 first
128 internal: -1 (revlogv2 known-bad-output !)
129 post-tr: 0 first
128 post-tr: 0 first
130
129
131 #if revlogv1
130 #if revlogv1
132
131
133 $ hg debugrevlog -c | grep inline
132 $ hg debugrevlog -c | grep inline
134 flags : inline
133 flags : inline
135
134
136 #endif
135 #endif
137
136
138 check this is true for extra commit (inline → inline)
137 check this is true for extra commit (inline → inline)
139 -----------------------------------------------------
138 -----------------------------------------------------
140
139
141 the repository should still be inline (for relevant format)
140 the repository should still be inline (for relevant format)
142
141
143 #if revlogv1
142 #if revlogv1
144
143
145 $ hg debugrevlog -c | grep inline
144 $ hg debugrevlog -c | grep inline
146 flags : inline
145 flags : inline
147
146
148 #endif
147 #endif
149
148
150 $ make_one_commit second
149 $ make_one_commit second
151 pre-commit: 0 first
150 pre-commit: 0 first
152 external: 0 first
151 external: 0 first
153 internal: 1 second (revlogv1 !)
152 internal: 1 second
154 internal: 0 first (revlogv2 known-bad-output !)
155 post-tr: 1 second
153 post-tr: 1 second
156
154
157 #if revlogv1
155 #if revlogv1
158
156
159 $ hg debugrevlog -c | grep inline
157 $ hg debugrevlog -c | grep inline
160 flags : inline
158 flags : inline
161
159
162 #endif
160 #endif
163
161
164 check this is true for a small pull (inline → inline)
162 check this is true for a small pull (inline → inline)
165 -----------------------------------------------------
163 -----------------------------------------------------
166
164
167 the repository should still be inline (for relevant format)
165 the repository should still be inline (for relevant format)
168
166
169 #if revlogv1
167 #if revlogv1
170
168
171 $ hg debugrevlog -c | grep inline
169 $ hg debugrevlog -c | grep inline
172 flags : inline
170 flags : inline
173
171
174 #endif
172 #endif
175
173
176 $ make_one_pull 3
174 $ make_one_pull 3
177 pre-commit: 1 second
175 pre-commit: 1 second
178 warning: repository is unrelated
176 warning: repository is unrelated
179 external: 1 second
177 external: 1 second
180 internal: 5 r3 (revlogv1 !)
178 internal: 5 r3
181 internal: 1 second (revlogv2 known-bad-output !)
182 post-tr: 5 r3
179 post-tr: 5 r3
183
180
184 #if revlogv1
181 #if revlogv1
185
182
186 $ hg debugrevlog -c | grep inline
183 $ hg debugrevlog -c | grep inline
187 flags : inline
184 flags : inline
188
185
189 #endif
186 #endif
190
187
191 Make a large pull (inline → no-inline)
188 Make a large pull (inline → no-inline)
192 ---------------------------------------
189 ---------------------------------------
193
190
194 the repository should no longer be inline (for relevant format)
191 the repository should no longer be inline (for relevant format)
195
192
196 #if revlogv1
193 #if revlogv1
197
194
198 $ hg debugrevlog -c | grep inline
195 $ hg debugrevlog -c | grep inline
199 flags : inline
196 flags : inline
200
197
201 #endif
198 #endif
202
199
203 $ make_one_pull 400
200 $ make_one_pull 400
204 pre-commit: 5 r3
201 pre-commit: 5 r3
205 external: 5 r3
202 external: 5 r3
206 internal: 402 r400 (revlogv1 !)
203 internal: 402 r400
207 internal: 5 r3 (revlogv2 known-bad-output !)
208 post-tr: 402 r400
204 post-tr: 402 r400
209
205
210 #if revlogv1
206 #if revlogv1
211
207
212 $ hg debugrevlog -c | grep inline
208 $ hg debugrevlog -c | grep inline
213 [1]
209 [1]
214
210
215 #endif
211 #endif
216
212
217 check this is true for extra commit (no-inline → no-inline)
213 check this is true for extra commit (no-inline → no-inline)
218 -----------------------------------------------------------
214 -----------------------------------------------------------
219
215
220 the repository should no longer be inline (for relevant format)
216 the repository should no longer be inline (for relevant format)
221
217
222 #if revlogv1
218 #if revlogv1
223
219
224 $ hg debugrevlog -c | grep inline
220 $ hg debugrevlog -c | grep inline
225 [1]
221 [1]
226
222
227 #endif
223 #endif
228
224
229 $ make_one_commit third
225 $ make_one_commit third
230 pre-commit: 402 r400
226 pre-commit: 402 r400
231 external: 402 r400
227 external: 402 r400
232 internal: 403 third (revlogv1 !)
228 internal: 403 third
233 internal: 402 r400 (revlogv2 known-bad-output !)
234 post-tr: 403 third
229 post-tr: 403 third
235
230
236 #if revlogv1
231 #if revlogv1
237
232
238 $ hg debugrevlog -c | grep inline
233 $ hg debugrevlog -c | grep inline
239 [1]
234 [1]
240
235
241 #endif
236 #endif
242
237
243
238
244 Make a pull (not-inline → no-inline)
239 Make a pull (not-inline → no-inline)
245 -------------------------------------
240 -------------------------------------
246
241
247 the repository should no longer be inline (for relevant format)
242 the repository should no longer be inline (for relevant format)
248
243
249 #if revlogv1
244 #if revlogv1
250
245
251 $ hg debugrevlog -c | grep inline
246 $ hg debugrevlog -c | grep inline
252 [1]
247 [1]
253
248
254 #endif
249 #endif
255
250
256 $ make_one_pull tip
251 $ make_one_pull tip
257 pre-commit: 403 third
252 pre-commit: 403 third
258 external: 403 third
253 external: 403 third
259 internal: 503 r500 (revlogv1 !)
254 internal: 503 r500
260 internal: 403 third (revlogv2 known-bad-output !)
261 post-tr: 503 r500
255 post-tr: 503 r500
262
256
263 #if revlogv1
257 #if revlogv1
264
258
265 $ hg debugrevlog -c | grep inline
259 $ hg debugrevlog -c | grep inline
266 [1]
260 [1]
267
261
268 #endif
262 #endif
General Comments 0
You need to be logged in to leave comments. Login now