##// END OF EJS Templates
revlogv2: delay the update of the changelog docket to transaction end...
marmoute -
r48013:682f0985 default
parent child Browse files
Show More
@@ -1,627 +1,630 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 )
14 )
15 from .thirdparty import attr
15 from .thirdparty import attr
16
16
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 metadata,
20 metadata,
21 pycompat,
21 pycompat,
22 revlog,
22 revlog,
23 )
23 )
24 from .utils import (
24 from .utils import (
25 dateutil,
25 dateutil,
26 stringutil,
26 stringutil,
27 )
27 )
28 from .revlogutils import (
28 from .revlogutils import (
29 constants as revlog_constants,
29 constants as revlog_constants,
30 flagutil,
30 flagutil,
31 )
31 )
32
32
33 _defaultextra = {b'branch': b'default'}
33 _defaultextra = {b'branch': b'default'}
34
34
35
35
36 def _string_escape(text):
36 def _string_escape(text):
37 """
37 """
38 >>> from .pycompat import bytechr as chr
38 >>> from .pycompat import bytechr as chr
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
41 >>> s
41 >>> s
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
43 >>> res = _string_escape(s)
43 >>> res = _string_escape(s)
44 >>> s == _string_unescape(res)
44 >>> s == _string_unescape(res)
45 True
45 True
46 """
46 """
47 # subset of the string_escape codec
47 # subset of the string_escape codec
48 text = (
48 text = (
49 text.replace(b'\\', b'\\\\')
49 text.replace(b'\\', b'\\\\')
50 .replace(b'\n', b'\\n')
50 .replace(b'\n', b'\\n')
51 .replace(b'\r', b'\\r')
51 .replace(b'\r', b'\\r')
52 )
52 )
53 return text.replace(b'\0', b'\\0')
53 return text.replace(b'\0', b'\\0')
54
54
55
55
def _string_unescape(text):
    """Reverse the transformation applied by ``_string_escape``."""
    if b'\\0' in text:
        # Protect literal backslash pairs with a newline sentinel so the
        # b'\\0' -> NUL substitution cannot match inside an escaped
        # backslash followed by '0'; the sentinel is stripped afterwards.
        text = (
            text.replace(b'\\\\', b'\\\\\n')
            .replace(b'\\0', b'\0')
            .replace(b'\n', b'')
        )
    return stringutil.unescapestr(text)
63
63
64
64
def decodeextra(text):
    """Decode an encoded "extra" metadata blob into a dict.

    Unknown keys are kept; the b'branch' default is always present.

    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    # entries are NUL-separated, each one an escaped "key:value" pair
    pairs = (
        _string_unescape(chunk).split(b':', 1)
        for chunk in text.split(b'\0')
        if chunk
    )
    for key, value in pairs:
        extra[key] = value
    return extra
82
82
83
83
def encodeextra(d):
    """Serialize the extra-metadata dict ``d`` into its changelog form."""
    # keys must be sorted to produce a deterministic changelog entry
    return b"\0".join(
        _string_escape(b'%s:%s' % (key, d[key])) for key in sorted(d)
    )
88
88
89
89
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    cleaned = [line.rstrip() for line in desc.splitlines()]
    return b'\n'.join(cleaned).strip(b'\n')
93
93
94
94
class appender(object):
    """the changelog index must be updated last on disk, so we use this class
    to delay writes to it"""

    def __init__(self, vfs, name, mode, buf):
        # ``buf`` collects the delayed writes; it is shared with the caller
        # so pending data can be inspected and flushed later.
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        # virtual end of file: on-disk size plus everything buffered so far
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        # nothing reaches disk until the buffer is explicitly written out
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        # only reposition the real file when the target lies inside it
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        result = b""
        if self.offset < self.size:
            ondisk = self.fp.read(count)
            result = ondisk
            self.offset += len(ondisk)
            if count > 0:
                count -= len(ondisk)
        if count != 0:
            # collapse the buffered chunks into one bytes object so the
            # requested range can be sliced out of it
            start = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            buffered = self.data[0][start : start + count]
            self.offset += len(buffered)
            result += buffered
        return result

    def write(self, s):
        payload = bytes(s)
        self.data.append(payload)
        self.offset += len(payload)
        self._end += len(payload)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
163
163
164
164
165 class _divertopener(object):
165 class _divertopener(object):
166 def __init__(self, opener, target):
166 def __init__(self, opener, target):
167 self._opener = opener
167 self._opener = opener
168 self._target = target
168 self._target = target
169
169
170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
171 if name != self._target:
171 if name != self._target:
172 return self._opener(name, mode, **kwargs)
172 return self._opener(name, mode, **kwargs)
173 return self._opener(name + b".a", mode, **kwargs)
173 return self._opener(name + b".a", mode, **kwargs)
174
174
175 def __getattr__(self, attr):
175 def __getattr__(self, attr):
176 return getattr(self._opener, attr)
176 return getattr(self._opener, attr)
177
177
178
178
179 def _delayopener(opener, target, buf):
179 def _delayopener(opener, target, buf):
180 """build an opener that stores chunks in 'buf' instead of 'target'"""
180 """build an opener that stores chunks in 'buf' instead of 'target'"""
181
181
182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
183 if name != target:
183 if name != target:
184 return opener(name, mode, **kwargs)
184 return opener(name, mode, **kwargs)
185 assert not kwargs
185 assert not kwargs
186 return appender(opener, name, mode, buf)
186 return appender(opener, name, mode, buf)
187
187
188 return _delay
188 return _delay
189
189
190
190
@attr.s
class _changelogrevision(object):
    """Plain attribute bag mirroring ``changelogrevision``'s fields.

    Returned for the null/empty revision, where there is no text to parse.
    Field order matters: it defines the attrs-generated constructor.
    """

    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib()
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')
    branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
206
206
207
207
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
        '_changes',
    )

    def __new__(cls, cl, text, sidedata, cpsd):
        if not text:
            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty, in which case nl3 is already the
        # first of the double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd
        self._changes = None

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        offsets = self._offsets
        return encoding.tolocal(self._text[offsets[0] + 1 : offsets[1]])

    @property
    def _rawdate(self):
        offsets = self._offsets
        dateextra = self._text[offsets[1] + 1 : offsets[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        offsets = self._offsets
        dateextra = self._text[offsets[1] + 1 : offsets[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            # no extra field after "time tz" (changelog v0)
            return None
        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        try:
            timezone = int(raw[1])
        except ValueError:
            # Various tools did silly things with the timezone.
            timezone = 0
        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra
        return decodeextra(raw)

    @property
    def changes(self):
        if self._changes is None:
            if self._cpsd:
                computed = metadata.decode_files_sidedata(self._sidedata)
            else:
                computed = metadata.ChangingFiles(
                    touched=self.files or (),
                    added=self.filesadded or (),
                    removed=self.filesremoved or (),
                    p1_copies=self.p1copies or {},
                    p2_copies=self.p2copies or {},
                )
            self._changes = computed
        return self._changes

    @property
    def files(self):
        if self._cpsd:
            return sorted(self.changes.touched)
        offsets = self._offsets
        if offsets[2] == offsets[3]:
            # no file list between the header and the description
            return []
        return self._text[offsets[2] + 1 : offsets[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            return self.changes.added
        rawindices = self.extra.get(b'filesadded')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            return self.changes.removed
        rawindices = self.extra.get(b'filesremoved')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            return self.changes.copied_from_p1
        rawcopies = self.extra.get(b'p1copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            return self.changes.copied_from_p2
        rawcopies = self.extra.get(b'p2copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])

    @property
    def branchinfo(self):
        extra = self.extra
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
382
382
383
383
class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False, concurrencychecker=None):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.

        ``concurrencychecker`` will be passed to the revlog init function, see
        the documentation there.
        """
        postfix = None
        if trypending and opener.exists(b'00changelog.i.a'):
            postfix = b'a'

        revlog.revlog.__init__(
            self,
            opener,
            target=(revlog_constants.KIND_CHANGELOG, None),
            radix=b'00changelog',
            postfix=postfix,
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
            concurrencychecker=concurrencychecker,
        )

        if self._initempty and (self._format_version == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.
            self._format_flags &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self._filteredrevs = frozenset()
        self._filteredrevs_hashcache = {}
        self._copiesstorage = opener.options.get(b'copies-storage')

    @property
    def filteredrevs(self):
        return self._filteredrevs

    @filteredrevs.setter
    def filteredrevs(self, val):
        # Ensure all updates go through this function
        assert isinstance(val, frozenset)
        self._filteredrevs = val
        self._filteredrevs_hashcache = {}

    def _write_docket(self, tr):
        # While updates are delayed, the docket must not be written out;
        # _finalize takes care of writing it at transaction end.
        if not self._delayed:
            super(changelog, self)._write_docket(tr)

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""
        if self._docket is None and not self._delayed:
            if len(self) == 0:
                # brand-new changelog: divert everything to a ".a" file
                self._divert = True
                if self._realopener.exists(self._indexfile + b'.a'):
                    self._realopener.unlink(self._indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self._indexfile)
            else:
                # existing changelog: buffer the new writes in memory
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self._indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._docket is not None:
            self._write_docket(tr)
        elif self._divert:
            assert not self._delaybuf
            tmpname = self._indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self._indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self._indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._delaybuf:
            # make a temporary copy of the index
            src = self._realopener(self._indexfile)
            pendingfilename = self._indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            dst = self._realopener(pendingfilename, b"w")
            dst.write(src.read())
            # add pending data
            dst.write(b"".join(self._delaybuf))
            dst.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self._indexfile)

        return self._divert

    def _enforceinlinesize(self, tr):
        # while delayed, the index must stay inline; splitting happens at
        # finalize time
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr)

    def read(self, nodeorrev):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

           - manifest node in binary
           - author/user as a localstr
           - date as a 2-tuple of (time, timezone)
           - list of files
           - commit message as a localstr
           - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        text, sidedata = self._revisiondata(nodeorrev)
        parsed = changelogrevision(
            self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )
        return (
            parsed.manifest,
            parsed.user,
            parsed.date,
            parsed.files,
            parsed.description,
            parsed.extra,
        )

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, nodeorrev):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(nodeorrev)
        if not text:
            return []
        # the file list sits after the three header lines, ending at the
        # blank line that precedes the description
        last = text.index(b"\n\n")
        return text[:last].split(b'\n')[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
    ):
        """Serialize a new changeset and store it; returns its node."""
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                # the default branch is implicit, never stored
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files.touched)
        flags = 0
        sidedata = None
        if self._copiesstorage == b'changeset-sidedata':
            if files.has_copies_info:
                flags |= flagutil.REVIDX_HASCOPIESINFO
            sidedata = metadata.encode_files_sidedata(files)

        if extra:
            parseddate = b"%s %s" % (parseddate, encodeextra(extra))
        lines = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(lines)
        rev = self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
        )
        return self.node(rev)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        return self.changelogrevision(rev).branchinfo

    def _nodeduplicatecallback(self, transaction, rev):
        # keep track of revisions that got "re-added", e.g. unbundle of a
        # known rev.
        #
        # We track them in a list to preserve their order from the source
        # bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(rev)
@@ -1,2699 +1,2698 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
19 def loadconfigtable(ui, extname, configtable):
19 def loadconfigtable(ui, extname, configtable):
20 """update config item known to the ui with the extension ones"""
20 """update config item known to the ui with the extension ones"""
21 for section, items in sorted(configtable.items()):
21 for section, items in sorted(configtable.items()):
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 knownkeys = set(knownitems)
23 knownkeys = set(knownitems)
24 newkeys = set(items)
24 newkeys = set(items)
25 for key in sorted(knownkeys & newkeys):
25 for key in sorted(knownkeys & newkeys):
26 msg = b"extension '%s' overwrite config item '%s.%s'"
26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 msg %= (extname, section, key)
27 msg %= (extname, section, key)
28 ui.develwarn(msg, config=b'warn-config')
28 ui.develwarn(msg, config=b'warn-config')
29
29
30 knownitems.update(items)
30 knownitems.update(items)
31
31
32
32
33 class configitem(object):
33 class configitem(object):
34 """represent a known config item
34 """represent a known config item
35
35
36 :section: the official config section where to find this item,
36 :section: the official config section where to find this item,
37 :name: the official name within the section,
37 :name: the official name within the section,
38 :default: default value for this item,
38 :default: default value for this item,
39 :alias: optional list of tuples as alternatives,
39 :alias: optional list of tuples as alternatives,
40 :generic: this is a generic definition, match name using regular expression.
40 :generic: this is a generic definition, match name using regular expression.
41 """
41 """
42
42
43 def __init__(
43 def __init__(
44 self,
44 self,
45 section,
45 section,
46 name,
46 name,
47 default=None,
47 default=None,
48 alias=(),
48 alias=(),
49 generic=False,
49 generic=False,
50 priority=0,
50 priority=0,
51 experimental=False,
51 experimental=False,
52 ):
52 ):
53 self.section = section
53 self.section = section
54 self.name = name
54 self.name = name
55 self.default = default
55 self.default = default
56 self.alias = list(alias)
56 self.alias = list(alias)
57 self.generic = generic
57 self.generic = generic
58 self.priority = priority
58 self.priority = priority
59 self.experimental = experimental
59 self.experimental = experimental
60 self._re = None
60 self._re = None
61 if generic:
61 if generic:
62 self._re = re.compile(self.name)
62 self._re = re.compile(self.name)
63
63
64
64
65 class itemregister(dict):
65 class itemregister(dict):
66 """A specialized dictionary that can handle wild-card selection"""
66 """A specialized dictionary that can handle wild-card selection"""
67
67
68 def __init__(self):
68 def __init__(self):
69 super(itemregister, self).__init__()
69 super(itemregister, self).__init__()
70 self._generics = set()
70 self._generics = set()
71
71
72 def update(self, other):
72 def update(self, other):
73 super(itemregister, self).update(other)
73 super(itemregister, self).update(other)
74 self._generics.update(other._generics)
74 self._generics.update(other._generics)
75
75
76 def __setitem__(self, key, item):
76 def __setitem__(self, key, item):
77 super(itemregister, self).__setitem__(key, item)
77 super(itemregister, self).__setitem__(key, item)
78 if item.generic:
78 if item.generic:
79 self._generics.add(item)
79 self._generics.add(item)
80
80
81 def get(self, key):
81 def get(self, key):
82 baseitem = super(itemregister, self).get(key)
82 baseitem = super(itemregister, self).get(key)
83 if baseitem is not None and not baseitem.generic:
83 if baseitem is not None and not baseitem.generic:
84 return baseitem
84 return baseitem
85
85
86 # search for a matching generic item
86 # search for a matching generic item
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 for item in generics:
88 for item in generics:
89 # we use 'match' instead of 'search' to make the matching simpler
89 # we use 'match' instead of 'search' to make the matching simpler
90 # for people unfamiliar with regular expression. Having the match
90 # for people unfamiliar with regular expression. Having the match
91 # rooted to the start of the string will produce less surprising
91 # rooted to the start of the string will produce less surprising
92 # result for user writing simple regex for sub-attribute.
92 # result for user writing simple regex for sub-attribute.
93 #
93 #
94 # For example using "color\..*" match produces an unsurprising
94 # For example using "color\..*" match produces an unsurprising
95 # result, while using search could suddenly match apparently
95 # result, while using search could suddenly match apparently
96 # unrelated configuration that happens to contains "color."
96 # unrelated configuration that happens to contains "color."
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 # some match to avoid the need to prefix most pattern with "^".
98 # some match to avoid the need to prefix most pattern with "^".
99 # The "^" seems more error prone.
99 # The "^" seems more error prone.
100 if item._re.match(key):
100 if item._re.match(key):
101 return item
101 return item
102
102
103 return None
103 return None
104
104
105
105
106 coreitems = {}
106 coreitems = {}
107
107
108
108
109 def _register(configtable, *args, **kwargs):
109 def _register(configtable, *args, **kwargs):
110 item = configitem(*args, **kwargs)
110 item = configitem(*args, **kwargs)
111 section = configtable.setdefault(item.section, itemregister())
111 section = configtable.setdefault(item.section, itemregister())
112 if item.name in section:
112 if item.name in section:
113 msg = b"duplicated config item registration for '%s.%s'"
113 msg = b"duplicated config item registration for '%s.%s'"
114 raise error.ProgrammingError(msg % (item.section, item.name))
114 raise error.ProgrammingError(msg % (item.section, item.name))
115 section[item.name] = item
115 section[item.name] = item
116
116
117
117
118 # special value for case where the default is derived from other values
118 # special value for case where the default is derived from other values
119 dynamicdefault = object()
119 dynamicdefault = object()
120
120
121 # Registering actual config items
121 # Registering actual config items
122
122
123
123
124 def getitemregister(configtable):
124 def getitemregister(configtable):
125 f = functools.partial(_register, configtable)
125 f = functools.partial(_register, configtable)
126 # export pseudo enum as configitem.*
126 # export pseudo enum as configitem.*
127 f.dynamicdefault = dynamicdefault
127 f.dynamicdefault = dynamicdefault
128 return f
128 return f
129
129
130
130
131 coreconfigitem = getitemregister(coreitems)
131 coreconfigitem = getitemregister(coreitems)
132
132
133
133
134 def _registerdiffopts(section, configprefix=b''):
134 def _registerdiffopts(section, configprefix=b''):
135 coreconfigitem(
135 coreconfigitem(
136 section,
136 section,
137 configprefix + b'nodates',
137 configprefix + b'nodates',
138 default=False,
138 default=False,
139 )
139 )
140 coreconfigitem(
140 coreconfigitem(
141 section,
141 section,
142 configprefix + b'showfunc',
142 configprefix + b'showfunc',
143 default=False,
143 default=False,
144 )
144 )
145 coreconfigitem(
145 coreconfigitem(
146 section,
146 section,
147 configprefix + b'unified',
147 configprefix + b'unified',
148 default=None,
148 default=None,
149 )
149 )
150 coreconfigitem(
150 coreconfigitem(
151 section,
151 section,
152 configprefix + b'git',
152 configprefix + b'git',
153 default=False,
153 default=False,
154 )
154 )
155 coreconfigitem(
155 coreconfigitem(
156 section,
156 section,
157 configprefix + b'ignorews',
157 configprefix + b'ignorews',
158 default=False,
158 default=False,
159 )
159 )
160 coreconfigitem(
160 coreconfigitem(
161 section,
161 section,
162 configprefix + b'ignorewsamount',
162 configprefix + b'ignorewsamount',
163 default=False,
163 default=False,
164 )
164 )
165 coreconfigitem(
165 coreconfigitem(
166 section,
166 section,
167 configprefix + b'ignoreblanklines',
167 configprefix + b'ignoreblanklines',
168 default=False,
168 default=False,
169 )
169 )
170 coreconfigitem(
170 coreconfigitem(
171 section,
171 section,
172 configprefix + b'ignorewseol',
172 configprefix + b'ignorewseol',
173 default=False,
173 default=False,
174 )
174 )
175 coreconfigitem(
175 coreconfigitem(
176 section,
176 section,
177 configprefix + b'nobinary',
177 configprefix + b'nobinary',
178 default=False,
178 default=False,
179 )
179 )
180 coreconfigitem(
180 coreconfigitem(
181 section,
181 section,
182 configprefix + b'noprefix',
182 configprefix + b'noprefix',
183 default=False,
183 default=False,
184 )
184 )
185 coreconfigitem(
185 coreconfigitem(
186 section,
186 section,
187 configprefix + b'word-diff',
187 configprefix + b'word-diff',
188 default=False,
188 default=False,
189 )
189 )
190
190
191
191
192 coreconfigitem(
192 coreconfigitem(
193 b'alias',
193 b'alias',
194 b'.*',
194 b'.*',
195 default=dynamicdefault,
195 default=dynamicdefault,
196 generic=True,
196 generic=True,
197 )
197 )
198 coreconfigitem(
198 coreconfigitem(
199 b'auth',
199 b'auth',
200 b'cookiefile',
200 b'cookiefile',
201 default=None,
201 default=None,
202 )
202 )
203 _registerdiffopts(section=b'annotate')
203 _registerdiffopts(section=b'annotate')
204 # bookmarks.pushing: internal hack for discovery
204 # bookmarks.pushing: internal hack for discovery
205 coreconfigitem(
205 coreconfigitem(
206 b'bookmarks',
206 b'bookmarks',
207 b'pushing',
207 b'pushing',
208 default=list,
208 default=list,
209 )
209 )
210 # bundle.mainreporoot: internal hack for bundlerepo
210 # bundle.mainreporoot: internal hack for bundlerepo
211 coreconfigitem(
211 coreconfigitem(
212 b'bundle',
212 b'bundle',
213 b'mainreporoot',
213 b'mainreporoot',
214 default=b'',
214 default=b'',
215 )
215 )
216 coreconfigitem(
216 coreconfigitem(
217 b'censor',
217 b'censor',
218 b'policy',
218 b'policy',
219 default=b'abort',
219 default=b'abort',
220 experimental=True,
220 experimental=True,
221 )
221 )
222 coreconfigitem(
222 coreconfigitem(
223 b'chgserver',
223 b'chgserver',
224 b'idletimeout',
224 b'idletimeout',
225 default=3600,
225 default=3600,
226 )
226 )
227 coreconfigitem(
227 coreconfigitem(
228 b'chgserver',
228 b'chgserver',
229 b'skiphash',
229 b'skiphash',
230 default=False,
230 default=False,
231 )
231 )
232 coreconfigitem(
232 coreconfigitem(
233 b'cmdserver',
233 b'cmdserver',
234 b'log',
234 b'log',
235 default=None,
235 default=None,
236 )
236 )
237 coreconfigitem(
237 coreconfigitem(
238 b'cmdserver',
238 b'cmdserver',
239 b'max-log-files',
239 b'max-log-files',
240 default=7,
240 default=7,
241 )
241 )
242 coreconfigitem(
242 coreconfigitem(
243 b'cmdserver',
243 b'cmdserver',
244 b'max-log-size',
244 b'max-log-size',
245 default=b'1 MB',
245 default=b'1 MB',
246 )
246 )
247 coreconfigitem(
247 coreconfigitem(
248 b'cmdserver',
248 b'cmdserver',
249 b'max-repo-cache',
249 b'max-repo-cache',
250 default=0,
250 default=0,
251 experimental=True,
251 experimental=True,
252 )
252 )
253 coreconfigitem(
253 coreconfigitem(
254 b'cmdserver',
254 b'cmdserver',
255 b'message-encodings',
255 b'message-encodings',
256 default=list,
256 default=list,
257 )
257 )
258 coreconfigitem(
258 coreconfigitem(
259 b'cmdserver',
259 b'cmdserver',
260 b'track-log',
260 b'track-log',
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 )
262 )
263 coreconfigitem(
263 coreconfigitem(
264 b'cmdserver',
264 b'cmdserver',
265 b'shutdown-on-interrupt',
265 b'shutdown-on-interrupt',
266 default=True,
266 default=True,
267 )
267 )
268 coreconfigitem(
268 coreconfigitem(
269 b'color',
269 b'color',
270 b'.*',
270 b'.*',
271 default=None,
271 default=None,
272 generic=True,
272 generic=True,
273 )
273 )
274 coreconfigitem(
274 coreconfigitem(
275 b'color',
275 b'color',
276 b'mode',
276 b'mode',
277 default=b'auto',
277 default=b'auto',
278 )
278 )
279 coreconfigitem(
279 coreconfigitem(
280 b'color',
280 b'color',
281 b'pagermode',
281 b'pagermode',
282 default=dynamicdefault,
282 default=dynamicdefault,
283 )
283 )
284 coreconfigitem(
284 coreconfigitem(
285 b'command-templates',
285 b'command-templates',
286 b'graphnode',
286 b'graphnode',
287 default=None,
287 default=None,
288 alias=[(b'ui', b'graphnodetemplate')],
288 alias=[(b'ui', b'graphnodetemplate')],
289 )
289 )
290 coreconfigitem(
290 coreconfigitem(
291 b'command-templates',
291 b'command-templates',
292 b'log',
292 b'log',
293 default=None,
293 default=None,
294 alias=[(b'ui', b'logtemplate')],
294 alias=[(b'ui', b'logtemplate')],
295 )
295 )
296 coreconfigitem(
296 coreconfigitem(
297 b'command-templates',
297 b'command-templates',
298 b'mergemarker',
298 b'mergemarker',
299 default=(
299 default=(
300 b'{node|short} '
300 b'{node|short} '
301 b'{ifeq(tags, "tip", "", '
301 b'{ifeq(tags, "tip", "", '
302 b'ifeq(tags, "", "", "{tags} "))}'
302 b'ifeq(tags, "", "", "{tags} "))}'
303 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{if(bookmarks, "{bookmarks} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 b'- {author|user}: {desc|firstline}'
305 b'- {author|user}: {desc|firstline}'
306 ),
306 ),
307 alias=[(b'ui', b'mergemarkertemplate')],
307 alias=[(b'ui', b'mergemarkertemplate')],
308 )
308 )
309 coreconfigitem(
309 coreconfigitem(
310 b'command-templates',
310 b'command-templates',
311 b'pre-merge-tool-output',
311 b'pre-merge-tool-output',
312 default=None,
312 default=None,
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 )
314 )
315 coreconfigitem(
315 coreconfigitem(
316 b'command-templates',
316 b'command-templates',
317 b'oneline-summary',
317 b'oneline-summary',
318 default=None,
318 default=None,
319 )
319 )
320 coreconfigitem(
320 coreconfigitem(
321 b'command-templates',
321 b'command-templates',
322 b'oneline-summary.*',
322 b'oneline-summary.*',
323 default=dynamicdefault,
323 default=dynamicdefault,
324 generic=True,
324 generic=True,
325 )
325 )
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 coreconfigitem(
327 coreconfigitem(
328 b'commands',
328 b'commands',
329 b'commit.post-status',
329 b'commit.post-status',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem(
332 coreconfigitem(
333 b'commands',
333 b'commands',
334 b'grep.all-files',
334 b'grep.all-files',
335 default=False,
335 default=False,
336 experimental=True,
336 experimental=True,
337 )
337 )
338 coreconfigitem(
338 coreconfigitem(
339 b'commands',
339 b'commands',
340 b'merge.require-rev',
340 b'merge.require-rev',
341 default=False,
341 default=False,
342 )
342 )
343 coreconfigitem(
343 coreconfigitem(
344 b'commands',
344 b'commands',
345 b'push.require-revs',
345 b'push.require-revs',
346 default=False,
346 default=False,
347 )
347 )
348 coreconfigitem(
348 coreconfigitem(
349 b'commands',
349 b'commands',
350 b'resolve.confirm',
350 b'resolve.confirm',
351 default=False,
351 default=False,
352 )
352 )
353 coreconfigitem(
353 coreconfigitem(
354 b'commands',
354 b'commands',
355 b'resolve.explicit-re-merge',
355 b'resolve.explicit-re-merge',
356 default=False,
356 default=False,
357 )
357 )
358 coreconfigitem(
358 coreconfigitem(
359 b'commands',
359 b'commands',
360 b'resolve.mark-check',
360 b'resolve.mark-check',
361 default=b'none',
361 default=b'none',
362 )
362 )
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 coreconfigitem(
364 coreconfigitem(
365 b'commands',
365 b'commands',
366 b'show.aliasprefix',
366 b'show.aliasprefix',
367 default=list,
367 default=list,
368 )
368 )
369 coreconfigitem(
369 coreconfigitem(
370 b'commands',
370 b'commands',
371 b'status.relative',
371 b'status.relative',
372 default=False,
372 default=False,
373 )
373 )
374 coreconfigitem(
374 coreconfigitem(
375 b'commands',
375 b'commands',
376 b'status.skipstates',
376 b'status.skipstates',
377 default=[],
377 default=[],
378 experimental=True,
378 experimental=True,
379 )
379 )
380 coreconfigitem(
380 coreconfigitem(
381 b'commands',
381 b'commands',
382 b'status.terse',
382 b'status.terse',
383 default=b'',
383 default=b'',
384 )
384 )
385 coreconfigitem(
385 coreconfigitem(
386 b'commands',
386 b'commands',
387 b'status.verbose',
387 b'status.verbose',
388 default=False,
388 default=False,
389 )
389 )
390 coreconfigitem(
390 coreconfigitem(
391 b'commands',
391 b'commands',
392 b'update.check',
392 b'update.check',
393 default=None,
393 default=None,
394 )
394 )
395 coreconfigitem(
395 coreconfigitem(
396 b'commands',
396 b'commands',
397 b'update.requiredest',
397 b'update.requiredest',
398 default=False,
398 default=False,
399 )
399 )
400 coreconfigitem(
400 coreconfigitem(
401 b'committemplate',
401 b'committemplate',
402 b'.*',
402 b'.*',
403 default=None,
403 default=None,
404 generic=True,
404 generic=True,
405 )
405 )
406 coreconfigitem(
406 coreconfigitem(
407 b'convert',
407 b'convert',
408 b'bzr.saverev',
408 b'bzr.saverev',
409 default=True,
409 default=True,
410 )
410 )
411 coreconfigitem(
411 coreconfigitem(
412 b'convert',
412 b'convert',
413 b'cvsps.cache',
413 b'cvsps.cache',
414 default=True,
414 default=True,
415 )
415 )
416 coreconfigitem(
416 coreconfigitem(
417 b'convert',
417 b'convert',
418 b'cvsps.fuzz',
418 b'cvsps.fuzz',
419 default=60,
419 default=60,
420 )
420 )
421 coreconfigitem(
421 coreconfigitem(
422 b'convert',
422 b'convert',
423 b'cvsps.logencoding',
423 b'cvsps.logencoding',
424 default=None,
424 default=None,
425 )
425 )
426 coreconfigitem(
426 coreconfigitem(
427 b'convert',
427 b'convert',
428 b'cvsps.mergefrom',
428 b'cvsps.mergefrom',
429 default=None,
429 default=None,
430 )
430 )
431 coreconfigitem(
431 coreconfigitem(
432 b'convert',
432 b'convert',
433 b'cvsps.mergeto',
433 b'cvsps.mergeto',
434 default=None,
434 default=None,
435 )
435 )
436 coreconfigitem(
436 coreconfigitem(
437 b'convert',
437 b'convert',
438 b'git.committeractions',
438 b'git.committeractions',
439 default=lambda: [b'messagedifferent'],
439 default=lambda: [b'messagedifferent'],
440 )
440 )
441 coreconfigitem(
441 coreconfigitem(
442 b'convert',
442 b'convert',
443 b'git.extrakeys',
443 b'git.extrakeys',
444 default=list,
444 default=list,
445 )
445 )
446 coreconfigitem(
446 coreconfigitem(
447 b'convert',
447 b'convert',
448 b'git.findcopiesharder',
448 b'git.findcopiesharder',
449 default=False,
449 default=False,
450 )
450 )
451 coreconfigitem(
451 coreconfigitem(
452 b'convert',
452 b'convert',
453 b'git.remoteprefix',
453 b'git.remoteprefix',
454 default=b'remote',
454 default=b'remote',
455 )
455 )
456 coreconfigitem(
456 coreconfigitem(
457 b'convert',
457 b'convert',
458 b'git.renamelimit',
458 b'git.renamelimit',
459 default=400,
459 default=400,
460 )
460 )
461 coreconfigitem(
461 coreconfigitem(
462 b'convert',
462 b'convert',
463 b'git.saverev',
463 b'git.saverev',
464 default=True,
464 default=True,
465 )
465 )
466 coreconfigitem(
466 coreconfigitem(
467 b'convert',
467 b'convert',
468 b'git.similarity',
468 b'git.similarity',
469 default=50,
469 default=50,
470 )
470 )
471 coreconfigitem(
471 coreconfigitem(
472 b'convert',
472 b'convert',
473 b'git.skipsubmodules',
473 b'git.skipsubmodules',
474 default=False,
474 default=False,
475 )
475 )
476 coreconfigitem(
476 coreconfigitem(
477 b'convert',
477 b'convert',
478 b'hg.clonebranches',
478 b'hg.clonebranches',
479 default=False,
479 default=False,
480 )
480 )
481 coreconfigitem(
481 coreconfigitem(
482 b'convert',
482 b'convert',
483 b'hg.ignoreerrors',
483 b'hg.ignoreerrors',
484 default=False,
484 default=False,
485 )
485 )
486 coreconfigitem(
486 coreconfigitem(
487 b'convert',
487 b'convert',
488 b'hg.preserve-hash',
488 b'hg.preserve-hash',
489 default=False,
489 default=False,
490 )
490 )
491 coreconfigitem(
491 coreconfigitem(
492 b'convert',
492 b'convert',
493 b'hg.revs',
493 b'hg.revs',
494 default=None,
494 default=None,
495 )
495 )
496 coreconfigitem(
496 coreconfigitem(
497 b'convert',
497 b'convert',
498 b'hg.saverev',
498 b'hg.saverev',
499 default=False,
499 default=False,
500 )
500 )
501 coreconfigitem(
501 coreconfigitem(
502 b'convert',
502 b'convert',
503 b'hg.sourcename',
503 b'hg.sourcename',
504 default=None,
504 default=None,
505 )
505 )
506 coreconfigitem(
506 coreconfigitem(
507 b'convert',
507 b'convert',
508 b'hg.startrev',
508 b'hg.startrev',
509 default=None,
509 default=None,
510 )
510 )
511 coreconfigitem(
511 coreconfigitem(
512 b'convert',
512 b'convert',
513 b'hg.tagsbranch',
513 b'hg.tagsbranch',
514 default=b'default',
514 default=b'default',
515 )
515 )
516 coreconfigitem(
516 coreconfigitem(
517 b'convert',
517 b'convert',
518 b'hg.usebranchnames',
518 b'hg.usebranchnames',
519 default=True,
519 default=True,
520 )
520 )
521 coreconfigitem(
521 coreconfigitem(
522 b'convert',
522 b'convert',
523 b'ignoreancestorcheck',
523 b'ignoreancestorcheck',
524 default=False,
524 default=False,
525 experimental=True,
525 experimental=True,
526 )
526 )
527 coreconfigitem(
527 coreconfigitem(
528 b'convert',
528 b'convert',
529 b'localtimezone',
529 b'localtimezone',
530 default=False,
530 default=False,
531 )
531 )
532 coreconfigitem(
532 coreconfigitem(
533 b'convert',
533 b'convert',
534 b'p4.encoding',
534 b'p4.encoding',
535 default=dynamicdefault,
535 default=dynamicdefault,
536 )
536 )
537 coreconfigitem(
537 coreconfigitem(
538 b'convert',
538 b'convert',
539 b'p4.startrev',
539 b'p4.startrev',
540 default=0,
540 default=0,
541 )
541 )
542 coreconfigitem(
542 coreconfigitem(
543 b'convert',
543 b'convert',
544 b'skiptags',
544 b'skiptags',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem(
547 coreconfigitem(
548 b'convert',
548 b'convert',
549 b'svn.debugsvnlog',
549 b'svn.debugsvnlog',
550 default=True,
550 default=True,
551 )
551 )
552 coreconfigitem(
552 coreconfigitem(
553 b'convert',
553 b'convert',
554 b'svn.trunk',
554 b'svn.trunk',
555 default=None,
555 default=None,
556 )
556 )
557 coreconfigitem(
557 coreconfigitem(
558 b'convert',
558 b'convert',
559 b'svn.tags',
559 b'svn.tags',
560 default=None,
560 default=None,
561 )
561 )
562 coreconfigitem(
562 coreconfigitem(
563 b'convert',
563 b'convert',
564 b'svn.branches',
564 b'svn.branches',
565 default=None,
565 default=None,
566 )
566 )
567 coreconfigitem(
567 coreconfigitem(
568 b'convert',
568 b'convert',
569 b'svn.startrev',
569 b'svn.startrev',
570 default=0,
570 default=0,
571 )
571 )
572 coreconfigitem(
572 coreconfigitem(
573 b'convert',
573 b'convert',
574 b'svn.dangerous-set-commit-dates',
574 b'svn.dangerous-set-commit-dates',
575 default=False,
575 default=False,
576 )
576 )
577 coreconfigitem(
577 coreconfigitem(
578 b'debug',
578 b'debug',
579 b'dirstate.delaywrite',
579 b'dirstate.delaywrite',
580 default=0,
580 default=0,
581 )
581 )
582 coreconfigitem(
582 coreconfigitem(
583 b'debug',
583 b'debug',
584 b'revlog.verifyposition.changelog',
584 b'revlog.verifyposition.changelog',
585 default=b'',
585 default=b'',
586 )
586 )
587 coreconfigitem(
587 coreconfigitem(
588 b'defaults',
588 b'defaults',
589 b'.*',
589 b'.*',
590 default=None,
590 default=None,
591 generic=True,
591 generic=True,
592 )
592 )
593 coreconfigitem(
593 coreconfigitem(
594 b'devel',
594 b'devel',
595 b'all-warnings',
595 b'all-warnings',
596 default=False,
596 default=False,
597 )
597 )
598 coreconfigitem(
598 coreconfigitem(
599 b'devel',
599 b'devel',
600 b'bundle2.debug',
600 b'bundle2.debug',
601 default=False,
601 default=False,
602 )
602 )
603 coreconfigitem(
603 coreconfigitem(
604 b'devel',
604 b'devel',
605 b'bundle.delta',
605 b'bundle.delta',
606 default=b'',
606 default=b'',
607 )
607 )
608 coreconfigitem(
608 coreconfigitem(
609 b'devel',
609 b'devel',
610 b'cache-vfs',
610 b'cache-vfs',
611 default=None,
611 default=None,
612 )
612 )
613 coreconfigitem(
613 coreconfigitem(
614 b'devel',
614 b'devel',
615 b'check-locks',
615 b'check-locks',
616 default=False,
616 default=False,
617 )
617 )
618 coreconfigitem(
618 coreconfigitem(
619 b'devel',
619 b'devel',
620 b'check-relroot',
620 b'check-relroot',
621 default=False,
621 default=False,
622 )
622 )
623 # Track copy information for all file, not just "added" one (very slow)
623 # Track copy information for all file, not just "added" one (very slow)
624 coreconfigitem(
624 coreconfigitem(
625 b'devel',
625 b'devel',
626 b'copy-tracing.trace-all-files',
626 b'copy-tracing.trace-all-files',
627 default=False,
627 default=False,
628 )
628 )
629 coreconfigitem(
629 coreconfigitem(
630 b'devel',
630 b'devel',
631 b'default-date',
631 b'default-date',
632 default=None,
632 default=None,
633 )
633 )
634 coreconfigitem(
634 coreconfigitem(
635 b'devel',
635 b'devel',
636 b'deprec-warn',
636 b'deprec-warn',
637 default=False,
637 default=False,
638 )
638 )
639 coreconfigitem(
639 coreconfigitem(
640 b'devel',
640 b'devel',
641 b'disableloaddefaultcerts',
641 b'disableloaddefaultcerts',
642 default=False,
642 default=False,
643 )
643 )
644 coreconfigitem(
644 coreconfigitem(
645 b'devel',
645 b'devel',
646 b'warn-empty-changegroup',
646 b'warn-empty-changegroup',
647 default=False,
647 default=False,
648 )
648 )
649 coreconfigitem(
649 coreconfigitem(
650 b'devel',
650 b'devel',
651 b'legacy.exchange',
651 b'legacy.exchange',
652 default=list,
652 default=list,
653 )
653 )
654 # When True, revlogs use a special reference version of the nodemap, that is not
654 # When True, revlogs use a special reference version of the nodemap, that is not
655 # performant but is "known" to behave properly.
655 # performant but is "known" to behave properly.
656 coreconfigitem(
656 coreconfigitem(
657 b'devel',
657 b'devel',
658 b'persistent-nodemap',
658 b'persistent-nodemap',
659 default=False,
659 default=False,
660 )
660 )
661 coreconfigitem(
661 coreconfigitem(
662 b'devel',
662 b'devel',
663 b'servercafile',
663 b'servercafile',
664 default=b'',
664 default=b'',
665 )
665 )
666 coreconfigitem(
666 coreconfigitem(
667 b'devel',
667 b'devel',
668 b'serverexactprotocol',
668 b'serverexactprotocol',
669 default=b'',
669 default=b'',
670 )
670 )
671 coreconfigitem(
671 coreconfigitem(
672 b'devel',
672 b'devel',
673 b'serverrequirecert',
673 b'serverrequirecert',
674 default=False,
674 default=False,
675 )
675 )
676 coreconfigitem(
676 coreconfigitem(
677 b'devel',
677 b'devel',
678 b'strip-obsmarkers',
678 b'strip-obsmarkers',
679 default=True,
679 default=True,
680 )
680 )
681 coreconfigitem(
681 coreconfigitem(
682 b'devel',
682 b'devel',
683 b'warn-config',
683 b'warn-config',
684 default=None,
684 default=None,
685 )
685 )
686 coreconfigitem(
686 coreconfigitem(
687 b'devel',
687 b'devel',
688 b'warn-config-default',
688 b'warn-config-default',
689 default=None,
689 default=None,
690 )
690 )
691 coreconfigitem(
691 coreconfigitem(
692 b'devel',
692 b'devel',
693 b'user.obsmarker',
693 b'user.obsmarker',
694 default=None,
694 default=None,
695 )
695 )
696 coreconfigitem(
696 coreconfigitem(
697 b'devel',
697 b'devel',
698 b'warn-config-unknown',
698 b'warn-config-unknown',
699 default=None,
699 default=None,
700 )
700 )
701 coreconfigitem(
701 coreconfigitem(
702 b'devel',
702 b'devel',
703 b'debug.copies',
703 b'debug.copies',
704 default=False,
704 default=False,
705 )
705 )
706 coreconfigitem(
706 coreconfigitem(
707 b'devel',
707 b'devel',
708 b'copy-tracing.multi-thread',
708 b'copy-tracing.multi-thread',
709 default=True,
709 default=True,
710 )
710 )
711 coreconfigitem(
711 coreconfigitem(
712 b'devel',
712 b'devel',
713 b'debug.extensions',
713 b'debug.extensions',
714 default=False,
714 default=False,
715 )
715 )
716 coreconfigitem(
716 coreconfigitem(
717 b'devel',
717 b'devel',
718 b'debug.repo-filters',
718 b'debug.repo-filters',
719 default=False,
719 default=False,
720 )
720 )
721 coreconfigitem(
721 coreconfigitem(
722 b'devel',
722 b'devel',
723 b'debug.peer-request',
723 b'debug.peer-request',
724 default=False,
724 default=False,
725 )
725 )
726 # If discovery.exchange-heads is False, the discovery will not start with
726 # If discovery.exchange-heads is False, the discovery will not start with
727 # remote head fetching and local head querying.
727 # remote head fetching and local head querying.
728 coreconfigitem(
728 coreconfigitem(
729 b'devel',
729 b'devel',
730 b'discovery.exchange-heads',
730 b'discovery.exchange-heads',
731 default=True,
731 default=True,
732 )
732 )
733 # If discovery.grow-sample is False, the sample size used in set discovery will
733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 # not be increased through the process
734 # not be increased through the process
735 coreconfigitem(
735 coreconfigitem(
736 b'devel',
736 b'devel',
737 b'discovery.grow-sample',
737 b'discovery.grow-sample',
738 default=True,
738 default=True,
739 )
739 )
740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
741 # adapted to the shape of the undecided set (it is set to the max of:
741 # adapted to the shape of the undecided set (it is set to the max of:
742 # <target-size>, len(roots(undecided)), len(heads(undecided)
742 # <target-size>, len(roots(undecided)), len(heads(undecided)
743 coreconfigitem(
743 coreconfigitem(
744 b'devel',
744 b'devel',
745 b'discovery.grow-sample.dynamic',
745 b'discovery.grow-sample.dynamic',
746 default=True,
746 default=True,
747 )
747 )
748 # discovery.grow-sample.rate control the rate at which the sample grow
748 # discovery.grow-sample.rate control the rate at which the sample grow
749 coreconfigitem(
749 coreconfigitem(
750 b'devel',
750 b'devel',
751 b'discovery.grow-sample.rate',
751 b'discovery.grow-sample.rate',
752 default=1.05,
752 default=1.05,
753 )
753 )
754 # If discovery.randomize is False, random sampling during discovery are
754 # If discovery.randomize is False, random sampling during discovery are
755 # deterministic. It is meant for integration tests.
755 # deterministic. It is meant for integration tests.
756 coreconfigitem(
756 coreconfigitem(
757 b'devel',
757 b'devel',
758 b'discovery.randomize',
758 b'discovery.randomize',
759 default=True,
759 default=True,
760 )
760 )
761 # Control the initial size of the discovery sample
761 # Control the initial size of the discovery sample
762 coreconfigitem(
762 coreconfigitem(
763 b'devel',
763 b'devel',
764 b'discovery.sample-size',
764 b'discovery.sample-size',
765 default=200,
765 default=200,
766 )
766 )
767 # Control the initial size of the discovery for initial change
767 # Control the initial size of the discovery for initial change
768 coreconfigitem(
768 coreconfigitem(
769 b'devel',
769 b'devel',
770 b'discovery.sample-size.initial',
770 b'discovery.sample-size.initial',
771 default=100,
771 default=100,
772 )
772 )
773 _registerdiffopts(section=b'diff')
773 _registerdiffopts(section=b'diff')
774 coreconfigitem(
774 coreconfigitem(
775 b'diff',
775 b'diff',
776 b'merge',
776 b'merge',
777 default=False,
777 default=False,
778 experimental=True,
778 experimental=True,
779 )
779 )
780 coreconfigitem(
780 coreconfigitem(
781 b'email',
781 b'email',
782 b'bcc',
782 b'bcc',
783 default=None,
783 default=None,
784 )
784 )
785 coreconfigitem(
785 coreconfigitem(
786 b'email',
786 b'email',
787 b'cc',
787 b'cc',
788 default=None,
788 default=None,
789 )
789 )
790 coreconfigitem(
790 coreconfigitem(
791 b'email',
791 b'email',
792 b'charsets',
792 b'charsets',
793 default=list,
793 default=list,
794 )
794 )
795 coreconfigitem(
795 coreconfigitem(
796 b'email',
796 b'email',
797 b'from',
797 b'from',
798 default=None,
798 default=None,
799 )
799 )
800 coreconfigitem(
800 coreconfigitem(
801 b'email',
801 b'email',
802 b'method',
802 b'method',
803 default=b'smtp',
803 default=b'smtp',
804 )
804 )
805 coreconfigitem(
805 coreconfigitem(
806 b'email',
806 b'email',
807 b'reply-to',
807 b'reply-to',
808 default=None,
808 default=None,
809 )
809 )
810 coreconfigitem(
810 coreconfigitem(
811 b'email',
811 b'email',
812 b'to',
812 b'to',
813 default=None,
813 default=None,
814 )
814 )
815 coreconfigitem(
815 coreconfigitem(
816 b'experimental',
816 b'experimental',
817 b'archivemetatemplate',
817 b'archivemetatemplate',
818 default=dynamicdefault,
818 default=dynamicdefault,
819 )
819 )
820 coreconfigitem(
820 coreconfigitem(
821 b'experimental',
821 b'experimental',
822 b'auto-publish',
822 b'auto-publish',
823 default=b'publish',
823 default=b'publish',
824 )
824 )
825 coreconfigitem(
825 coreconfigitem(
826 b'experimental',
826 b'experimental',
827 b'bundle-phases',
827 b'bundle-phases',
828 default=False,
828 default=False,
829 )
829 )
830 coreconfigitem(
830 coreconfigitem(
831 b'experimental',
831 b'experimental',
832 b'bundle2-advertise',
832 b'bundle2-advertise',
833 default=True,
833 default=True,
834 )
834 )
835 coreconfigitem(
835 coreconfigitem(
836 b'experimental',
836 b'experimental',
837 b'bundle2-output-capture',
837 b'bundle2-output-capture',
838 default=False,
838 default=False,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'experimental',
841 b'experimental',
842 b'bundle2.pushback',
842 b'bundle2.pushback',
843 default=False,
843 default=False,
844 )
844 )
845 coreconfigitem(
845 coreconfigitem(
846 b'experimental',
846 b'experimental',
847 b'bundle2lazylocking',
847 b'bundle2lazylocking',
848 default=False,
848 default=False,
849 )
849 )
850 coreconfigitem(
850 coreconfigitem(
851 b'experimental',
851 b'experimental',
852 b'bundlecomplevel',
852 b'bundlecomplevel',
853 default=None,
853 default=None,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'experimental',
856 b'experimental',
857 b'bundlecomplevel.bzip2',
857 b'bundlecomplevel.bzip2',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'experimental',
861 b'experimental',
862 b'bundlecomplevel.gzip',
862 b'bundlecomplevel.gzip',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'experimental',
866 b'experimental',
867 b'bundlecomplevel.none',
867 b'bundlecomplevel.none',
868 default=None,
868 default=None,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'experimental',
871 b'experimental',
872 b'bundlecomplevel.zstd',
872 b'bundlecomplevel.zstd',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'bundlecompthreads',
877 b'bundlecompthreads',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'bundlecompthreads.bzip2',
882 b'bundlecompthreads.bzip2',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'bundlecompthreads.gzip',
887 b'bundlecompthreads.gzip',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'bundlecompthreads.none',
892 b'bundlecompthreads.none',
893 default=None,
893 default=None,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'bundlecompthreads.zstd',
897 b'bundlecompthreads.zstd',
898 default=None,
898 default=None,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'changegroup3',
902 b'changegroup3',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'changegroup4',
907 b'changegroup4',
908 default=False,
908 default=False,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'cleanup-as-archived',
912 b'cleanup-as-archived',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'clientcompressionengines',
917 b'clientcompressionengines',
918 default=list,
918 default=list,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'copytrace',
922 b'copytrace',
923 default=b'on',
923 default=b'on',
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'copytrace.movecandidateslimit',
927 b'copytrace.movecandidateslimit',
928 default=100,
928 default=100,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'copytrace.sourcecommitlimit',
932 b'copytrace.sourcecommitlimit',
933 default=100,
933 default=100,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'copies.read-from',
937 b'copies.read-from',
938 default=b"filelog-only",
938 default=b"filelog-only",
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'copies.write-to',
942 b'copies.write-to',
943 default=b'filelog-only',
943 default=b'filelog-only',
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'crecordtest',
947 b'crecordtest',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'directaccess',
952 b'directaccess',
953 default=False,
953 default=False,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'directaccess.revnums',
957 b'directaccess.revnums',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
962 b'dirstate-tree.in-memory',
963 default=False,
963 default=False,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'editortmpinhg',
967 b'editortmpinhg',
968 default=False,
968 default=False,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'evolution',
972 b'evolution',
973 default=list,
973 default=list,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'evolution.allowdivergence',
977 b'evolution.allowdivergence',
978 default=False,
978 default=False,
979 alias=[(b'experimental', b'allowdivergence')],
979 alias=[(b'experimental', b'allowdivergence')],
980 )
980 )
981 coreconfigitem(
981 coreconfigitem(
982 b'experimental',
982 b'experimental',
983 b'evolution.allowunstable',
983 b'evolution.allowunstable',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem(
986 coreconfigitem(
987 b'experimental',
987 b'experimental',
988 b'evolution.createmarkers',
988 b'evolution.createmarkers',
989 default=None,
989 default=None,
990 )
990 )
991 coreconfigitem(
991 coreconfigitem(
992 b'experimental',
992 b'experimental',
993 b'evolution.effect-flags',
993 b'evolution.effect-flags',
994 default=True,
994 default=True,
995 alias=[(b'experimental', b'effect-flags')],
995 alias=[(b'experimental', b'effect-flags')],
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'evolution.exchange',
999 b'evolution.exchange',
1000 default=None,
1000 default=None,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'evolution.bundle-obsmarker',
1004 b'evolution.bundle-obsmarker',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'evolution.bundle-obsmarker:mandatory',
1009 b'evolution.bundle-obsmarker:mandatory',
1010 default=True,
1010 default=True,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'log.topo',
1014 b'log.topo',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'evolution.report-instabilities',
1019 b'evolution.report-instabilities',
1020 default=True,
1020 default=True,
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'evolution.track-operation',
1024 b'evolution.track-operation',
1025 default=True,
1025 default=True,
1026 )
1026 )
1027 # repo-level config to exclude a revset visibility
1027 # repo-level config to exclude a revset visibility
1028 #
1028 #
1029 # The target use case is to use `share` to expose different subset of the same
1029 # The target use case is to use `share` to expose different subset of the same
1030 # repository, especially server side. See also `server.view`.
1030 # repository, especially server side. See also `server.view`.
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'experimental',
1032 b'experimental',
1033 b'extra-filter-revs',
1033 b'extra-filter-revs',
1034 default=None,
1034 default=None,
1035 )
1035 )
1036 coreconfigitem(
1036 coreconfigitem(
1037 b'experimental',
1037 b'experimental',
1038 b'maxdeltachainspan',
1038 b'maxdeltachainspan',
1039 default=-1,
1039 default=-1,
1040 )
1040 )
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 # kept/undeleted them) and creates new filenodes for them
1042 # kept/undeleted them) and creates new filenodes for them
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'experimental',
1044 b'experimental',
1045 b'merge-track-salvaged',
1045 b'merge-track-salvaged',
1046 default=False,
1046 default=False,
1047 )
1047 )
1048 coreconfigitem(
1048 coreconfigitem(
1049 b'experimental',
1049 b'experimental',
1050 b'mergetempdirprefix',
1050 b'mergetempdirprefix',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem(
1053 coreconfigitem(
1054 b'experimental',
1054 b'experimental',
1055 b'mmapindexthreshold',
1055 b'mmapindexthreshold',
1056 default=None,
1056 default=None,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'narrow',
1060 b'narrow',
1061 default=False,
1061 default=False,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'nonnormalparanoidcheck',
1065 b'nonnormalparanoidcheck',
1066 default=False,
1066 default=False,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'exportableenviron',
1070 b'exportableenviron',
1071 default=list,
1071 default=list,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'experimental',
1074 b'experimental',
1075 b'extendedheader.index',
1075 b'extendedheader.index',
1076 default=None,
1076 default=None,
1077 )
1077 )
1078 coreconfigitem(
1078 coreconfigitem(
1079 b'experimental',
1079 b'experimental',
1080 b'extendedheader.similarity',
1080 b'extendedheader.similarity',
1081 default=False,
1081 default=False,
1082 )
1082 )
1083 coreconfigitem(
1083 coreconfigitem(
1084 b'experimental',
1084 b'experimental',
1085 b'graphshorten',
1085 b'graphshorten',
1086 default=False,
1086 default=False,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'experimental',
1089 b'experimental',
1090 b'graphstyle.parent',
1090 b'graphstyle.parent',
1091 default=dynamicdefault,
1091 default=dynamicdefault,
1092 )
1092 )
1093 coreconfigitem(
1093 coreconfigitem(
1094 b'experimental',
1094 b'experimental',
1095 b'graphstyle.missing',
1095 b'graphstyle.missing',
1096 default=dynamicdefault,
1096 default=dynamicdefault,
1097 )
1097 )
1098 coreconfigitem(
1098 coreconfigitem(
1099 b'experimental',
1099 b'experimental',
1100 b'graphstyle.grandparent',
1100 b'graphstyle.grandparent',
1101 default=dynamicdefault,
1101 default=dynamicdefault,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'experimental',
1104 b'experimental',
1105 b'hook-track-tags',
1105 b'hook-track-tags',
1106 default=False,
1106 default=False,
1107 )
1107 )
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'httppeer.advertise-v2',
1110 b'httppeer.advertise-v2',
1111 default=False,
1111 default=False,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'httppeer.v2-encoder-order',
1115 b'httppeer.v2-encoder-order',
1116 default=None,
1116 default=None,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'experimental',
1119 b'experimental',
1120 b'httppostargs',
1120 b'httppostargs',
1121 default=False,
1121 default=False,
1122 )
1122 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1125
1126 coreconfigitem(
1126 coreconfigitem(
1127 b'experimental',
1127 b'experimental',
1128 b'obsmarkers-exchange-debug',
1128 b'obsmarkers-exchange-debug',
1129 default=False,
1129 default=False,
1130 )
1130 )
1131 coreconfigitem(
1131 coreconfigitem(
1132 b'experimental',
1132 b'experimental',
1133 b'remotenames',
1133 b'remotenames',
1134 default=False,
1134 default=False,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'experimental',
1137 b'experimental',
1138 b'removeemptydirs',
1138 b'removeemptydirs',
1139 default=True,
1139 default=True,
1140 )
1140 )
1141 coreconfigitem(
1141 coreconfigitem(
1142 b'experimental',
1142 b'experimental',
1143 b'revert.interactive.select-to-keep',
1143 b'revert.interactive.select-to-keep',
1144 default=False,
1144 default=False,
1145 )
1145 )
1146 coreconfigitem(
1146 coreconfigitem(
1147 b'experimental',
1147 b'experimental',
1148 b'revisions.prefixhexnode',
1148 b'revisions.prefixhexnode',
1149 default=False,
1149 default=False,
1150 )
1150 )
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * properly hide uncommitted content to other process
1154 # * expose transaction content hooks during pre-commit validation
1153 # * expose transaction content hooks during pre-commit validation
1155 # * include management of a persistent nodemap in the main docket
1154 # * include management of a persistent nodemap in the main docket
1156 # * enforce a "no-truncate" policy for mmap safety
1155 # * enforce a "no-truncate" policy for mmap safety
1157 # - for censoring operation
1156 # - for censoring operation
1158 # - for stripping operation
1157 # - for stripping operation
1159 # - for rollback operation
1158 # - for rollback operation
1160 # * proper streaming (race free) of the docket file
1159 # * proper streaming (race free) of the docket file
1161 # * store the data size in the docket to simplify sidedata rewrite.
1160 # * store the data size in the docket to simplify sidedata rewrite.
1162 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1161 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1163 # * Exchange-wise, we will also need to do something more efficient than
1162 # * Exchange-wise, we will also need to do something more efficient than
1164 # keeping references to the affected revlogs, especially memory-wise when
1163 # keeping references to the affected revlogs, especially memory-wise when
1165 # rewriting sidedata.
1164 # rewriting sidedata.
1166 # * sidedata compression
1165 # * sidedata compression
1167 # * introduce a proper solution to reduce the number of filelog related files.
1166 # * introduce a proper solution to reduce the number of filelog related files.
1168 # * Improvement to consider
1167 # * Improvement to consider
1169 # - track compression mode in the index entris instead of the chunks
1168 # - track compression mode in the index entris instead of the chunks
1170 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1169 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1171 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1172 # - keep track of chain base or size (probably not that useful anymore)
1171 # - keep track of chain base or size (probably not that useful anymore)
1173 # - store data and sidedata in different files
1172 # - store data and sidedata in different files
1174 coreconfigitem(
1173 coreconfigitem(
1175 b'experimental',
1174 b'experimental',
1176 b'revlogv2',
1175 b'revlogv2',
1177 default=None,
1176 default=None,
1178 )
1177 )
1179 coreconfigitem(
1178 coreconfigitem(
1180 b'experimental',
1179 b'experimental',
1181 b'revisions.disambiguatewithin',
1180 b'revisions.disambiguatewithin',
1182 default=None,
1181 default=None,
1183 )
1182 )
1184 coreconfigitem(
1183 coreconfigitem(
1185 b'experimental',
1184 b'experimental',
1186 b'rust.index',
1185 b'rust.index',
1187 default=False,
1186 default=False,
1188 )
1187 )
1189 coreconfigitem(
1188 coreconfigitem(
1190 b'experimental',
1189 b'experimental',
1191 b'server.filesdata.recommended-batch-size',
1190 b'server.filesdata.recommended-batch-size',
1192 default=50000,
1191 default=50000,
1193 )
1192 )
1194 coreconfigitem(
1193 coreconfigitem(
1195 b'experimental',
1194 b'experimental',
1196 b'server.manifestdata.recommended-batch-size',
1195 b'server.manifestdata.recommended-batch-size',
1197 default=100000,
1196 default=100000,
1198 )
1197 )
1199 coreconfigitem(
1198 coreconfigitem(
1200 b'experimental',
1199 b'experimental',
1201 b'server.stream-narrow-clones',
1200 b'server.stream-narrow-clones',
1202 default=False,
1201 default=False,
1203 )
1202 )
1204 coreconfigitem(
1203 coreconfigitem(
1205 b'experimental',
1204 b'experimental',
1206 b'single-head-per-branch',
1205 b'single-head-per-branch',
1207 default=False,
1206 default=False,
1208 )
1207 )
1209 coreconfigitem(
1208 coreconfigitem(
1210 b'experimental',
1209 b'experimental',
1211 b'single-head-per-branch:account-closed-heads',
1210 b'single-head-per-branch:account-closed-heads',
1212 default=False,
1211 default=False,
1213 )
1212 )
1214 coreconfigitem(
1213 coreconfigitem(
1215 b'experimental',
1214 b'experimental',
1216 b'single-head-per-branch:public-changes-only',
1215 b'single-head-per-branch:public-changes-only',
1217 default=False,
1216 default=False,
1218 )
1217 )
1219 coreconfigitem(
1218 coreconfigitem(
1220 b'experimental',
1219 b'experimental',
1221 b'sshserver.support-v2',
1220 b'sshserver.support-v2',
1222 default=False,
1221 default=False,
1223 )
1222 )
1224 coreconfigitem(
1223 coreconfigitem(
1225 b'experimental',
1224 b'experimental',
1226 b'sparse-read',
1225 b'sparse-read',
1227 default=False,
1226 default=False,
1228 )
1227 )
1229 coreconfigitem(
1228 coreconfigitem(
1230 b'experimental',
1229 b'experimental',
1231 b'sparse-read.density-threshold',
1230 b'sparse-read.density-threshold',
1232 default=0.50,
1231 default=0.50,
1233 )
1232 )
1234 coreconfigitem(
1233 coreconfigitem(
1235 b'experimental',
1234 b'experimental',
1236 b'sparse-read.min-gap-size',
1235 b'sparse-read.min-gap-size',
1237 default=b'65K',
1236 default=b'65K',
1238 )
1237 )
1239 coreconfigitem(
1238 coreconfigitem(
1240 b'experimental',
1239 b'experimental',
1241 b'treemanifest',
1240 b'treemanifest',
1242 default=False,
1241 default=False,
1243 )
1242 )
1244 coreconfigitem(
1243 coreconfigitem(
1245 b'experimental',
1244 b'experimental',
1246 b'update.atomic-file',
1245 b'update.atomic-file',
1247 default=False,
1246 default=False,
1248 )
1247 )
1249 coreconfigitem(
1248 coreconfigitem(
1250 b'experimental',
1249 b'experimental',
1251 b'sshpeer.advertise-v2',
1250 b'sshpeer.advertise-v2',
1252 default=False,
1251 default=False,
1253 )
1252 )
1254 coreconfigitem(
1253 coreconfigitem(
1255 b'experimental',
1254 b'experimental',
1256 b'web.apiserver',
1255 b'web.apiserver',
1257 default=False,
1256 default=False,
1258 )
1257 )
1259 coreconfigitem(
1258 coreconfigitem(
1260 b'experimental',
1259 b'experimental',
1261 b'web.api.http-v2',
1260 b'web.api.http-v2',
1262 default=False,
1261 default=False,
1263 )
1262 )
1264 coreconfigitem(
1263 coreconfigitem(
1265 b'experimental',
1264 b'experimental',
1266 b'web.api.debugreflect',
1265 b'web.api.debugreflect',
1267 default=False,
1266 default=False,
1268 )
1267 )
1269 coreconfigitem(
1268 coreconfigitem(
1270 b'experimental',
1269 b'experimental',
1271 b'worker.wdir-get-thread-safe',
1270 b'worker.wdir-get-thread-safe',
1272 default=False,
1271 default=False,
1273 )
1272 )
1274 coreconfigitem(
1273 coreconfigitem(
1275 b'experimental',
1274 b'experimental',
1276 b'worker.repository-upgrade',
1275 b'worker.repository-upgrade',
1277 default=False,
1276 default=False,
1278 )
1277 )
1279 coreconfigitem(
1278 coreconfigitem(
1280 b'experimental',
1279 b'experimental',
1281 b'xdiff',
1280 b'xdiff',
1282 default=False,
1281 default=False,
1283 )
1282 )
1284 coreconfigitem(
1283 coreconfigitem(
1285 b'extensions',
1284 b'extensions',
1286 b'.*',
1285 b'.*',
1287 default=None,
1286 default=None,
1288 generic=True,
1287 generic=True,
1289 )
1288 )
1290 coreconfigitem(
1289 coreconfigitem(
1291 b'extdata',
1290 b'extdata',
1292 b'.*',
1291 b'.*',
1293 default=None,
1292 default=None,
1294 generic=True,
1293 generic=True,
1295 )
1294 )
1296 coreconfigitem(
1295 coreconfigitem(
1297 b'format',
1296 b'format',
1298 b'bookmarks-in-store',
1297 b'bookmarks-in-store',
1299 default=False,
1298 default=False,
1300 )
1299 )
1301 coreconfigitem(
1300 coreconfigitem(
1302 b'format',
1301 b'format',
1303 b'chunkcachesize',
1302 b'chunkcachesize',
1304 default=None,
1303 default=None,
1305 experimental=True,
1304 experimental=True,
1306 )
1305 )
1307 coreconfigitem(
1306 coreconfigitem(
1308 b'format',
1307 b'format',
1309 b'dotencode',
1308 b'dotencode',
1310 default=True,
1309 default=True,
1311 )
1310 )
1312 coreconfigitem(
1311 coreconfigitem(
1313 b'format',
1312 b'format',
1314 b'generaldelta',
1313 b'generaldelta',
1315 default=False,
1314 default=False,
1316 experimental=True,
1315 experimental=True,
1317 )
1316 )
1318 coreconfigitem(
1317 coreconfigitem(
1319 b'format',
1318 b'format',
1320 b'manifestcachesize',
1319 b'manifestcachesize',
1321 default=None,
1320 default=None,
1322 experimental=True,
1321 experimental=True,
1323 )
1322 )
1324 coreconfigitem(
1323 coreconfigitem(
1325 b'format',
1324 b'format',
1326 b'maxchainlen',
1325 b'maxchainlen',
1327 default=dynamicdefault,
1326 default=dynamicdefault,
1328 experimental=True,
1327 experimental=True,
1329 )
1328 )
1330 coreconfigitem(
1329 coreconfigitem(
1331 b'format',
1330 b'format',
1332 b'obsstore-version',
1331 b'obsstore-version',
1333 default=None,
1332 default=None,
1334 )
1333 )
1335 coreconfigitem(
1334 coreconfigitem(
1336 b'format',
1335 b'format',
1337 b'sparse-revlog',
1336 b'sparse-revlog',
1338 default=True,
1337 default=True,
1339 )
1338 )
1340 coreconfigitem(
1339 coreconfigitem(
1341 b'format',
1340 b'format',
1342 b'revlog-compression',
1341 b'revlog-compression',
1343 default=lambda: [b'zstd', b'zlib'],
1342 default=lambda: [b'zstd', b'zlib'],
1344 alias=[(b'experimental', b'format.compression')],
1343 alias=[(b'experimental', b'format.compression')],
1345 )
1344 )
1346 coreconfigitem(
1345 coreconfigitem(
1347 b'format',
1346 b'format',
1348 b'usefncache',
1347 b'usefncache',
1349 default=True,
1348 default=True,
1350 )
1349 )
1351 coreconfigitem(
1350 coreconfigitem(
1352 b'format',
1351 b'format',
1353 b'usegeneraldelta',
1352 b'usegeneraldelta',
1354 default=True,
1353 default=True,
1355 )
1354 )
1356 coreconfigitem(
1355 coreconfigitem(
1357 b'format',
1356 b'format',
1358 b'usestore',
1357 b'usestore',
1359 default=True,
1358 default=True,
1360 )
1359 )
1361
1360
1362
1361
1363 def _persistent_nodemap_default():
1362 def _persistent_nodemap_default():
1364 """compute `use-persistent-nodemap` default value
1363 """compute `use-persistent-nodemap` default value
1365
1364
1366 The feature is disabled unless a fast implementation is available.
1365 The feature is disabled unless a fast implementation is available.
1367 """
1366 """
1368 from . import policy
1367 from . import policy
1369
1368
1370 return policy.importrust('revlog') is not None
1369 return policy.importrust('revlog') is not None
1371
1370
1372
1371
1373 coreconfigitem(
1372 coreconfigitem(
1374 b'format',
1373 b'format',
1375 b'use-persistent-nodemap',
1374 b'use-persistent-nodemap',
1376 default=_persistent_nodemap_default,
1375 default=_persistent_nodemap_default,
1377 )
1376 )
1378 coreconfigitem(
1377 coreconfigitem(
1379 b'format',
1378 b'format',
1380 b'exp-use-copies-side-data-changeset',
1379 b'exp-use-copies-side-data-changeset',
1381 default=False,
1380 default=False,
1382 experimental=True,
1381 experimental=True,
1383 )
1382 )
1384 coreconfigitem(
1383 coreconfigitem(
1385 b'format',
1384 b'format',
1386 b'use-share-safe',
1385 b'use-share-safe',
1387 default=False,
1386 default=False,
1388 )
1387 )
1389 coreconfigitem(
1388 coreconfigitem(
1390 b'format',
1389 b'format',
1391 b'internal-phase',
1390 b'internal-phase',
1392 default=False,
1391 default=False,
1393 experimental=True,
1392 experimental=True,
1394 )
1393 )
1395 coreconfigitem(
1394 coreconfigitem(
1396 b'fsmonitor',
1395 b'fsmonitor',
1397 b'warn_when_unused',
1396 b'warn_when_unused',
1398 default=True,
1397 default=True,
1399 )
1398 )
1400 coreconfigitem(
1399 coreconfigitem(
1401 b'fsmonitor',
1400 b'fsmonitor',
1402 b'warn_update_file_count',
1401 b'warn_update_file_count',
1403 default=50000,
1402 default=50000,
1404 )
1403 )
1405 coreconfigitem(
1404 coreconfigitem(
1406 b'fsmonitor',
1405 b'fsmonitor',
1407 b'warn_update_file_count_rust',
1406 b'warn_update_file_count_rust',
1408 default=400000,
1407 default=400000,
1409 )
1408 )
1410 coreconfigitem(
1409 coreconfigitem(
1411 b'help',
1410 b'help',
1412 br'hidden-command\..*',
1411 br'hidden-command\..*',
1413 default=False,
1412 default=False,
1414 generic=True,
1413 generic=True,
1415 )
1414 )
1416 coreconfigitem(
1415 coreconfigitem(
1417 b'help',
1416 b'help',
1418 br'hidden-topic\..*',
1417 br'hidden-topic\..*',
1419 default=False,
1418 default=False,
1420 generic=True,
1419 generic=True,
1421 )
1420 )
1422 coreconfigitem(
1421 coreconfigitem(
1423 b'hooks',
1422 b'hooks',
1424 b'[^:]*',
1423 b'[^:]*',
1425 default=dynamicdefault,
1424 default=dynamicdefault,
1426 generic=True,
1425 generic=True,
1427 )
1426 )
1428 coreconfigitem(
1427 coreconfigitem(
1429 b'hooks',
1428 b'hooks',
1430 b'.*:run-with-plain',
1429 b'.*:run-with-plain',
1431 default=True,
1430 default=True,
1432 generic=True,
1431 generic=True,
1433 )
1432 )
1434 coreconfigitem(
1433 coreconfigitem(
1435 b'hgweb-paths',
1434 b'hgweb-paths',
1436 b'.*',
1435 b'.*',
1437 default=list,
1436 default=list,
1438 generic=True,
1437 generic=True,
1439 )
1438 )
1440 coreconfigitem(
1439 coreconfigitem(
1441 b'hostfingerprints',
1440 b'hostfingerprints',
1442 b'.*',
1441 b'.*',
1443 default=list,
1442 default=list,
1444 generic=True,
1443 generic=True,
1445 )
1444 )
1446 coreconfigitem(
1445 coreconfigitem(
1447 b'hostsecurity',
1446 b'hostsecurity',
1448 b'ciphers',
1447 b'ciphers',
1449 default=None,
1448 default=None,
1450 )
1449 )
1451 coreconfigitem(
1450 coreconfigitem(
1452 b'hostsecurity',
1451 b'hostsecurity',
1453 b'minimumprotocol',
1452 b'minimumprotocol',
1454 default=dynamicdefault,
1453 default=dynamicdefault,
1455 )
1454 )
1456 coreconfigitem(
1455 coreconfigitem(
1457 b'hostsecurity',
1456 b'hostsecurity',
1458 b'.*:minimumprotocol$',
1457 b'.*:minimumprotocol$',
1459 default=dynamicdefault,
1458 default=dynamicdefault,
1460 generic=True,
1459 generic=True,
1461 )
1460 )
1462 coreconfigitem(
1461 coreconfigitem(
1463 b'hostsecurity',
1462 b'hostsecurity',
1464 b'.*:ciphers$',
1463 b'.*:ciphers$',
1465 default=dynamicdefault,
1464 default=dynamicdefault,
1466 generic=True,
1465 generic=True,
1467 )
1466 )
1468 coreconfigitem(
1467 coreconfigitem(
1469 b'hostsecurity',
1468 b'hostsecurity',
1470 b'.*:fingerprints$',
1469 b'.*:fingerprints$',
1471 default=list,
1470 default=list,
1472 generic=True,
1471 generic=True,
1473 )
1472 )
1474 coreconfigitem(
1473 coreconfigitem(
1475 b'hostsecurity',
1474 b'hostsecurity',
1476 b'.*:verifycertsfile$',
1475 b'.*:verifycertsfile$',
1477 default=None,
1476 default=None,
1478 generic=True,
1477 generic=True,
1479 )
1478 )
1480
1479
1481 coreconfigitem(
1480 coreconfigitem(
1482 b'http_proxy',
1481 b'http_proxy',
1483 b'always',
1482 b'always',
1484 default=False,
1483 default=False,
1485 )
1484 )
1486 coreconfigitem(
1485 coreconfigitem(
1487 b'http_proxy',
1486 b'http_proxy',
1488 b'host',
1487 b'host',
1489 default=None,
1488 default=None,
1490 )
1489 )
1491 coreconfigitem(
1490 coreconfigitem(
1492 b'http_proxy',
1491 b'http_proxy',
1493 b'no',
1492 b'no',
1494 default=list,
1493 default=list,
1495 )
1494 )
1496 coreconfigitem(
1495 coreconfigitem(
1497 b'http_proxy',
1496 b'http_proxy',
1498 b'passwd',
1497 b'passwd',
1499 default=None,
1498 default=None,
1500 )
1499 )
1501 coreconfigitem(
1500 coreconfigitem(
1502 b'http_proxy',
1501 b'http_proxy',
1503 b'user',
1502 b'user',
1504 default=None,
1503 default=None,
1505 )
1504 )
1506
1505
1507 coreconfigitem(
1506 coreconfigitem(
1508 b'http',
1507 b'http',
1509 b'timeout',
1508 b'timeout',
1510 default=None,
1509 default=None,
1511 )
1510 )
1512
1511
1513 coreconfigitem(
1512 coreconfigitem(
1514 b'logtoprocess',
1513 b'logtoprocess',
1515 b'commandexception',
1514 b'commandexception',
1516 default=None,
1515 default=None,
1517 )
1516 )
1518 coreconfigitem(
1517 coreconfigitem(
1519 b'logtoprocess',
1518 b'logtoprocess',
1520 b'commandfinish',
1519 b'commandfinish',
1521 default=None,
1520 default=None,
1522 )
1521 )
1523 coreconfigitem(
1522 coreconfigitem(
1524 b'logtoprocess',
1523 b'logtoprocess',
1525 b'command',
1524 b'command',
1526 default=None,
1525 default=None,
1527 )
1526 )
1528 coreconfigitem(
1527 coreconfigitem(
1529 b'logtoprocess',
1528 b'logtoprocess',
1530 b'develwarn',
1529 b'develwarn',
1531 default=None,
1530 default=None,
1532 )
1531 )
1533 coreconfigitem(
1532 coreconfigitem(
1534 b'logtoprocess',
1533 b'logtoprocess',
1535 b'uiblocked',
1534 b'uiblocked',
1536 default=None,
1535 default=None,
1537 )
1536 )
1538 coreconfigitem(
1537 coreconfigitem(
1539 b'merge',
1538 b'merge',
1540 b'checkunknown',
1539 b'checkunknown',
1541 default=b'abort',
1540 default=b'abort',
1542 )
1541 )
1543 coreconfigitem(
1542 coreconfigitem(
1544 b'merge',
1543 b'merge',
1545 b'checkignored',
1544 b'checkignored',
1546 default=b'abort',
1545 default=b'abort',
1547 )
1546 )
1548 coreconfigitem(
1547 coreconfigitem(
1549 b'experimental',
1548 b'experimental',
1550 b'merge.checkpathconflicts',
1549 b'merge.checkpathconflicts',
1551 default=False,
1550 default=False,
1552 )
1551 )
1553 coreconfigitem(
1552 coreconfigitem(
1554 b'merge',
1553 b'merge',
1555 b'followcopies',
1554 b'followcopies',
1556 default=True,
1555 default=True,
1557 )
1556 )
1558 coreconfigitem(
1557 coreconfigitem(
1559 b'merge',
1558 b'merge',
1560 b'on-failure',
1559 b'on-failure',
1561 default=b'continue',
1560 default=b'continue',
1562 )
1561 )
1563 coreconfigitem(
1562 coreconfigitem(
1564 b'merge',
1563 b'merge',
1565 b'preferancestor',
1564 b'preferancestor',
1566 default=lambda: [b'*'],
1565 default=lambda: [b'*'],
1567 experimental=True,
1566 experimental=True,
1568 )
1567 )
1569 coreconfigitem(
1568 coreconfigitem(
1570 b'merge',
1569 b'merge',
1571 b'strict-capability-check',
1570 b'strict-capability-check',
1572 default=False,
1571 default=False,
1573 )
1572 )
1574 coreconfigitem(
1573 coreconfigitem(
1575 b'merge-tools',
1574 b'merge-tools',
1576 b'.*',
1575 b'.*',
1577 default=None,
1576 default=None,
1578 generic=True,
1577 generic=True,
1579 )
1578 )
1580 coreconfigitem(
1579 coreconfigitem(
1581 b'merge-tools',
1580 b'merge-tools',
1582 br'.*\.args$',
1581 br'.*\.args$',
1583 default=b"$local $base $other",
1582 default=b"$local $base $other",
1584 generic=True,
1583 generic=True,
1585 priority=-1,
1584 priority=-1,
1586 )
1585 )
1587 coreconfigitem(
1586 coreconfigitem(
1588 b'merge-tools',
1587 b'merge-tools',
1589 br'.*\.binary$',
1588 br'.*\.binary$',
1590 default=False,
1589 default=False,
1591 generic=True,
1590 generic=True,
1592 priority=-1,
1591 priority=-1,
1593 )
1592 )
1594 coreconfigitem(
1593 coreconfigitem(
1595 b'merge-tools',
1594 b'merge-tools',
1596 br'.*\.check$',
1595 br'.*\.check$',
1597 default=list,
1596 default=list,
1598 generic=True,
1597 generic=True,
1599 priority=-1,
1598 priority=-1,
1600 )
1599 )
1601 coreconfigitem(
1600 coreconfigitem(
1602 b'merge-tools',
1601 b'merge-tools',
1603 br'.*\.checkchanged$',
1602 br'.*\.checkchanged$',
1604 default=False,
1603 default=False,
1605 generic=True,
1604 generic=True,
1606 priority=-1,
1605 priority=-1,
1607 )
1606 )
1608 coreconfigitem(
1607 coreconfigitem(
1609 b'merge-tools',
1608 b'merge-tools',
1610 br'.*\.executable$',
1609 br'.*\.executable$',
1611 default=dynamicdefault,
1610 default=dynamicdefault,
1612 generic=True,
1611 generic=True,
1613 priority=-1,
1612 priority=-1,
1614 )
1613 )
1615 coreconfigitem(
1614 coreconfigitem(
1616 b'merge-tools',
1615 b'merge-tools',
1617 br'.*\.fixeol$',
1616 br'.*\.fixeol$',
1618 default=False,
1617 default=False,
1619 generic=True,
1618 generic=True,
1620 priority=-1,
1619 priority=-1,
1621 )
1620 )
1622 coreconfigitem(
1621 coreconfigitem(
1623 b'merge-tools',
1622 b'merge-tools',
1624 br'.*\.gui$',
1623 br'.*\.gui$',
1625 default=False,
1624 default=False,
1626 generic=True,
1625 generic=True,
1627 priority=-1,
1626 priority=-1,
1628 )
1627 )
1629 coreconfigitem(
1628 coreconfigitem(
1630 b'merge-tools',
1629 b'merge-tools',
1631 br'.*\.mergemarkers$',
1630 br'.*\.mergemarkers$',
1632 default=b'basic',
1631 default=b'basic',
1633 generic=True,
1632 generic=True,
1634 priority=-1,
1633 priority=-1,
1635 )
1634 )
1636 coreconfigitem(
1635 coreconfigitem(
1637 b'merge-tools',
1636 b'merge-tools',
1638 br'.*\.mergemarkertemplate$',
1637 br'.*\.mergemarkertemplate$',
1639 default=dynamicdefault, # take from command-templates.mergemarker
1638 default=dynamicdefault, # take from command-templates.mergemarker
1640 generic=True,
1639 generic=True,
1641 priority=-1,
1640 priority=-1,
1642 )
1641 )
1643 coreconfigitem(
1642 coreconfigitem(
1644 b'merge-tools',
1643 b'merge-tools',
1645 br'.*\.priority$',
1644 br'.*\.priority$',
1646 default=0,
1645 default=0,
1647 generic=True,
1646 generic=True,
1648 priority=-1,
1647 priority=-1,
1649 )
1648 )
1650 coreconfigitem(
1649 coreconfigitem(
1651 b'merge-tools',
1650 b'merge-tools',
1652 br'.*\.premerge$',
1651 br'.*\.premerge$',
1653 default=dynamicdefault,
1652 default=dynamicdefault,
1654 generic=True,
1653 generic=True,
1655 priority=-1,
1654 priority=-1,
1656 )
1655 )
1657 coreconfigitem(
1656 coreconfigitem(
1658 b'merge-tools',
1657 b'merge-tools',
1659 br'.*\.symlink$',
1658 br'.*\.symlink$',
1660 default=False,
1659 default=False,
1661 generic=True,
1660 generic=True,
1662 priority=-1,
1661 priority=-1,
1663 )
1662 )
1664 coreconfigitem(
1663 coreconfigitem(
1665 b'pager',
1664 b'pager',
1666 b'attend-.*',
1665 b'attend-.*',
1667 default=dynamicdefault,
1666 default=dynamicdefault,
1668 generic=True,
1667 generic=True,
1669 )
1668 )
1670 coreconfigitem(
1669 coreconfigitem(
1671 b'pager',
1670 b'pager',
1672 b'ignore',
1671 b'ignore',
1673 default=list,
1672 default=list,
1674 )
1673 )
1675 coreconfigitem(
1674 coreconfigitem(
1676 b'pager',
1675 b'pager',
1677 b'pager',
1676 b'pager',
1678 default=dynamicdefault,
1677 default=dynamicdefault,
1679 )
1678 )
1680 coreconfigitem(
1679 coreconfigitem(
1681 b'patch',
1680 b'patch',
1682 b'eol',
1681 b'eol',
1683 default=b'strict',
1682 default=b'strict',
1684 )
1683 )
1685 coreconfigitem(
1684 coreconfigitem(
1686 b'patch',
1685 b'patch',
1687 b'fuzz',
1686 b'fuzz',
1688 default=2,
1687 default=2,
1689 )
1688 )
1690 coreconfigitem(
1689 coreconfigitem(
1691 b'paths',
1690 b'paths',
1692 b'default',
1691 b'default',
1693 default=None,
1692 default=None,
1694 )
1693 )
1695 coreconfigitem(
1694 coreconfigitem(
1696 b'paths',
1695 b'paths',
1697 b'default-push',
1696 b'default-push',
1698 default=None,
1697 default=None,
1699 )
1698 )
1700 coreconfigitem(
1699 coreconfigitem(
1701 b'paths',
1700 b'paths',
1702 b'.*',
1701 b'.*',
1703 default=None,
1702 default=None,
1704 generic=True,
1703 generic=True,
1705 )
1704 )
1706 coreconfigitem(
1705 coreconfigitem(
1707 b'phases',
1706 b'phases',
1708 b'checksubrepos',
1707 b'checksubrepos',
1709 default=b'follow',
1708 default=b'follow',
1710 )
1709 )
1711 coreconfigitem(
1710 coreconfigitem(
1712 b'phases',
1711 b'phases',
1713 b'new-commit',
1712 b'new-commit',
1714 default=b'draft',
1713 default=b'draft',
1715 )
1714 )
1716 coreconfigitem(
1715 coreconfigitem(
1717 b'phases',
1716 b'phases',
1718 b'publish',
1717 b'publish',
1719 default=True,
1718 default=True,
1720 )
1719 )
1721 coreconfigitem(
1720 coreconfigitem(
1722 b'profiling',
1721 b'profiling',
1723 b'enabled',
1722 b'enabled',
1724 default=False,
1723 default=False,
1725 )
1724 )
1726 coreconfigitem(
1725 coreconfigitem(
1727 b'profiling',
1726 b'profiling',
1728 b'format',
1727 b'format',
1729 default=b'text',
1728 default=b'text',
1730 )
1729 )
1731 coreconfigitem(
1730 coreconfigitem(
1732 b'profiling',
1731 b'profiling',
1733 b'freq',
1732 b'freq',
1734 default=1000,
1733 default=1000,
1735 )
1734 )
1736 coreconfigitem(
1735 coreconfigitem(
1737 b'profiling',
1736 b'profiling',
1738 b'limit',
1737 b'limit',
1739 default=30,
1738 default=30,
1740 )
1739 )
1741 coreconfigitem(
1740 coreconfigitem(
1742 b'profiling',
1741 b'profiling',
1743 b'nested',
1742 b'nested',
1744 default=0,
1743 default=0,
1745 )
1744 )
1746 coreconfigitem(
1745 coreconfigitem(
1747 b'profiling',
1746 b'profiling',
1748 b'output',
1747 b'output',
1749 default=None,
1748 default=None,
1750 )
1749 )
1751 coreconfigitem(
1750 coreconfigitem(
1752 b'profiling',
1751 b'profiling',
1753 b'showmax',
1752 b'showmax',
1754 default=0.999,
1753 default=0.999,
1755 )
1754 )
1756 coreconfigitem(
1755 coreconfigitem(
1757 b'profiling',
1756 b'profiling',
1758 b'showmin',
1757 b'showmin',
1759 default=dynamicdefault,
1758 default=dynamicdefault,
1760 )
1759 )
1761 coreconfigitem(
1760 coreconfigitem(
1762 b'profiling',
1761 b'profiling',
1763 b'showtime',
1762 b'showtime',
1764 default=True,
1763 default=True,
1765 )
1764 )
1766 coreconfigitem(
1765 coreconfigitem(
1767 b'profiling',
1766 b'profiling',
1768 b'sort',
1767 b'sort',
1769 default=b'inlinetime',
1768 default=b'inlinetime',
1770 )
1769 )
1771 coreconfigitem(
1770 coreconfigitem(
1772 b'profiling',
1771 b'profiling',
1773 b'statformat',
1772 b'statformat',
1774 default=b'hotpath',
1773 default=b'hotpath',
1775 )
1774 )
1776 coreconfigitem(
1775 coreconfigitem(
1777 b'profiling',
1776 b'profiling',
1778 b'time-track',
1777 b'time-track',
1779 default=dynamicdefault,
1778 default=dynamicdefault,
1780 )
1779 )
1781 coreconfigitem(
1780 coreconfigitem(
1782 b'profiling',
1781 b'profiling',
1783 b'type',
1782 b'type',
1784 default=b'stat',
1783 default=b'stat',
1785 )
1784 )
1786 coreconfigitem(
1785 coreconfigitem(
1787 b'progress',
1786 b'progress',
1788 b'assume-tty',
1787 b'assume-tty',
1789 default=False,
1788 default=False,
1790 )
1789 )
1791 coreconfigitem(
1790 coreconfigitem(
1792 b'progress',
1791 b'progress',
1793 b'changedelay',
1792 b'changedelay',
1794 default=1,
1793 default=1,
1795 )
1794 )
1796 coreconfigitem(
1795 coreconfigitem(
1797 b'progress',
1796 b'progress',
1798 b'clear-complete',
1797 b'clear-complete',
1799 default=True,
1798 default=True,
1800 )
1799 )
1801 coreconfigitem(
1800 coreconfigitem(
1802 b'progress',
1801 b'progress',
1803 b'debug',
1802 b'debug',
1804 default=False,
1803 default=False,
1805 )
1804 )
1806 coreconfigitem(
1805 coreconfigitem(
1807 b'progress',
1806 b'progress',
1808 b'delay',
1807 b'delay',
1809 default=3,
1808 default=3,
1810 )
1809 )
1811 coreconfigitem(
1810 coreconfigitem(
1812 b'progress',
1811 b'progress',
1813 b'disable',
1812 b'disable',
1814 default=False,
1813 default=False,
1815 )
1814 )
1816 coreconfigitem(
1815 coreconfigitem(
1817 b'progress',
1816 b'progress',
1818 b'estimateinterval',
1817 b'estimateinterval',
1819 default=60.0,
1818 default=60.0,
1820 )
1819 )
1821 coreconfigitem(
1820 coreconfigitem(
1822 b'progress',
1821 b'progress',
1823 b'format',
1822 b'format',
1824 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1823 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1825 )
1824 )
1826 coreconfigitem(
1825 coreconfigitem(
1827 b'progress',
1826 b'progress',
1828 b'refresh',
1827 b'refresh',
1829 default=0.1,
1828 default=0.1,
1830 )
1829 )
1831 coreconfigitem(
1830 coreconfigitem(
1832 b'progress',
1831 b'progress',
1833 b'width',
1832 b'width',
1834 default=dynamicdefault,
1833 default=dynamicdefault,
1835 )
1834 )
1836 coreconfigitem(
1835 coreconfigitem(
1837 b'pull',
1836 b'pull',
1838 b'confirm',
1837 b'confirm',
1839 default=False,
1838 default=False,
1840 )
1839 )
1841 coreconfigitem(
1840 coreconfigitem(
1842 b'push',
1841 b'push',
1843 b'pushvars.server',
1842 b'pushvars.server',
1844 default=False,
1843 default=False,
1845 )
1844 )
1846 coreconfigitem(
1845 coreconfigitem(
1847 b'rewrite',
1846 b'rewrite',
1848 b'backup-bundle',
1847 b'backup-bundle',
1849 default=True,
1848 default=True,
1850 alias=[(b'ui', b'history-editing-backup')],
1849 alias=[(b'ui', b'history-editing-backup')],
1851 )
1850 )
1852 coreconfigitem(
1851 coreconfigitem(
1853 b'rewrite',
1852 b'rewrite',
1854 b'update-timestamp',
1853 b'update-timestamp',
1855 default=False,
1854 default=False,
1856 )
1855 )
1857 coreconfigitem(
1856 coreconfigitem(
1858 b'rewrite',
1857 b'rewrite',
1859 b'empty-successor',
1858 b'empty-successor',
1860 default=b'skip',
1859 default=b'skip',
1861 experimental=True,
1860 experimental=True,
1862 )
1861 )
1863 coreconfigitem(
1862 coreconfigitem(
1864 b'storage',
1863 b'storage',
1865 b'new-repo-backend',
1864 b'new-repo-backend',
1866 default=b'revlogv1',
1865 default=b'revlogv1',
1867 experimental=True,
1866 experimental=True,
1868 )
1867 )
1869 coreconfigitem(
1868 coreconfigitem(
1870 b'storage',
1869 b'storage',
1871 b'revlog.optimize-delta-parent-choice',
1870 b'revlog.optimize-delta-parent-choice',
1872 default=True,
1871 default=True,
1873 alias=[(b'format', b'aggressivemergedeltas')],
1872 alias=[(b'format', b'aggressivemergedeltas')],
1874 )
1873 )
1875 # experimental as long as rust is experimental (or a C version is implemented)
1874 # experimental as long as rust is experimental (or a C version is implemented)
1876 coreconfigitem(
1875 coreconfigitem(
1877 b'storage',
1876 b'storage',
1878 b'revlog.persistent-nodemap.mmap',
1877 b'revlog.persistent-nodemap.mmap',
1879 default=True,
1878 default=True,
1880 )
1879 )
1881 # experimental as long as format.use-persistent-nodemap is.
1880 # experimental as long as format.use-persistent-nodemap is.
1882 coreconfigitem(
1881 coreconfigitem(
1883 b'storage',
1882 b'storage',
1884 b'revlog.persistent-nodemap.slow-path',
1883 b'revlog.persistent-nodemap.slow-path',
1885 default=b"abort",
1884 default=b"abort",
1886 )
1885 )
1887
1886
1888 coreconfigitem(
1887 coreconfigitem(
1889 b'storage',
1888 b'storage',
1890 b'revlog.reuse-external-delta',
1889 b'revlog.reuse-external-delta',
1891 default=True,
1890 default=True,
1892 )
1891 )
1893 coreconfigitem(
1892 coreconfigitem(
1894 b'storage',
1893 b'storage',
1895 b'revlog.reuse-external-delta-parent',
1894 b'revlog.reuse-external-delta-parent',
1896 default=None,
1895 default=None,
1897 )
1896 )
1898 coreconfigitem(
1897 coreconfigitem(
1899 b'storage',
1898 b'storage',
1900 b'revlog.zlib.level',
1899 b'revlog.zlib.level',
1901 default=None,
1900 default=None,
1902 )
1901 )
1903 coreconfigitem(
1902 coreconfigitem(
1904 b'storage',
1903 b'storage',
1905 b'revlog.zstd.level',
1904 b'revlog.zstd.level',
1906 default=None,
1905 default=None,
1907 )
1906 )
1908 coreconfigitem(
1907 coreconfigitem(
1909 b'server',
1908 b'server',
1910 b'bookmarks-pushkey-compat',
1909 b'bookmarks-pushkey-compat',
1911 default=True,
1910 default=True,
1912 )
1911 )
1913 coreconfigitem(
1912 coreconfigitem(
1914 b'server',
1913 b'server',
1915 b'bundle1',
1914 b'bundle1',
1916 default=True,
1915 default=True,
1917 )
1916 )
1918 coreconfigitem(
1917 coreconfigitem(
1919 b'server',
1918 b'server',
1920 b'bundle1gd',
1919 b'bundle1gd',
1921 default=None,
1920 default=None,
1922 )
1921 )
1923 coreconfigitem(
1922 coreconfigitem(
1924 b'server',
1923 b'server',
1925 b'bundle1.pull',
1924 b'bundle1.pull',
1926 default=None,
1925 default=None,
1927 )
1926 )
1928 coreconfigitem(
1927 coreconfigitem(
1929 b'server',
1928 b'server',
1930 b'bundle1gd.pull',
1929 b'bundle1gd.pull',
1931 default=None,
1930 default=None,
1932 )
1931 )
1933 coreconfigitem(
1932 coreconfigitem(
1934 b'server',
1933 b'server',
1935 b'bundle1.push',
1934 b'bundle1.push',
1936 default=None,
1935 default=None,
1937 )
1936 )
1938 coreconfigitem(
1937 coreconfigitem(
1939 b'server',
1938 b'server',
1940 b'bundle1gd.push',
1939 b'bundle1gd.push',
1941 default=None,
1940 default=None,
1942 )
1941 )
1943 coreconfigitem(
1942 coreconfigitem(
1944 b'server',
1943 b'server',
1945 b'bundle2.stream',
1944 b'bundle2.stream',
1946 default=True,
1945 default=True,
1947 alias=[(b'experimental', b'bundle2.stream')],
1946 alias=[(b'experimental', b'bundle2.stream')],
1948 )
1947 )
1949 coreconfigitem(
1948 coreconfigitem(
1950 b'server',
1949 b'server',
1951 b'compressionengines',
1950 b'compressionengines',
1952 default=list,
1951 default=list,
1953 )
1952 )
1954 coreconfigitem(
1953 coreconfigitem(
1955 b'server',
1954 b'server',
1956 b'concurrent-push-mode',
1955 b'concurrent-push-mode',
1957 default=b'check-related',
1956 default=b'check-related',
1958 )
1957 )
1959 coreconfigitem(
1958 coreconfigitem(
1960 b'server',
1959 b'server',
1961 b'disablefullbundle',
1960 b'disablefullbundle',
1962 default=False,
1961 default=False,
1963 )
1962 )
1964 coreconfigitem(
1963 coreconfigitem(
1965 b'server',
1964 b'server',
1966 b'maxhttpheaderlen',
1965 b'maxhttpheaderlen',
1967 default=1024,
1966 default=1024,
1968 )
1967 )
1969 coreconfigitem(
1968 coreconfigitem(
1970 b'server',
1969 b'server',
1971 b'pullbundle',
1970 b'pullbundle',
1972 default=False,
1971 default=False,
1973 )
1972 )
1974 coreconfigitem(
1973 coreconfigitem(
1975 b'server',
1974 b'server',
1976 b'preferuncompressed',
1975 b'preferuncompressed',
1977 default=False,
1976 default=False,
1978 )
1977 )
1979 coreconfigitem(
1978 coreconfigitem(
1980 b'server',
1979 b'server',
1981 b'streamunbundle',
1980 b'streamunbundle',
1982 default=False,
1981 default=False,
1983 )
1982 )
1984 coreconfigitem(
1983 coreconfigitem(
1985 b'server',
1984 b'server',
1986 b'uncompressed',
1985 b'uncompressed',
1987 default=True,
1986 default=True,
1988 )
1987 )
1989 coreconfigitem(
1988 coreconfigitem(
1990 b'server',
1989 b'server',
1991 b'uncompressedallowsecret',
1990 b'uncompressedallowsecret',
1992 default=False,
1991 default=False,
1993 )
1992 )
1994 coreconfigitem(
1993 coreconfigitem(
1995 b'server',
1994 b'server',
1996 b'view',
1995 b'view',
1997 default=b'served',
1996 default=b'served',
1998 )
1997 )
1999 coreconfigitem(
1998 coreconfigitem(
2000 b'server',
1999 b'server',
2001 b'validate',
2000 b'validate',
2002 default=False,
2001 default=False,
2003 )
2002 )
2004 coreconfigitem(
2003 coreconfigitem(
2005 b'server',
2004 b'server',
2006 b'zliblevel',
2005 b'zliblevel',
2007 default=-1,
2006 default=-1,
2008 )
2007 )
2009 coreconfigitem(
2008 coreconfigitem(
2010 b'server',
2009 b'server',
2011 b'zstdlevel',
2010 b'zstdlevel',
2012 default=3,
2011 default=3,
2013 )
2012 )
2014 coreconfigitem(
2013 coreconfigitem(
2015 b'share',
2014 b'share',
2016 b'pool',
2015 b'pool',
2017 default=None,
2016 default=None,
2018 )
2017 )
2019 coreconfigitem(
2018 coreconfigitem(
2020 b'share',
2019 b'share',
2021 b'poolnaming',
2020 b'poolnaming',
2022 default=b'identity',
2021 default=b'identity',
2023 )
2022 )
2024 coreconfigitem(
2023 coreconfigitem(
2025 b'share',
2024 b'share',
2026 b'safe-mismatch.source-not-safe',
2025 b'safe-mismatch.source-not-safe',
2027 default=b'abort',
2026 default=b'abort',
2028 )
2027 )
2029 coreconfigitem(
2028 coreconfigitem(
2030 b'share',
2029 b'share',
2031 b'safe-mismatch.source-safe',
2030 b'safe-mismatch.source-safe',
2032 default=b'abort',
2031 default=b'abort',
2033 )
2032 )
2034 coreconfigitem(
2033 coreconfigitem(
2035 b'share',
2034 b'share',
2036 b'safe-mismatch.source-not-safe.warn',
2035 b'safe-mismatch.source-not-safe.warn',
2037 default=True,
2036 default=True,
2038 )
2037 )
2039 coreconfigitem(
2038 coreconfigitem(
2040 b'share',
2039 b'share',
2041 b'safe-mismatch.source-safe.warn',
2040 b'safe-mismatch.source-safe.warn',
2042 default=True,
2041 default=True,
2043 )
2042 )
2044 coreconfigitem(
2043 coreconfigitem(
2045 b'shelve',
2044 b'shelve',
2046 b'maxbackups',
2045 b'maxbackups',
2047 default=10,
2046 default=10,
2048 )
2047 )
2049 coreconfigitem(
2048 coreconfigitem(
2050 b'smtp',
2049 b'smtp',
2051 b'host',
2050 b'host',
2052 default=None,
2051 default=None,
2053 )
2052 )
2054 coreconfigitem(
2053 coreconfigitem(
2055 b'smtp',
2054 b'smtp',
2056 b'local_hostname',
2055 b'local_hostname',
2057 default=None,
2056 default=None,
2058 )
2057 )
2059 coreconfigitem(
2058 coreconfigitem(
2060 b'smtp',
2059 b'smtp',
2061 b'password',
2060 b'password',
2062 default=None,
2061 default=None,
2063 )
2062 )
2064 coreconfigitem(
2063 coreconfigitem(
2065 b'smtp',
2064 b'smtp',
2066 b'port',
2065 b'port',
2067 default=dynamicdefault,
2066 default=dynamicdefault,
2068 )
2067 )
2069 coreconfigitem(
2068 coreconfigitem(
2070 b'smtp',
2069 b'smtp',
2071 b'tls',
2070 b'tls',
2072 default=b'none',
2071 default=b'none',
2073 )
2072 )
2074 coreconfigitem(
2073 coreconfigitem(
2075 b'smtp',
2074 b'smtp',
2076 b'username',
2075 b'username',
2077 default=None,
2076 default=None,
2078 )
2077 )
2079 coreconfigitem(
2078 coreconfigitem(
2080 b'sparse',
2079 b'sparse',
2081 b'missingwarning',
2080 b'missingwarning',
2082 default=True,
2081 default=True,
2083 experimental=True,
2082 experimental=True,
2084 )
2083 )
2085 coreconfigitem(
2084 coreconfigitem(
2086 b'subrepos',
2085 b'subrepos',
2087 b'allowed',
2086 b'allowed',
2088 default=dynamicdefault, # to make backporting simpler
2087 default=dynamicdefault, # to make backporting simpler
2089 )
2088 )
2090 coreconfigitem(
2089 coreconfigitem(
2091 b'subrepos',
2090 b'subrepos',
2092 b'hg:allowed',
2091 b'hg:allowed',
2093 default=dynamicdefault,
2092 default=dynamicdefault,
2094 )
2093 )
2095 coreconfigitem(
2094 coreconfigitem(
2096 b'subrepos',
2095 b'subrepos',
2097 b'git:allowed',
2096 b'git:allowed',
2098 default=dynamicdefault,
2097 default=dynamicdefault,
2099 )
2098 )
2100 coreconfigitem(
2099 coreconfigitem(
2101 b'subrepos',
2100 b'subrepos',
2102 b'svn:allowed',
2101 b'svn:allowed',
2103 default=dynamicdefault,
2102 default=dynamicdefault,
2104 )
2103 )
2105 coreconfigitem(
2104 coreconfigitem(
2106 b'templates',
2105 b'templates',
2107 b'.*',
2106 b'.*',
2108 default=None,
2107 default=None,
2109 generic=True,
2108 generic=True,
2110 )
2109 )
2111 coreconfigitem(
2110 coreconfigitem(
2112 b'templateconfig',
2111 b'templateconfig',
2113 b'.*',
2112 b'.*',
2114 default=dynamicdefault,
2113 default=dynamicdefault,
2115 generic=True,
2114 generic=True,
2116 )
2115 )
2117 coreconfigitem(
2116 coreconfigitem(
2118 b'trusted',
2117 b'trusted',
2119 b'groups',
2118 b'groups',
2120 default=list,
2119 default=list,
2121 )
2120 )
2122 coreconfigitem(
2121 coreconfigitem(
2123 b'trusted',
2122 b'trusted',
2124 b'users',
2123 b'users',
2125 default=list,
2124 default=list,
2126 )
2125 )
2127 coreconfigitem(
2126 coreconfigitem(
2128 b'ui',
2127 b'ui',
2129 b'_usedassubrepo',
2128 b'_usedassubrepo',
2130 default=False,
2129 default=False,
2131 )
2130 )
2132 coreconfigitem(
2131 coreconfigitem(
2133 b'ui',
2132 b'ui',
2134 b'allowemptycommit',
2133 b'allowemptycommit',
2135 default=False,
2134 default=False,
2136 )
2135 )
2137 coreconfigitem(
2136 coreconfigitem(
2138 b'ui',
2137 b'ui',
2139 b'archivemeta',
2138 b'archivemeta',
2140 default=True,
2139 default=True,
2141 )
2140 )
2142 coreconfigitem(
2141 coreconfigitem(
2143 b'ui',
2142 b'ui',
2144 b'askusername',
2143 b'askusername',
2145 default=False,
2144 default=False,
2146 )
2145 )
2147 coreconfigitem(
2146 coreconfigitem(
2148 b'ui',
2147 b'ui',
2149 b'available-memory',
2148 b'available-memory',
2150 default=None,
2149 default=None,
2151 )
2150 )
2152
2151
2153 coreconfigitem(
2152 coreconfigitem(
2154 b'ui',
2153 b'ui',
2155 b'clonebundlefallback',
2154 b'clonebundlefallback',
2156 default=False,
2155 default=False,
2157 )
2156 )
2158 coreconfigitem(
2157 coreconfigitem(
2159 b'ui',
2158 b'ui',
2160 b'clonebundleprefers',
2159 b'clonebundleprefers',
2161 default=list,
2160 default=list,
2162 )
2161 )
2163 coreconfigitem(
2162 coreconfigitem(
2164 b'ui',
2163 b'ui',
2165 b'clonebundles',
2164 b'clonebundles',
2166 default=True,
2165 default=True,
2167 )
2166 )
2168 coreconfigitem(
2167 coreconfigitem(
2169 b'ui',
2168 b'ui',
2170 b'color',
2169 b'color',
2171 default=b'auto',
2170 default=b'auto',
2172 )
2171 )
2173 coreconfigitem(
2172 coreconfigitem(
2174 b'ui',
2173 b'ui',
2175 b'commitsubrepos',
2174 b'commitsubrepos',
2176 default=False,
2175 default=False,
2177 )
2176 )
2178 coreconfigitem(
2177 coreconfigitem(
2179 b'ui',
2178 b'ui',
2180 b'debug',
2179 b'debug',
2181 default=False,
2180 default=False,
2182 )
2181 )
2183 coreconfigitem(
2182 coreconfigitem(
2184 b'ui',
2183 b'ui',
2185 b'debugger',
2184 b'debugger',
2186 default=None,
2185 default=None,
2187 )
2186 )
2188 coreconfigitem(
2187 coreconfigitem(
2189 b'ui',
2188 b'ui',
2190 b'editor',
2189 b'editor',
2191 default=dynamicdefault,
2190 default=dynamicdefault,
2192 )
2191 )
2193 coreconfigitem(
2192 coreconfigitem(
2194 b'ui',
2193 b'ui',
2195 b'detailed-exit-code',
2194 b'detailed-exit-code',
2196 default=False,
2195 default=False,
2197 experimental=True,
2196 experimental=True,
2198 )
2197 )
2199 coreconfigitem(
2198 coreconfigitem(
2200 b'ui',
2199 b'ui',
2201 b'fallbackencoding',
2200 b'fallbackencoding',
2202 default=None,
2201 default=None,
2203 )
2202 )
2204 coreconfigitem(
2203 coreconfigitem(
2205 b'ui',
2204 b'ui',
2206 b'forcecwd',
2205 b'forcecwd',
2207 default=None,
2206 default=None,
2208 )
2207 )
2209 coreconfigitem(
2208 coreconfigitem(
2210 b'ui',
2209 b'ui',
2211 b'forcemerge',
2210 b'forcemerge',
2212 default=None,
2211 default=None,
2213 )
2212 )
2214 coreconfigitem(
2213 coreconfigitem(
2215 b'ui',
2214 b'ui',
2216 b'formatdebug',
2215 b'formatdebug',
2217 default=False,
2216 default=False,
2218 )
2217 )
2219 coreconfigitem(
2218 coreconfigitem(
2220 b'ui',
2219 b'ui',
2221 b'formatjson',
2220 b'formatjson',
2222 default=False,
2221 default=False,
2223 )
2222 )
2224 coreconfigitem(
2223 coreconfigitem(
2225 b'ui',
2224 b'ui',
2226 b'formatted',
2225 b'formatted',
2227 default=None,
2226 default=None,
2228 )
2227 )
2229 coreconfigitem(
2228 coreconfigitem(
2230 b'ui',
2229 b'ui',
2231 b'interactive',
2230 b'interactive',
2232 default=None,
2231 default=None,
2233 )
2232 )
2234 coreconfigitem(
2233 coreconfigitem(
2235 b'ui',
2234 b'ui',
2236 b'interface',
2235 b'interface',
2237 default=None,
2236 default=None,
2238 )
2237 )
2239 coreconfigitem(
2238 coreconfigitem(
2240 b'ui',
2239 b'ui',
2241 b'interface.chunkselector',
2240 b'interface.chunkselector',
2242 default=None,
2241 default=None,
2243 )
2242 )
2244 coreconfigitem(
2243 coreconfigitem(
2245 b'ui',
2244 b'ui',
2246 b'large-file-limit',
2245 b'large-file-limit',
2247 default=10000000,
2246 default=10000000,
2248 )
2247 )
2249 coreconfigitem(
2248 coreconfigitem(
2250 b'ui',
2249 b'ui',
2251 b'logblockedtimes',
2250 b'logblockedtimes',
2252 default=False,
2251 default=False,
2253 )
2252 )
2254 coreconfigitem(
2253 coreconfigitem(
2255 b'ui',
2254 b'ui',
2256 b'merge',
2255 b'merge',
2257 default=None,
2256 default=None,
2258 )
2257 )
2259 coreconfigitem(
2258 coreconfigitem(
2260 b'ui',
2259 b'ui',
2261 b'mergemarkers',
2260 b'mergemarkers',
2262 default=b'basic',
2261 default=b'basic',
2263 )
2262 )
2264 coreconfigitem(
2263 coreconfigitem(
2265 b'ui',
2264 b'ui',
2266 b'message-output',
2265 b'message-output',
2267 default=b'stdio',
2266 default=b'stdio',
2268 )
2267 )
2269 coreconfigitem(
2268 coreconfigitem(
2270 b'ui',
2269 b'ui',
2271 b'nontty',
2270 b'nontty',
2272 default=False,
2271 default=False,
2273 )
2272 )
2274 coreconfigitem(
2273 coreconfigitem(
2275 b'ui',
2274 b'ui',
2276 b'origbackuppath',
2275 b'origbackuppath',
2277 default=None,
2276 default=None,
2278 )
2277 )
2279 coreconfigitem(
2278 coreconfigitem(
2280 b'ui',
2279 b'ui',
2281 b'paginate',
2280 b'paginate',
2282 default=True,
2281 default=True,
2283 )
2282 )
2284 coreconfigitem(
2283 coreconfigitem(
2285 b'ui',
2284 b'ui',
2286 b'patch',
2285 b'patch',
2287 default=None,
2286 default=None,
2288 )
2287 )
2289 coreconfigitem(
2288 coreconfigitem(
2290 b'ui',
2289 b'ui',
2291 b'portablefilenames',
2290 b'portablefilenames',
2292 default=b'warn',
2291 default=b'warn',
2293 )
2292 )
2294 coreconfigitem(
2293 coreconfigitem(
2295 b'ui',
2294 b'ui',
2296 b'promptecho',
2295 b'promptecho',
2297 default=False,
2296 default=False,
2298 )
2297 )
2299 coreconfigitem(
2298 coreconfigitem(
2300 b'ui',
2299 b'ui',
2301 b'quiet',
2300 b'quiet',
2302 default=False,
2301 default=False,
2303 )
2302 )
2304 coreconfigitem(
2303 coreconfigitem(
2305 b'ui',
2304 b'ui',
2306 b'quietbookmarkmove',
2305 b'quietbookmarkmove',
2307 default=False,
2306 default=False,
2308 )
2307 )
2309 coreconfigitem(
2308 coreconfigitem(
2310 b'ui',
2309 b'ui',
2311 b'relative-paths',
2310 b'relative-paths',
2312 default=b'legacy',
2311 default=b'legacy',
2313 )
2312 )
2314 coreconfigitem(
2313 coreconfigitem(
2315 b'ui',
2314 b'ui',
2316 b'remotecmd',
2315 b'remotecmd',
2317 default=b'hg',
2316 default=b'hg',
2318 )
2317 )
2319 coreconfigitem(
2318 coreconfigitem(
2320 b'ui',
2319 b'ui',
2321 b'report_untrusted',
2320 b'report_untrusted',
2322 default=True,
2321 default=True,
2323 )
2322 )
2324 coreconfigitem(
2323 coreconfigitem(
2325 b'ui',
2324 b'ui',
2326 b'rollback',
2325 b'rollback',
2327 default=True,
2326 default=True,
2328 )
2327 )
2329 coreconfigitem(
2328 coreconfigitem(
2330 b'ui',
2329 b'ui',
2331 b'signal-safe-lock',
2330 b'signal-safe-lock',
2332 default=True,
2331 default=True,
2333 )
2332 )
2334 coreconfigitem(
2333 coreconfigitem(
2335 b'ui',
2334 b'ui',
2336 b'slash',
2335 b'slash',
2337 default=False,
2336 default=False,
2338 )
2337 )
2339 coreconfigitem(
2338 coreconfigitem(
2340 b'ui',
2339 b'ui',
2341 b'ssh',
2340 b'ssh',
2342 default=b'ssh',
2341 default=b'ssh',
2343 )
2342 )
2344 coreconfigitem(
2343 coreconfigitem(
2345 b'ui',
2344 b'ui',
2346 b'ssherrorhint',
2345 b'ssherrorhint',
2347 default=None,
2346 default=None,
2348 )
2347 )
2349 coreconfigitem(
2348 coreconfigitem(
2350 b'ui',
2349 b'ui',
2351 b'statuscopies',
2350 b'statuscopies',
2352 default=False,
2351 default=False,
2353 )
2352 )
2354 coreconfigitem(
2353 coreconfigitem(
2355 b'ui',
2354 b'ui',
2356 b'strict',
2355 b'strict',
2357 default=False,
2356 default=False,
2358 )
2357 )
2359 coreconfigitem(
2358 coreconfigitem(
2360 b'ui',
2359 b'ui',
2361 b'style',
2360 b'style',
2362 default=b'',
2361 default=b'',
2363 )
2362 )
2364 coreconfigitem(
2363 coreconfigitem(
2365 b'ui',
2364 b'ui',
2366 b'supportcontact',
2365 b'supportcontact',
2367 default=None,
2366 default=None,
2368 )
2367 )
2369 coreconfigitem(
2368 coreconfigitem(
2370 b'ui',
2369 b'ui',
2371 b'textwidth',
2370 b'textwidth',
2372 default=78,
2371 default=78,
2373 )
2372 )
2374 coreconfigitem(
2373 coreconfigitem(
2375 b'ui',
2374 b'ui',
2376 b'timeout',
2375 b'timeout',
2377 default=b'600',
2376 default=b'600',
2378 )
2377 )
2379 coreconfigitem(
2378 coreconfigitem(
2380 b'ui',
2379 b'ui',
2381 b'timeout.warn',
2380 b'timeout.warn',
2382 default=0,
2381 default=0,
2383 )
2382 )
2384 coreconfigitem(
2383 coreconfigitem(
2385 b'ui',
2384 b'ui',
2386 b'timestamp-output',
2385 b'timestamp-output',
2387 default=False,
2386 default=False,
2388 )
2387 )
2389 coreconfigitem(
2388 coreconfigitem(
2390 b'ui',
2389 b'ui',
2391 b'traceback',
2390 b'traceback',
2392 default=False,
2391 default=False,
2393 )
2392 )
# --- ui / verify ---------------------------------------------------------
coreconfigitem(b'ui', b'tweakdefaults', default=False)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(b'ui', b'verbose', default=False)
coreconfigitem(b'verify', b'skipflags', default=None)

# --- hgweb ---------------------------------------------------------------
coreconfigitem(b'web', b'allowbz2', default=False)
coreconfigitem(b'web', b'allowgz', default=False)
coreconfigitem(b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True)
coreconfigitem(b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list)
coreconfigitem(b'web', b'allowzip', default=False)
coreconfigitem(b'web', b'archivesubrepos', default=False)
coreconfigitem(b'web', b'cache', default=True)
coreconfigitem(b'web', b'comparisoncontext', default=5)
coreconfigitem(b'web', b'contact', default=None)
coreconfigitem(b'web', b'deny_push', default=list)
coreconfigitem(b'web', b'guessmime', default=False)
coreconfigitem(b'web', b'hidden', default=False)
coreconfigitem(b'web', b'labels', default=list)
coreconfigitem(b'web', b'logoimg', default=b'hglogo.png')
coreconfigitem(b'web', b'logourl', default=b'https://mercurial-scm.org/')
coreconfigitem(b'web', b'accesslog', default=b'-')
coreconfigitem(b'web', b'address', default=b'')
coreconfigitem(b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list)
coreconfigitem(b'web', b'allow_read', default=list)
coreconfigitem(b'web', b'baseurl', default=None)
coreconfigitem(b'web', b'cacerts', default=None)
coreconfigitem(b'web', b'certificate', default=None)
coreconfigitem(b'web', b'collapse', default=False)
coreconfigitem(b'web', b'csp', default=None)
coreconfigitem(b'web', b'deny_read', default=list)
coreconfigitem(b'web', b'descend', default=True)
coreconfigitem(b'web', b'description', default=b"")
coreconfigitem(b'web', b'encoding', default=lambda: encoding.encoding)
coreconfigitem(b'web', b'errorlog', default=b'-')
coreconfigitem(b'web', b'ipv6', default=False)
coreconfigitem(b'web', b'maxchanges', default=10)
coreconfigitem(b'web', b'maxfiles', default=10)
coreconfigitem(b'web', b'maxshortchanges', default=60)
coreconfigitem(b'web', b'motd', default=b'')
coreconfigitem(b'web', b'name', default=dynamicdefault)
coreconfigitem(b'web', b'port', default=8000)
coreconfigitem(b'web', b'prefix', default=b'')
coreconfigitem(b'web', b'push_ssl', default=True)
coreconfigitem(b'web', b'refreshinterval', default=20)
coreconfigitem(b'web', b'server-header', default=None)
coreconfigitem(b'web', b'static', default=None)
coreconfigitem(b'web', b'staticurl', default=None)
coreconfigitem(b'web', b'stripes', default=1)
coreconfigitem(b'web', b'style', default=b'paper')
coreconfigitem(b'web', b'templates', default=None)
coreconfigitem(b'web', b'view', default=b'served', experimental=True)

# --- worker pool ---------------------------------------------------------
coreconfigitem(b'worker', b'backgroundclose', default=dynamicdefault)
# Windows defaults to a limit of 512 open files.  A buffer of 128
# should give us enough headway.
coreconfigitem(b'worker', b'backgroundclosemaxqueue', default=384)
coreconfigitem(b'worker', b'backgroundcloseminfilecount', default=2048)
coreconfigitem(b'worker', b'backgroundclosethreadcount', default=4)
coreconfigitem(b'worker', b'enabled', default=True)
coreconfigitem(b'worker', b'numcpus', default=None)

# Rebase-related configuration moved to core because other extensions do
# strange things.  For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(b'commands', b'rebase.requiredest', default=False)
coreconfigitem(b'experimental', b'rebaseskipobsolete', default=True)
coreconfigitem(b'rebase', b'singletransaction', default=False)
coreconfigitem(b'rebase', b'experimental.inmemory', default=False)
@@ -1,3249 +1,3246 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 FEATURES_BY_VERSION,
38 FEATURES_BY_VERSION,
39 FLAG_GENERALDELTA,
39 FLAG_GENERALDELTA,
40 FLAG_INLINE_DATA,
40 FLAG_INLINE_DATA,
41 INDEX_HEADER,
41 INDEX_HEADER,
42 REVLOGV0,
42 REVLOGV0,
43 REVLOGV1,
43 REVLOGV1,
44 REVLOGV1_FLAGS,
44 REVLOGV1_FLAGS,
45 REVLOGV2,
45 REVLOGV2,
46 REVLOGV2_FLAGS,
46 REVLOGV2_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
48 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_FORMAT,
49 REVLOG_DEFAULT_VERSION,
49 REVLOG_DEFAULT_VERSION,
50 SUPPORTED_FLAGS,
50 SUPPORTED_FLAGS,
51 )
51 )
52 from .revlogutils.flagutil import (
52 from .revlogutils.flagutil import (
53 REVIDX_DEFAULT_FLAGS,
53 REVIDX_DEFAULT_FLAGS,
54 REVIDX_ELLIPSIS,
54 REVIDX_ELLIPSIS,
55 REVIDX_EXTSTORED,
55 REVIDX_EXTSTORED,
56 REVIDX_FLAGS_ORDER,
56 REVIDX_FLAGS_ORDER,
57 REVIDX_HASCOPIESINFO,
57 REVIDX_HASCOPIESINFO,
58 REVIDX_ISCENSORED,
58 REVIDX_ISCENSORED,
59 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 REVIDX_RAWTEXT_CHANGING_FLAGS,
60 )
60 )
61 from .thirdparty import attr
61 from .thirdparty import attr
62 from . import (
62 from . import (
63 ancestor,
63 ancestor,
64 dagop,
64 dagop,
65 error,
65 error,
66 mdiff,
66 mdiff,
67 policy,
67 policy,
68 pycompat,
68 pycompat,
69 templatefilters,
69 templatefilters,
70 util,
70 util,
71 )
71 )
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76 from .revlogutils import (
76 from .revlogutils import (
77 deltas as deltautil,
77 deltas as deltautil,
78 docket as docketutil,
78 docket as docketutil,
79 flagutil,
79 flagutil,
80 nodemap as nodemaputil,
80 nodemap as nodemaputil,
81 revlogv0,
81 revlogv0,
82 sidedata as sidedatautil,
82 sidedata as sidedatautil,
83 )
83 )
84 from .utils import (
84 from .utils import (
85 storageutil,
85 storageutil,
86 stringutil,
86 stringutil,
87 )
87 )
88
88
# Reference every re-exported name once so pyflakes does not flag the
# imports as unused: extensions expect to import these from this module.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

# Optional accelerated implementations (C parsers, Rust helpers); each is
# None when the corresponding module is unavailable.
parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Local alias: skips a module attribute lookup in hot decompression paths.
_zlibdecompress = zlib.decompress

# Maximum size of a revlog that keeps its data inline in the index file.
_maxinline = 131072
# Read granularity for the raw-chunk cache.
_chunksize = 1048576
121
121
# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Read-side REVIDX_ELLIPSIS processor: pass *text* through unchanged."""
    return (text, False)
125
125
126
126
def ellipsiswriteprocessor(rl, text):
    """Write-side REVIDX_ELLIPSIS processor: pass *text* through unchanged."""
    return (text, False)
129
129
130
130
def ellipsisrawprocessor(rl, text):
    """Raw-side REVIDX_ELLIPSIS processor: rawtext is never hash-valid."""
    return False
133
133
134
134
# The (read, write, raw) flag-processor triple registered for
# REVIDX_ELLIPSIS.
ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
140
140
141
141
def offset_type(offset, type):
    """Pack a data-file *offset* and index flag bits into one integer.

    The offset occupies the high bits (shifted left by 16); the low 16
    bits carry the flags.  Raises ValueError if *type* contains any bit
    outside flagutil.REVIDX_KNOWN_FLAGS.
    """
    if type & ~flagutil.REVIDX_KNOWN_FLAGS:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)
146
146
147
147
148 def _verify_revision(rl, skipflags, state, node):
148 def _verify_revision(rl, skipflags, state, node):
149 """Verify the integrity of the given revlog ``node`` while providing a hook
149 """Verify the integrity of the given revlog ``node`` while providing a hook
150 point for extensions to influence the operation."""
150 point for extensions to influence the operation."""
151 if skipflags:
151 if skipflags:
152 state[b'skipread'].add(node)
152 state[b'skipread'].add(node)
153 else:
153 else:
154 # Side-effect: read content and verify hash.
154 # Side-effect: read content and verify hash.
155 rl.revision(node)
155 rl.revision(node)
156
156
157
157
# True when a "fast" persistent-nodemap implementation is available.
#
# The pure-python build also counts as "fast" here: users of the pure
# variant have no real performance expectations (and a wheelbarrow of
# other slowness sources), so the pure BaseIndexObject qualifies.
HAS_FAST_PERSISTENT_NODEMAP = (
    rustrevlog is not None
    or util.safehasattr(parsers, 'BaseIndexObject')
)
166
166
167
167
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Data needed to (re)build a revision's fulltext.

    Exactly one of ``btext[0]`` or ``cachedelta`` must be set.
    """

    # expected hash of the revision
    node = attr.ib()
    # parent revisions
    p1 = attr.ib()
    p2 = attr.ib()
    # built-text cache: a one-element list (mutable cell)
    btext = attr.ib()
    # length of the fulltext
    textlen = attr.ib()
    # (baserev, uncompressed_delta) or None
    cachedelta = attr.ib()
    # storage flags associated with the revision
    flags = attr.ib()
187
187
188
188
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    """A revision delta as exchanged over the wire / bundles."""

    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    # linknode is optional; None means "not linked yet"
    linknode = attr.ib(default=None)
203
203
204
204
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    """A problem (warning or error) found while verifying a revlog."""

    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
211
211
212
212
def parse_index_v1(data, inline):
    """Parse v1 index *data* with the C parser; returns (index, cache)."""
    return parsers.parse_index2(data, inline)
217
217
218
218
def parse_index_v2(data, inline):
    """Parse v2 index *data* with the C parser; returns (index, cache)."""
    return parsers.parse_index2(data, inline, revlogv2=True)
223
223
224
224
if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        """Parse a v1 index with the development nodemap-aware C parser."""
        return parsers.parse_index_devel_nodemap(data, inline)


else:
    # No nodemap-capable parser in this build.
    parse_index_v1_nodemap = None
234
234
235
235
def parse_index_v1_mixed(data, inline):
    """Parse a v1 index and wrap it in the Rust/C MixedIndex hybrid."""
    index, cache = parse_index_v1(data, inline)
    return (rustrevlog.MixedIndex(index), cache)
239
239
240
240
241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
242 # signed integer)
242 # signed integer)
243 _maxentrysize = 0x7FFFFFFF
243 _maxentrysize = 0x7FFFFFFF
244
244
245
245
246 class revlog(object):
246 class revlog(object):
247 """
247 """
248 the underlying revision storage object
248 the underlying revision storage object
249
249
250 A revlog consists of two parts, an index and the revision data.
250 A revlog consists of two parts, an index and the revision data.
251
251
252 The index is a file with a fixed record size containing
252 The index is a file with a fixed record size containing
253 information on each revision, including its nodeid (hash), the
253 information on each revision, including its nodeid (hash), the
254 nodeids of its parents, the position and offset of its data within
254 nodeids of its parents, the position and offset of its data within
255 the data file, and the revision it's based on. Finally, each entry
255 the data file, and the revision it's based on. Finally, each entry
256 contains a linkrev entry that can serve as a pointer to external
256 contains a linkrev entry that can serve as a pointer to external
257 data.
257 data.
258
258
259 The revision data itself is a linear collection of data chunks.
259 The revision data itself is a linear collection of data chunks.
260 Each chunk represents a revision and is usually represented as a
260 Each chunk represents a revision and is usually represented as a
261 delta against the previous chunk. To bound lookup time, runs of
261 delta against the previous chunk. To bound lookup time, runs of
262 deltas are limited to about 2 times the length of the original
262 deltas are limited to about 2 times the length of the original
263 version data. This makes retrieval of a version proportional to
263 version data. This makes retrieval of a version proportional to
264 its size, or O(1) relative to the number of revisions.
264 its size, or O(1) relative to the number of revisions.
265
265
266 Both pieces of the revlog are written to in an append-only
266 Both pieces of the revlog are written to in an append-only
267 fashion, which means we never need to rewrite a file to insert or
267 fashion, which means we never need to rewrite a file to insert or
268 remove data, and can use some simple techniques to avoid the need
268 remove data, and can use some simple techniques to avoid the need
269 for locking while reading.
269 for locking while reading.
270
270
271 If checkambig, indexfile is opened with checkambig=True at
271 If checkambig, indexfile is opened with checkambig=True at
272 writing, to avoid file stat ambiguity.
272 writing, to avoid file stat ambiguity.
273
273
274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
275 index will be mmapped rather than read if it is larger than the
275 index will be mmapped rather than read if it is larger than the
276 configured threshold.
276 configured threshold.
277
277
278 If censorable is True, the revlog can have censored revisions.
278 If censorable is True, the revlog can have censored revisions.
279
279
280 If `upperboundcomp` is not None, this is the expected maximal gain from
280 If `upperboundcomp` is not None, this is the expected maximal gain from
281 compression for the data content.
281 compression for the data content.
282
282
283 `concurrencychecker` is an optional function that receives 3 arguments: a
283 `concurrencychecker` is an optional function that receives 3 arguments: a
284 file handle, a filename, and an expected position. It should check whether
284 file handle, a filename, and an expected position. It should check whether
285 the current position in the file handle is valid, and log/warn/fail (by
285 the current position in the file handle is valid, and log/warn/fail (by
286 raising).
286 raising).
287 """
287 """
288
288
289 _flagserrorclass = error.RevlogError
289 _flagserrorclass = error.RevlogError
290
290
291 def __init__(
291 def __init__(
292 self,
292 self,
293 opener,
293 opener,
294 target,
294 target,
295 radix,
295 radix,
296 postfix=None,
296 postfix=None,
297 checkambig=False,
297 checkambig=False,
298 mmaplargeindex=False,
298 mmaplargeindex=False,
299 censorable=False,
299 censorable=False,
300 upperboundcomp=None,
300 upperboundcomp=None,
301 persistentnodemap=False,
301 persistentnodemap=False,
302 concurrencychecker=None,
302 concurrencychecker=None,
303 ):
303 ):
304 """
304 """
305 create a revlog object
305 create a revlog object
306
306
307 opener is a function that abstracts the file opening operation
307 opener is a function that abstracts the file opening operation
308 and can be used to implement COW semantics or the like.
308 and can be used to implement COW semantics or the like.
309
309
310 `target`: a (KIND, ID) tuple that identify the content stored in
310 `target`: a (KIND, ID) tuple that identify the content stored in
311 this revlog. It help the rest of the code to understand what the revlog
311 this revlog. It help the rest of the code to understand what the revlog
312 is about without having to resort to heuristic and index filename
312 is about without having to resort to heuristic and index filename
313 analysis. Note: that this must be reliably be set by normal code, but
313 analysis. Note: that this must be reliably be set by normal code, but
314 that test, debug, or performance measurement code might not set this to
314 that test, debug, or performance measurement code might not set this to
315 accurate value.
315 accurate value.
316 """
316 """
317 self.upperboundcomp = upperboundcomp
317 self.upperboundcomp = upperboundcomp
318
318
319 self.radix = radix
319 self.radix = radix
320
320
321 self._docket_file = None
321 self._docket_file = None
322 self._indexfile = None
322 self._indexfile = None
323 self._datafile = None
323 self._datafile = None
324 self._nodemap_file = None
324 self._nodemap_file = None
325 self.postfix = postfix
325 self.postfix = postfix
326 self.opener = opener
326 self.opener = opener
327 if persistentnodemap:
327 if persistentnodemap:
328 self._nodemap_file = nodemaputil.get_nodemap_file(self)
328 self._nodemap_file = nodemaputil.get_nodemap_file(self)
329
329
330 assert target[0] in ALL_KINDS
330 assert target[0] in ALL_KINDS
331 assert len(target) == 2
331 assert len(target) == 2
332 self.target = target
332 self.target = target
333 # When True, indexfile is opened with checkambig=True at writing, to
333 # When True, indexfile is opened with checkambig=True at writing, to
334 # avoid file stat ambiguity.
334 # avoid file stat ambiguity.
335 self._checkambig = checkambig
335 self._checkambig = checkambig
336 self._mmaplargeindex = mmaplargeindex
336 self._mmaplargeindex = mmaplargeindex
337 self._censorable = censorable
337 self._censorable = censorable
338 # 3-tuple of (node, rev, text) for a raw revision.
338 # 3-tuple of (node, rev, text) for a raw revision.
339 self._revisioncache = None
339 self._revisioncache = None
340 # Maps rev to chain base rev.
340 # Maps rev to chain base rev.
341 self._chainbasecache = util.lrucachedict(100)
341 self._chainbasecache = util.lrucachedict(100)
342 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
342 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
343 self._chunkcache = (0, b'')
343 self._chunkcache = (0, b'')
344 # How much data to read and cache into the raw revlog data cache.
344 # How much data to read and cache into the raw revlog data cache.
345 self._chunkcachesize = 65536
345 self._chunkcachesize = 65536
346 self._maxchainlen = None
346 self._maxchainlen = None
347 self._deltabothparents = True
347 self._deltabothparents = True
348 self.index = None
348 self.index = None
349 self._docket = None
349 self._docket = None
350 self._nodemap_docket = None
350 self._nodemap_docket = None
351 # Mapping of partial identifiers to full nodes.
351 # Mapping of partial identifiers to full nodes.
352 self._pcache = {}
352 self._pcache = {}
353 # Mapping of revision integer to full node.
353 # Mapping of revision integer to full node.
354 self._compengine = b'zlib'
354 self._compengine = b'zlib'
355 self._compengineopts = {}
355 self._compengineopts = {}
356 self._maxdeltachainspan = -1
356 self._maxdeltachainspan = -1
357 self._withsparseread = False
357 self._withsparseread = False
358 self._sparserevlog = False
358 self._sparserevlog = False
359 self.hassidedata = False
359 self.hassidedata = False
360 self._srdensitythreshold = 0.50
360 self._srdensitythreshold = 0.50
361 self._srmingapsize = 262144
361 self._srmingapsize = 262144
362
362
363 # Make copy of flag processors so each revlog instance can support
363 # Make copy of flag processors so each revlog instance can support
364 # custom flags.
364 # custom flags.
365 self._flagprocessors = dict(flagutil.flagprocessors)
365 self._flagprocessors = dict(flagutil.flagprocessors)
366
366
367 # 2-tuple of file handles being used for active writing.
367 # 2-tuple of file handles being used for active writing.
368 self._writinghandles = None
368 self._writinghandles = None
369 # prevent nesting of addgroup
369 # prevent nesting of addgroup
370 self._adding_group = None
370 self._adding_group = None
371
371
372 self._loadindex()
372 self._loadindex()
373
373
374 self._concurrencychecker = concurrencychecker
374 self._concurrencychecker = concurrencychecker
375
375
    def _init_opts(self):
        """process options (from above/config) to setup associated default revlog mode

        These values might be affected when actually reading on disk information.

        The relevant values are returned for use in _loadindex().

        * newversionflags:
            version header to use if we need to create a new revlog

        * mmapindexthreshold:
            minimal index size for start to use mmap

        * force_nodemap:
            force the usage of a "development" version of the nodemap code
        """
        mmapindexthreshold = None
        opts = self.opener.options

        # pick the header (format version + feature flags) to use if we end
        # up creating a brand new revlog
        if b'revlogv2' in opts:
            new_header = REVLOGV2 | FLAG_INLINE_DATA
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        # copy tuning knobs from the opener options onto this instance,
        # keeping the class-level defaults when a knob is absent
        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            # lazydeltabase only has an effect when lazydelta is enabled
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        # the chunk cache size must be a positive power of two
        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap
454 return new_header, mmapindexthreshold, force_nodemap
455
455
456 def _get_data(self, filepath, mmap_threshold, size=None):
456 def _get_data(self, filepath, mmap_threshold, size=None):
457 """return a file content with or without mmap
457 """return a file content with or without mmap
458
458
459 If the file is missing return the empty string"""
459 If the file is missing return the empty string"""
460 try:
460 try:
461 with self.opener(filepath) as fp:
461 with self.opener(filepath) as fp:
462 if mmap_threshold is not None:
462 if mmap_threshold is not None:
463 file_size = self.opener.fstat(fp).st_size
463 file_size = self.opener.fstat(fp).st_size
464 if file_size >= mmap_threshold:
464 if file_size >= mmap_threshold:
465 if size is not None:
465 if size is not None:
466 # avoid potentiel mmap crash
466 # avoid potentiel mmap crash
467 size = min(file_size, size)
467 size = min(file_size, size)
468 # TODO: should .close() to release resources without
468 # TODO: should .close() to release resources without
469 # relying on Python GC
469 # relying on Python GC
470 if size is None:
470 if size is None:
471 return util.buffer(util.mmapread(fp))
471 return util.buffer(util.mmapread(fp))
472 else:
472 else:
473 return util.buffer(util.mmapread(fp, size))
473 return util.buffer(util.mmapread(fp, size))
474 if size is None:
474 if size is None:
475 return fp.read()
475 return fp.read()
476 else:
476 else:
477 return fp.read(size)
477 return fp.read(size)
478 except IOError as inst:
478 except IOError as inst:
479 if inst.errno != errno.ENOENT:
479 if inst.errno != errno.ENOENT:
480 raise
480 raise
481 return b''
481 return b''
482
482
    def _loadindex(self):
        """Read the on-disk entry point and set up the in-memory index.

        This detects the revlog format version and flags, reads the docket
        (for docket-based formats) or the index data directly, selects the
        right index parser and populates ``self.index``.
        """

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        # the "entry point" is the file we open first: either the index
        # itself (v0/v1) or the docket (docket-based formats)
        if self.postfix is None:
            entry_point = b'%s.i' % self.radix
        else:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)

        entry_data = b''
        self._initempty = True
        entry_data = self._get_data(entry_point, mmapindexthreshold)
        if len(entry_data) > 0:
            # the first four bytes carry the format version and flags
            header = INDEX_HEADER.unpack(entry_data[:4])[0]
            self._initempty = False
        else:
            # empty/missing file: this is a new revlog, use the configured
            # default header
            header = new_header

        # upper bits: feature flags; low 16 bits: format version
        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF

        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self._generaldelta = features[b'generaldelta'](self._format_flags)
        self.hassidedata = features[b'sidedata']

        if not features[b'docket']:
            # the entry point file *is* the index
            self._indexfile = entry_point
            index_data = entry_data
        else:
            # the entry point is a docket pointing at the actual index file
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(self, entry_data)
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            # only read up to the size the docket declares valid; anything
            # past it may be leftover from an interrupted transaction
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self.postfix is None or self.postfix == b'a':
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        # pick the index parser matching the detected format
        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, _chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    # only trust the persisted nodemap if its recorded tip
                    # still matches the index content
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}
614
614
615 @util.propertycache
615 @util.propertycache
616 def revlog_kind(self):
616 def revlog_kind(self):
617 return self.target[0]
617 return self.target[0]
618
618
619 @util.propertycache
619 @util.propertycache
620 def display_id(self):
620 def display_id(self):
621 """The public facing "ID" of the revlog that we use in message"""
621 """The public facing "ID" of the revlog that we use in message"""
622 # Maybe we should build a user facing representation of
622 # Maybe we should build a user facing representation of
623 # revlog.target instead of using `self.radix`
623 # revlog.target instead of using `self.radix`
624 return self.radix
624 return self.radix
625
625
626 @util.propertycache
626 @util.propertycache
627 def _compressor(self):
627 def _compressor(self):
628 engine = util.compengines[self._compengine]
628 engine = util.compengines[self._compengine]
629 return engine.revlogcompressor(self._compengineopts)
629 return engine.revlogcompressor(self._compengineopts)
630
630
631 def _indexfp(self):
631 def _indexfp(self):
632 """file object for the revlog's index file"""
632 """file object for the revlog's index file"""
633 return self.opener(self._indexfile, mode=b"r")
633 return self.opener(self._indexfile, mode=b"r")
634
634
    def __index_write_fp(self):
        """Return a file object positioned for appending index data.

        You should not use this directly and use `_writing` instead.
        """
        try:
            f = self.opener(
                self._indexfile, mode=b"r+", checkambig=self._checkambig
            )
            if self._docket is None:
                # no docket: simply append at the end of the file
                f.seek(0, os.SEEK_END)
            else:
                # the docket records how far the index contains valid data;
                # anything past `index_end` may be leftovers from an
                # interrupted transaction, so start writing there
                f.seek(self._docket.index_end, os.SEEK_SET)
            return f
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # the index file does not exist yet: create it
            return self.opener(
                self._indexfile, mode=b"w+", checkambig=self._checkambig
            )
652
652
653 def __index_new_fp(self):
653 def __index_new_fp(self):
654 # You should not use this unless you are upgrading from inline revlog
654 # You should not use this unless you are upgrading from inline revlog
655 return self.opener(
655 return self.opener(
656 self._indexfile,
656 self._indexfile,
657 mode=b"w",
657 mode=b"w",
658 checkambig=self._checkambig,
658 checkambig=self._checkambig,
659 atomictemp=True,
659 atomictemp=True,
660 )
660 )
661
661
662 def _datafp(self, mode=b'r'):
662 def _datafp(self, mode=b'r'):
663 """file object for the revlog's data file"""
663 """file object for the revlog's data file"""
664 return self.opener(self._datafile, mode=mode)
664 return self.opener(self._datafile, mode=mode)
665
665
666 @contextlib.contextmanager
666 @contextlib.contextmanager
667 def _datareadfp(self, existingfp=None):
667 def _datareadfp(self, existingfp=None):
668 """file object suitable to read data"""
668 """file object suitable to read data"""
669 # Use explicit file handle, if given.
669 # Use explicit file handle, if given.
670 if existingfp is not None:
670 if existingfp is not None:
671 yield existingfp
671 yield existingfp
672
672
673 # Use a file handle being actively used for writes, if available.
673 # Use a file handle being actively used for writes, if available.
674 # There is some danger to doing this because reads will seek the
674 # There is some danger to doing this because reads will seek the
675 # file. However, _writeentry() performs a SEEK_END before all writes,
675 # file. However, _writeentry() performs a SEEK_END before all writes,
676 # so we should be safe.
676 # so we should be safe.
677 elif self._writinghandles:
677 elif self._writinghandles:
678 if self._inline:
678 if self._inline:
679 yield self._writinghandles[0]
679 yield self._writinghandles[0]
680 else:
680 else:
681 yield self._writinghandles[1]
681 yield self._writinghandles[1]
682
682
683 # Otherwise open a new file handle.
683 # Otherwise open a new file handle.
684 else:
684 else:
685 if self._inline:
685 if self._inline:
686 func = self._indexfp
686 func = self._indexfp
687 else:
687 else:
688 func = self._datafp
688 func = self._datafp
689 with func() as fp:
689 with func() as fp:
690 yield fp
690 yield fp
691
691
692 def tiprev(self):
692 def tiprev(self):
693 return len(self.index) - 1
693 return len(self.index) - 1
694
694
695 def tip(self):
695 def tip(self):
696 return self.node(self.tiprev())
696 return self.node(self.tiprev())
697
697
698 def __contains__(self, rev):
698 def __contains__(self, rev):
699 return 0 <= rev < len(self)
699 return 0 <= rev < len(self)
700
700
701 def __len__(self):
701 def __len__(self):
702 return len(self.index)
702 return len(self.index)
703
703
704 def __iter__(self):
704 def __iter__(self):
705 return iter(pycompat.xrange(len(self)))
705 return iter(pycompat.xrange(len(self)))
706
706
707 def revs(self, start=0, stop=None):
707 def revs(self, start=0, stop=None):
708 """iterate over all rev in this revlog (from start to stop)"""
708 """iterate over all rev in this revlog (from start to stop)"""
709 return storageutil.iterrevs(len(self), start=start, stop=stop)
709 return storageutil.iterrevs(len(self), start=start, stop=stop)
710
710
711 @property
711 @property
712 def nodemap(self):
712 def nodemap(self):
713 msg = (
713 msg = (
714 b"revlog.nodemap is deprecated, "
714 b"revlog.nodemap is deprecated, "
715 b"use revlog.index.[has_node|rev|get_rev]"
715 b"use revlog.index.[has_node|rev|get_rev]"
716 )
716 )
717 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
717 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
718 return self.index.nodemap
718 return self.index.nodemap
719
719
720 @property
720 @property
721 def _nodecache(self):
721 def _nodecache(self):
722 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
722 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
723 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
723 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
724 return self.index.nodemap
724 return self.index.nodemap
725
725
726 def hasnode(self, node):
726 def hasnode(self, node):
727 try:
727 try:
728 self.rev(node)
728 self.rev(node)
729 return True
729 return True
730 except KeyError:
730 except KeyError:
731 return False
731 return False
732
732
733 def candelta(self, baserev, rev):
733 def candelta(self, baserev, rev):
734 """whether two revisions (baserev, rev) can be delta-ed or not"""
734 """whether two revisions (baserev, rev) can be delta-ed or not"""
735 # Disable delta if either rev requires a content-changing flag
735 # Disable delta if either rev requires a content-changing flag
736 # processor (ex. LFS). This is because such flag processor can alter
736 # processor (ex. LFS). This is because such flag processor can alter
737 # the rawtext content that the delta will be based on, and two clients
737 # the rawtext content that the delta will be based on, and two clients
738 # could have a same revlog node with different flags (i.e. different
738 # could have a same revlog node with different flags (i.e. different
739 # rawtext contents) and the delta could be incompatible.
739 # rawtext contents) and the delta could be incompatible.
740 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
740 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
741 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
741 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
742 ):
742 ):
743 return False
743 return False
744 return True
744 return True
745
745
746 def update_caches(self, transaction):
746 def update_caches(self, transaction):
747 if self._nodemap_file is not None:
747 if self._nodemap_file is not None:
748 if transaction is None:
748 if transaction is None:
749 nodemaputil.update_persistent_nodemap(self)
749 nodemaputil.update_persistent_nodemap(self)
750 else:
750 else:
751 nodemaputil.setup_persistent_nodemap(transaction, self)
751 nodemaputil.setup_persistent_nodemap(transaction, self)
752
752
    def clearcaches(self):
        """Drop all in-memory caches held by this revlog."""
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket, we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)
772
772
773 def rev(self, node):
773 def rev(self, node):
774 try:
774 try:
775 return self.index.rev(node)
775 return self.index.rev(node)
776 except TypeError:
776 except TypeError:
777 raise
777 raise
778 except error.RevlogError:
778 except error.RevlogError:
779 # parsers.c radix tree lookup failed
779 # parsers.c radix tree lookup failed
780 if (
780 if (
781 node == self.nodeconstants.wdirid
781 node == self.nodeconstants.wdirid
782 or node in self.nodeconstants.wdirfilenodeids
782 or node in self.nodeconstants.wdirfilenodeids
783 ):
783 ):
784 raise error.WdirUnsupported
784 raise error.WdirUnsupported
785 raise error.LookupError(node, self.display_id, _(b'no node'))
785 raise error.LookupError(node, self.display_id, _(b'no node'))
786
786
787 # Accessors for index entries.
787 # Accessors for index entries.
788
788
789 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
789 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
790 # are flags.
790 # are flags.
791 def start(self, rev):
791 def start(self, rev):
792 return int(self.index[rev][0] >> 16)
792 return int(self.index[rev][0] >> 16)
793
793
794 def flags(self, rev):
794 def flags(self, rev):
795 return self.index[rev][0] & 0xFFFF
795 return self.index[rev][0] & 0xFFFF
796
796
797 def length(self, rev):
797 def length(self, rev):
798 return self.index[rev][1]
798 return self.index[rev][1]
799
799
800 def sidedata_length(self, rev):
800 def sidedata_length(self, rev):
801 if not self.hassidedata:
801 if not self.hassidedata:
802 return 0
802 return 0
803 return self.index[rev][9]
803 return self.index[rev][9]
804
804
805 def rawsize(self, rev):
805 def rawsize(self, rev):
806 """return the length of the uncompressed text for a given revision"""
806 """return the length of the uncompressed text for a given revision"""
807 l = self.index[rev][2]
807 l = self.index[rev][2]
808 if l >= 0:
808 if l >= 0:
809 return l
809 return l
810
810
811 t = self.rawdata(rev)
811 t = self.rawdata(rev)
812 return len(t)
812 return len(t)
813
813
814 def size(self, rev):
814 def size(self, rev):
815 """length of non-raw text (processed by a "read" flag processor)"""
815 """length of non-raw text (processed by a "read" flag processor)"""
816 # fast path: if no "read" flag processor could change the content,
816 # fast path: if no "read" flag processor could change the content,
817 # size is rawsize. note: ELLIPSIS is known to not change the content.
817 # size is rawsize. note: ELLIPSIS is known to not change the content.
818 flags = self.flags(rev)
818 flags = self.flags(rev)
819 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
819 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
820 return self.rawsize(rev)
820 return self.rawsize(rev)
821
821
822 return len(self.revision(rev, raw=False))
822 return len(self.revision(rev, raw=False))
823
823
824 def chainbase(self, rev):
824 def chainbase(self, rev):
825 base = self._chainbasecache.get(rev)
825 base = self._chainbasecache.get(rev)
826 if base is not None:
826 if base is not None:
827 return base
827 return base
828
828
829 index = self.index
829 index = self.index
830 iterrev = rev
830 iterrev = rev
831 base = index[iterrev][3]
831 base = index[iterrev][3]
832 while base != iterrev:
832 while base != iterrev:
833 iterrev = base
833 iterrev = base
834 base = index[iterrev][3]
834 base = index[iterrev][3]
835
835
836 self._chainbasecache[rev] = base
836 self._chainbasecache[rev] = base
837 return base
837 return base
838
838
839 def linkrev(self, rev):
839 def linkrev(self, rev):
840 return self.index[rev][4]
840 return self.index[rev][4]
841
841
842 def parentrevs(self, rev):
842 def parentrevs(self, rev):
843 try:
843 try:
844 entry = self.index[rev]
844 entry = self.index[rev]
845 except IndexError:
845 except IndexError:
846 if rev == wdirrev:
846 if rev == wdirrev:
847 raise error.WdirUnsupported
847 raise error.WdirUnsupported
848 raise
848 raise
849 if entry[5] == nullrev:
849 if entry[5] == nullrev:
850 return entry[6], entry[5]
850 return entry[6], entry[5]
851 else:
851 else:
852 return entry[5], entry[6]
852 return entry[5], entry[6]
853
853
854 # fast parentrevs(rev) where rev isn't filtered
854 # fast parentrevs(rev) where rev isn't filtered
855 _uncheckedparentrevs = parentrevs
855 _uncheckedparentrevs = parentrevs
856
856
857 def node(self, rev):
857 def node(self, rev):
858 try:
858 try:
859 return self.index[rev][7]
859 return self.index[rev][7]
860 except IndexError:
860 except IndexError:
861 if rev == wdirrev:
861 if rev == wdirrev:
862 raise error.WdirUnsupported
862 raise error.WdirUnsupported
863 raise
863 raise
864
864
865 # Derived from index values.
865 # Derived from index values.
866
866
867 def end(self, rev):
867 def end(self, rev):
868 return self.start(rev) + self.length(rev)
868 return self.start(rev) + self.length(rev)
869
869
870 def parents(self, node):
870 def parents(self, node):
871 i = self.index
871 i = self.index
872 d = i[self.rev(node)]
872 d = i[self.rev(node)]
873 # inline node() to avoid function call overhead
873 # inline node() to avoid function call overhead
874 if d[5] == self.nullid:
874 if d[5] == self.nullid:
875 return i[d[6]][7], i[d[5]][7]
875 return i[d[6]][7], i[d[5]][7]
876 else:
876 else:
877 return i[d[5]][7], i[d[6]][7]
877 return i[d[5]][7], i[d[6]][7]
878
878
879 def chainlen(self, rev):
879 def chainlen(self, rev):
880 return self._chaininfo(rev)[0]
880 return self._chaininfo(rev)[0]
881
881
    def _chaininfo(self, rev):
        """Return ``(chain-length, sum-of-compressed-delta-lengths)`` for ``rev``.

        Results are memoized in ``self._chaininfocache``; cached partial
        chains are reused to shortcut the walk.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # walk the delta chain toward its base, accumulating chain length
        # and total compressed delta size
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                # the delta base is stored explicitly in the index entry
                iterrev = e[3]
            else:
                # without general delta the base is the previous revision
                iterrev -= 1
            if iterrev in chaininfocache:
                # shortcut: reuse the cached result for the rest of the chain
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
912
912
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        # walk toward the chain base (or stoprev), collecting revisions in
        # descending order
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                # the delta base is stored explicitly in the index entry
                iterrev = e[3]
            else:
                # without general delta the base is the previous revision
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            # the chain base itself is part of the chain
            chain.append(iterrev)
            stopped = False

        # callers expect ascending order
        chain.reverse()
        return chain, stopped
953
953
954 def ancestors(self, revs, stoprev=0, inclusive=False):
954 def ancestors(self, revs, stoprev=0, inclusive=False):
955 """Generate the ancestors of 'revs' in reverse revision order.
955 """Generate the ancestors of 'revs' in reverse revision order.
956 Does not generate revs lower than stoprev.
956 Does not generate revs lower than stoprev.
957
957
958 See the documentation for ancestor.lazyancestors for more details."""
958 See the documentation for ancestor.lazyancestors for more details."""
959
959
960 # first, make sure start revisions aren't filtered
960 # first, make sure start revisions aren't filtered
961 revs = list(revs)
961 revs = list(revs)
962 checkrev = self.node
962 checkrev = self.node
963 for r in revs:
963 for r in revs:
964 checkrev(r)
964 checkrev(r)
965 # and we're sure ancestors aren't filtered as well
965 # and we're sure ancestors aren't filtered as well
966
966
967 if rustancestor is not None:
967 if rustancestor is not None:
968 lazyancestors = rustancestor.LazyAncestors
968 lazyancestors = rustancestor.LazyAncestors
969 arg = self.index
969 arg = self.index
970 else:
970 else:
971 lazyancestors = ancestor.lazyancestors
971 lazyancestors = ancestor.lazyancestors
972 arg = self._uncheckedparentrevs
972 arg = self._uncheckedparentrevs
973 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
973 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
974
974
    def descendants(self, revs):
        """Generate the descendant revisions of ``revs`` (revision numbers),
        delegating traversal to ``dagop.descendantrevs``."""
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
977
977
978 def findcommonmissing(self, common=None, heads=None):
978 def findcommonmissing(self, common=None, heads=None):
979 """Return a tuple of the ancestors of common and the ancestors of heads
979 """Return a tuple of the ancestors of common and the ancestors of heads
980 that are not ancestors of common. In revset terminology, we return the
980 that are not ancestors of common. In revset terminology, we return the
981 tuple:
981 tuple:
982
982
983 ::common, (::heads) - (::common)
983 ::common, (::heads) - (::common)
984
984
985 The list is sorted by revision number, meaning it is
985 The list is sorted by revision number, meaning it is
986 topologically sorted.
986 topologically sorted.
987
987
988 'heads' and 'common' are both lists of node IDs. If heads is
988 'heads' and 'common' are both lists of node IDs. If heads is
989 not supplied, uses all of the revlog's heads. If common is not
989 not supplied, uses all of the revlog's heads. If common is not
990 supplied, uses nullid."""
990 supplied, uses nullid."""
991 if common is None:
991 if common is None:
992 common = [self.nullid]
992 common = [self.nullid]
993 if heads is None:
993 if heads is None:
994 heads = self.heads()
994 heads = self.heads()
995
995
996 common = [self.rev(n) for n in common]
996 common = [self.rev(n) for n in common]
997 heads = [self.rev(n) for n in heads]
997 heads = [self.rev(n) for n in heads]
998
998
999 # we want the ancestors, but inclusive
999 # we want the ancestors, but inclusive
1000 class lazyset(object):
1000 class lazyset(object):
1001 def __init__(self, lazyvalues):
1001 def __init__(self, lazyvalues):
1002 self.addedvalues = set()
1002 self.addedvalues = set()
1003 self.lazyvalues = lazyvalues
1003 self.lazyvalues = lazyvalues
1004
1004
1005 def __contains__(self, value):
1005 def __contains__(self, value):
1006 return value in self.addedvalues or value in self.lazyvalues
1006 return value in self.addedvalues or value in self.lazyvalues
1007
1007
1008 def __iter__(self):
1008 def __iter__(self):
1009 added = self.addedvalues
1009 added = self.addedvalues
1010 for r in added:
1010 for r in added:
1011 yield r
1011 yield r
1012 for r in self.lazyvalues:
1012 for r in self.lazyvalues:
1013 if not r in added:
1013 if not r in added:
1014 yield r
1014 yield r
1015
1015
1016 def add(self, value):
1016 def add(self, value):
1017 self.addedvalues.add(value)
1017 self.addedvalues.add(value)
1018
1018
1019 def update(self, values):
1019 def update(self, values):
1020 self.addedvalues.update(values)
1020 self.addedvalues.update(values)
1021
1021
1022 has = lazyset(self.ancestors(common))
1022 has = lazyset(self.ancestors(common))
1023 has.add(nullrev)
1023 has.add(nullrev)
1024 has.update(common)
1024 has.update(common)
1025
1025
1026 # take all ancestors from heads that aren't in has
1026 # take all ancestors from heads that aren't in has
1027 missing = set()
1027 missing = set()
1028 visit = collections.deque(r for r in heads if r not in has)
1028 visit = collections.deque(r for r in heads if r not in has)
1029 while visit:
1029 while visit:
1030 r = visit.popleft()
1030 r = visit.popleft()
1031 if r in missing:
1031 if r in missing:
1032 continue
1032 continue
1033 else:
1033 else:
1034 missing.add(r)
1034 missing.add(r)
1035 for p in self.parentrevs(r):
1035 for p in self.parentrevs(r):
1036 if p not in has:
1036 if p not in has:
1037 visit.append(p)
1037 visit.append(p)
1038 missing = list(missing)
1038 missing = list(missing)
1039 missing.sort()
1039 missing.sort()
1040 return has, [self.node(miss) for miss in missing]
1040 return has, [self.node(miss) for miss in missing]
1041
1041
1042 def incrementalmissingrevs(self, common=None):
1042 def incrementalmissingrevs(self, common=None):
1043 """Return an object that can be used to incrementally compute the
1043 """Return an object that can be used to incrementally compute the
1044 revision numbers of the ancestors of arbitrary sets that are not
1044 revision numbers of the ancestors of arbitrary sets that are not
1045 ancestors of common. This is an ancestor.incrementalmissingancestors
1045 ancestors of common. This is an ancestor.incrementalmissingancestors
1046 object.
1046 object.
1047
1047
1048 'common' is a list of revision numbers. If common is not supplied, uses
1048 'common' is a list of revision numbers. If common is not supplied, uses
1049 nullrev.
1049 nullrev.
1050 """
1050 """
1051 if common is None:
1051 if common is None:
1052 common = [nullrev]
1052 common = [nullrev]
1053
1053
1054 if rustancestor is not None:
1054 if rustancestor is not None:
1055 return rustancestor.MissingAncestors(self.index, common)
1055 return rustancestor.MissingAncestors(self.index, common)
1056 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1056 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1057
1057
1058 def findmissingrevs(self, common=None, heads=None):
1058 def findmissingrevs(self, common=None, heads=None):
1059 """Return the revision numbers of the ancestors of heads that
1059 """Return the revision numbers of the ancestors of heads that
1060 are not ancestors of common.
1060 are not ancestors of common.
1061
1061
1062 More specifically, return a list of revision numbers corresponding to
1062 More specifically, return a list of revision numbers corresponding to
1063 nodes N such that every N satisfies the following constraints:
1063 nodes N such that every N satisfies the following constraints:
1064
1064
1065 1. N is an ancestor of some node in 'heads'
1065 1. N is an ancestor of some node in 'heads'
1066 2. N is not an ancestor of any node in 'common'
1066 2. N is not an ancestor of any node in 'common'
1067
1067
1068 The list is sorted by revision number, meaning it is
1068 The list is sorted by revision number, meaning it is
1069 topologically sorted.
1069 topologically sorted.
1070
1070
1071 'heads' and 'common' are both lists of revision numbers. If heads is
1071 'heads' and 'common' are both lists of revision numbers. If heads is
1072 not supplied, uses all of the revlog's heads. If common is not
1072 not supplied, uses all of the revlog's heads. If common is not
1073 supplied, uses nullid."""
1073 supplied, uses nullid."""
1074 if common is None:
1074 if common is None:
1075 common = [nullrev]
1075 common = [nullrev]
1076 if heads is None:
1076 if heads is None:
1077 heads = self.headrevs()
1077 heads = self.headrevs()
1078
1078
1079 inc = self.incrementalmissingrevs(common=common)
1079 inc = self.incrementalmissingrevs(common=common)
1080 return inc.missingancestors(heads)
1080 return inc.missingancestors(heads)
1081
1081
1082 def findmissing(self, common=None, heads=None):
1082 def findmissing(self, common=None, heads=None):
1083 """Return the ancestors of heads that are not ancestors of common.
1083 """Return the ancestors of heads that are not ancestors of common.
1084
1084
1085 More specifically, return a list of nodes N such that every N
1085 More specifically, return a list of nodes N such that every N
1086 satisfies the following constraints:
1086 satisfies the following constraints:
1087
1087
1088 1. N is an ancestor of some node in 'heads'
1088 1. N is an ancestor of some node in 'heads'
1089 2. N is not an ancestor of any node in 'common'
1089 2. N is not an ancestor of any node in 'common'
1090
1090
1091 The list is sorted by revision number, meaning it is
1091 The list is sorted by revision number, meaning it is
1092 topologically sorted.
1092 topologically sorted.
1093
1093
1094 'heads' and 'common' are both lists of node IDs. If heads is
1094 'heads' and 'common' are both lists of node IDs. If heads is
1095 not supplied, uses all of the revlog's heads. If common is not
1095 not supplied, uses all of the revlog's heads. If common is not
1096 supplied, uses nullid."""
1096 supplied, uses nullid."""
1097 if common is None:
1097 if common is None:
1098 common = [self.nullid]
1098 common = [self.nullid]
1099 if heads is None:
1099 if heads is None:
1100 heads = self.heads()
1100 heads = self.heads()
1101
1101
1102 common = [self.rev(n) for n in common]
1102 common = [self.rev(n) for n in common]
1103 heads = [self.rev(n) for n in heads]
1103 heads = [self.rev(n) for n in heads]
1104
1104
1105 inc = self.incrementalmissingrevs(common=common)
1105 inc = self.incrementalmissingrevs(common=common)
1106 return [self.node(r) for r in inc.missingancestors(heads)]
1106 return [self.node(r) for r in inc.missingancestors(heads)]
1107
1107
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'.  Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs.  If 'roots' is
        unspecified, uses nullid as the only root.  If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
1267
1267
1268 def headrevs(self, revs=None):
1268 def headrevs(self, revs=None):
1269 if revs is None:
1269 if revs is None:
1270 try:
1270 try:
1271 return self.index.headrevs()
1271 return self.index.headrevs()
1272 except AttributeError:
1272 except AttributeError:
1273 return self._headrevs()
1273 return self._headrevs()
1274 if rustdagop is not None:
1274 if rustdagop is not None:
1275 return rustdagop.headrevs(self.index, revs)
1275 return rustdagop.headrevs(self.index, revs)
1276 return dagop.headrevs(revs, self._uncheckedparentrevs)
1276 return dagop.headrevs(revs, self._uncheckedparentrevs)
1277
1277
    def computephases(self, roots):
        """Compute phase map/set information for ``roots`` by delegating to
        the index's ``computephasesmapsets`` method."""
        return self.index.computephasesmapsets(roots)
1280
1280
1281 def _headrevs(self):
1281 def _headrevs(self):
1282 count = len(self)
1282 count = len(self)
1283 if not count:
1283 if not count:
1284 return [nullrev]
1284 return [nullrev]
1285 # we won't iter over filtered rev so nobody is a head at start
1285 # we won't iter over filtered rev so nobody is a head at start
1286 ishead = [0] * (count + 1)
1286 ishead = [0] * (count + 1)
1287 index = self.index
1287 index = self.index
1288 for r in self:
1288 for r in self:
1289 ishead[r] = 1 # I may be an head
1289 ishead[r] = 1 # I may be an head
1290 e = index[r]
1290 e = index[r]
1291 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1291 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1292 return [r for r, val in enumerate(ishead) if val]
1292 return [r for r, val in enumerate(ishead) if val]
1293
1293
1294 def heads(self, start=None, stop=None):
1294 def heads(self, start=None, stop=None):
1295 """return the list of all nodes that have no children
1295 """return the list of all nodes that have no children
1296
1296
1297 if start is specified, only heads that are descendants of
1297 if start is specified, only heads that are descendants of
1298 start will be returned
1298 start will be returned
1299 if stop is specified, it will consider all the revs from stop
1299 if stop is specified, it will consider all the revs from stop
1300 as if they had no children
1300 as if they had no children
1301 """
1301 """
1302 if start is None and stop is None:
1302 if start is None and stop is None:
1303 if not len(self):
1303 if not len(self):
1304 return [self.nullid]
1304 return [self.nullid]
1305 return [self.node(r) for r in self.headrevs()]
1305 return [self.node(r) for r in self.headrevs()]
1306
1306
1307 if start is None:
1307 if start is None:
1308 start = nullrev
1308 start = nullrev
1309 else:
1309 else:
1310 start = self.rev(start)
1310 start = self.rev(start)
1311
1311
1312 stoprevs = {self.rev(n) for n in stop or []}
1312 stoprevs = {self.rev(n) for n in stop or []}
1313
1313
1314 revs = dagop.headrevssubset(
1314 revs = dagop.headrevssubset(
1315 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1315 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1316 )
1316 )
1317
1317
1318 return [self.node(rev) for rev in revs]
1318 return [self.node(rev) for rev in revs]
1319
1319
1320 def children(self, node):
1320 def children(self, node):
1321 """find the children of a given node"""
1321 """find the children of a given node"""
1322 c = []
1322 c = []
1323 p = self.rev(node)
1323 p = self.rev(node)
1324 for r in self.revs(start=p + 1):
1324 for r in self.revs(start=p + 1):
1325 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1325 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1326 if prevs:
1326 if prevs:
1327 for pr in prevs:
1327 for pr in prevs:
1328 if pr == p:
1328 if pr == p:
1329 c.append(self.node(r))
1329 c.append(self.node(r))
1330 elif p == nullrev:
1330 elif p == nullrev:
1331 c.append(self.node(r))
1331 c.append(self.node(r))
1332 return c
1332 return c
1333
1333
1334 def commonancestorsheads(self, a, b):
1334 def commonancestorsheads(self, a, b):
1335 """calculate all the heads of the common ancestors of nodes a and b"""
1335 """calculate all the heads of the common ancestors of nodes a and b"""
1336 a, b = self.rev(a), self.rev(b)
1336 a, b = self.rev(a), self.rev(b)
1337 ancs = self._commonancestorsheads(a, b)
1337 ancs = self._commonancestorsheads(a, b)
1338 return pycompat.maplist(self.node, ancs)
1338 return pycompat.maplist(self.node, ancs)
1339
1339
1340 def _commonancestorsheads(self, *revs):
1340 def _commonancestorsheads(self, *revs):
1341 """calculate all the heads of the common ancestors of revs"""
1341 """calculate all the heads of the common ancestors of revs"""
1342 try:
1342 try:
1343 ancs = self.index.commonancestorsheads(*revs)
1343 ancs = self.index.commonancestorsheads(*revs)
1344 except (AttributeError, OverflowError): # C implementation failed
1344 except (AttributeError, OverflowError): # C implementation failed
1345 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1345 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1346 return ancs
1346 return ancs
1347
1347
1348 def isancestor(self, a, b):
1348 def isancestor(self, a, b):
1349 """return True if node a is an ancestor of node b
1349 """return True if node a is an ancestor of node b
1350
1350
1351 A revision is considered an ancestor of itself."""
1351 A revision is considered an ancestor of itself."""
1352 a, b = self.rev(a), self.rev(b)
1352 a, b = self.rev(a), self.rev(b)
1353 return self.isancestorrev(a, b)
1353 return self.isancestorrev(a, b)
1354
1354
1355 def isancestorrev(self, a, b):
1355 def isancestorrev(self, a, b):
1356 """return True if revision a is an ancestor of revision b
1356 """return True if revision a is an ancestor of revision b
1357
1357
1358 A revision is considered an ancestor of itself.
1358 A revision is considered an ancestor of itself.
1359
1359
1360 The implementation of this is trivial but the use of
1360 The implementation of this is trivial but the use of
1361 reachableroots is not."""
1361 reachableroots is not."""
1362 if a == nullrev:
1362 if a == nullrev:
1363 return True
1363 return True
1364 elif a == b:
1364 elif a == b:
1365 return True
1365 return True
1366 elif a > b:
1366 elif a > b:
1367 return False
1367 return False
1368 return bool(self.reachableroots(a, [b], [a], includepath=False))
1368 return bool(self.reachableroots(a, [b], [a], includepath=False))
1369
1369
1370 def reachableroots(self, minroot, heads, roots, includepath=False):
1370 def reachableroots(self, minroot, heads, roots, includepath=False):
1371 """return (heads(::(<roots> and <roots>::<heads>)))
1371 """return (heads(::(<roots> and <roots>::<heads>)))
1372
1372
1373 If includepath is True, return (<roots>::<heads>)."""
1373 If includepath is True, return (<roots>::<heads>)."""
1374 try:
1374 try:
1375 return self.index.reachableroots2(
1375 return self.index.reachableroots2(
1376 minroot, heads, roots, includepath
1376 minroot, heads, roots, includepath
1377 )
1377 )
1378 except AttributeError:
1378 except AttributeError:
1379 return dagop._reachablerootspure(
1379 return dagop._reachablerootspure(
1380 self.parentrevs, minroot, roots, heads, includepath
1380 self.parentrevs, minroot, roots, heads, includepath
1381 )
1381 )
1382
1382
1383 def ancestor(self, a, b):
1383 def ancestor(self, a, b):
1384 """calculate the "best" common ancestor of nodes a and b"""
1384 """calculate the "best" common ancestor of nodes a and b"""
1385
1385
1386 a, b = self.rev(a), self.rev(b)
1386 a, b = self.rev(a), self.rev(b)
1387 try:
1387 try:
1388 ancs = self.index.ancestors(a, b)
1388 ancs = self.index.ancestors(a, b)
1389 except (AttributeError, OverflowError):
1389 except (AttributeError, OverflowError):
1390 ancs = ancestor.ancestors(self.parentrevs, a, b)
1390 ancs = ancestor.ancestors(self.parentrevs, a, b)
1391 if ancs:
1391 if ancs:
1392 # choose a consistent winner when there's a tie
1392 # choose a consistent winner when there's a tie
1393 return min(map(self.node, ancs))
1393 return min(map(self.node, ancs))
1394 return self.nullid
1394 return self.nullid
1395
1395
    def _match(self, id):
        """Resolve ``id`` to a binary node, trying cheap interpretations in
        order: integer rev, binary node of the right length, decimal rev
        bytestring, full hex nodeid.  Falls through (returning None) when
        nothing matches."""
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            # reject strings that don't round-trip (e.g. b'01', b'+1')
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                # negative revs count from the end
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass
1429
1429
    def _partialmatch(self, id):
        """Resolve a hex-prefix ``id`` to a unique binary node.

        Returns the node on a unique match, None when nothing matches.
        Raises AmbiguousPrefixLookupError when several nodes share the
        prefix, and WdirUnsupported when the prefix can only denote the
        working-directory pseudo-node.
        """
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            # slow path: scan every index entry for nodes whose hex form
            # starts with the prefix
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass
1483 def lookup(self, id):
1483 def lookup(self, id):
1484 """locate a node based on:
1484 """locate a node based on:
1485 - revision number or str(revision number)
1485 - revision number or str(revision number)
1486 - nodeid or subset of hex nodeid
1486 - nodeid or subset of hex nodeid
1487 """
1487 """
1488 n = self._match(id)
1488 n = self._match(id)
1489 if n is not None:
1489 if n is not None:
1490 return n
1490 return n
1491 n = self._partialmatch(id)
1491 n = self._partialmatch(id)
1492 if n:
1492 if n:
1493 return n
1493 return n
1494
1494
1495 raise error.LookupError(id, self.display_id, _(b'no match found'))
1495 raise error.LookupError(id, self.display_id, _(b'no match found'))
1496
1496
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            # True when the prefix resolves without ambiguity (an all-'f'
            # prefix resolving to wdir counts as valid)
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            # an all-'f' prefix could also denote the wdir pseudo-node
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            # unfiltered repo: the index can compute the shortest unique
            # prefix length directly
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        # pure-python path: grow the prefix until it is unambiguous
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1548 def cmp(self, node, text):
1548 def cmp(self, node, text):
1549 """compare text with a given file revision
1549 """compare text with a given file revision
1550
1550
1551 returns True if text is different than what is stored.
1551 returns True if text is different than what is stored.
1552 """
1552 """
1553 p1, p2 = self.parents(node)
1553 p1, p2 = self.parents(node)
1554 return storageutil.hashrevisionsha1(text, p1, p2) != node
1554 return storageutil.hashrevisionsha1(text, p1, p2) != node
1555
1555
1556 def _cachesegment(self, offset, data):
1556 def _cachesegment(self, offset, data):
1557 """Add a segment to the revlog cache.
1557 """Add a segment to the revlog cache.
1558
1558
1559 Accepts an absolute offset and the data that is at that location.
1559 Accepts an absolute offset and the data that is at that location.
1560 """
1560 """
1561 o, d = self._chunkcache
1561 o, d = self._chunkcache
1562 # try to add to existing cache
1562 # try to add to existing cache
1563 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1563 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1564 self._chunkcache = o, d + data
1564 self._chunkcache = o, d + data
1565 else:
1565 else:
1566 self._chunkcache = offset, data
1566 self._chunkcache = offset, data
1567
1567
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        # align the window on cachesize boundaries; cachesize is a power of
        # two, so ~(cachesize - 1) masks off the low bits
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            # the caller asked for a sub-range of the aligned window;
            # verify we actually read enough before slicing it out
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self._indexfile if self._inline else self._datafile,
                        length,
                        offset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self._indexfile if self._inline else self._datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d
1628 def _getsegment(self, offset, length, df=None):
1628 def _getsegment(self, offset, length, df=None):
1629 """Obtain a segment of raw data from the revlog.
1629 """Obtain a segment of raw data from the revlog.
1630
1630
1631 Accepts an absolute offset, length of bytes to obtain, and an
1631 Accepts an absolute offset, length of bytes to obtain, and an
1632 optional file handle to the already-opened revlog. If the file
1632 optional file handle to the already-opened revlog. If the file
1633 handle is used, it's original seek position will not be preserved.
1633 handle is used, it's original seek position will not be preserved.
1634
1634
1635 Requests for data may be returned from a cache.
1635 Requests for data may be returned from a cache.
1636
1636
1637 Returns a str or a buffer instance of raw byte data.
1637 Returns a str or a buffer instance of raw byte data.
1638 """
1638 """
1639 o, d = self._chunkcache
1639 o, d = self._chunkcache
1640 l = len(d)
1640 l = len(d)
1641
1641
1642 # is it in the cache?
1642 # is it in the cache?
1643 cachestart = offset - o
1643 cachestart = offset - o
1644 cacheend = cachestart + length
1644 cacheend = cachestart + length
1645 if cachestart >= 0 and cacheend <= l:
1645 if cachestart >= 0 and cacheend <= l:
1646 if cachestart == 0 and cacheend == l:
1646 if cachestart == 0 and cacheend == l:
1647 return d # avoid a copy
1647 return d # avoid a copy
1648 return util.buffer(d, cachestart, cacheend - cachestart)
1648 return util.buffer(d, cachestart, cacheend - cachestart)
1649
1649
1650 return self._readsegment(offset, length, df=df)
1650 return self._readsegment(offset, length, df=df)
1651
1651
1652 def _getsegmentforrevs(self, startrev, endrev, df=None):
1652 def _getsegmentforrevs(self, startrev, endrev, df=None):
1653 """Obtain a segment of raw data corresponding to a range of revisions.
1653 """Obtain a segment of raw data corresponding to a range of revisions.
1654
1654
1655 Accepts the start and end revisions and an optional already-open
1655 Accepts the start and end revisions and an optional already-open
1656 file handle to be used for reading. If the file handle is read, its
1656 file handle to be used for reading. If the file handle is read, its
1657 seek position will not be preserved.
1657 seek position will not be preserved.
1658
1658
1659 Requests for data may be satisfied by a cache.
1659 Requests for data may be satisfied by a cache.
1660
1660
1661 Returns a 2-tuple of (offset, data) for the requested range of
1661 Returns a 2-tuple of (offset, data) for the requested range of
1662 revisions. Offset is the integer offset from the beginning of the
1662 revisions. Offset is the integer offset from the beginning of the
1663 revlog and data is a str or buffer of the raw byte data.
1663 revlog and data is a str or buffer of the raw byte data.
1664
1664
1665 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1665 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1666 to determine where each revision's data begins and ends.
1666 to determine where each revision's data begins and ends.
1667 """
1667 """
1668 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1668 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1669 # (functions are expensive).
1669 # (functions are expensive).
1670 index = self.index
1670 index = self.index
1671 istart = index[startrev]
1671 istart = index[startrev]
1672 start = int(istart[0] >> 16)
1672 start = int(istart[0] >> 16)
1673 if startrev == endrev:
1673 if startrev == endrev:
1674 end = start + istart[1]
1674 end = start + istart[1]
1675 else:
1675 else:
1676 iend = index[endrev]
1676 iend = index[endrev]
1677 end = int(iend[0] >> 16) + iend[1]
1677 end = int(iend[0] >> 16) + iend[1]
1678
1678
1679 if self._inline:
1679 if self._inline:
1680 start += (startrev + 1) * self.index.entry_size
1680 start += (startrev + 1) * self.index.entry_size
1681 end += (endrev + 1) * self.index.entry_size
1681 end += (endrev + 1) * self.index.entry_size
1682 length = end - start
1682 length = end - start
1683
1683
1684 return start, self._getsegment(start, length, df=df)
1684 return start, self._getsegment(start, length, df=df)
1685
1685
1686 def _chunk(self, rev, df=None):
1686 def _chunk(self, rev, df=None):
1687 """Obtain a single decompressed chunk for a revision.
1687 """Obtain a single decompressed chunk for a revision.
1688
1688
1689 Accepts an integer revision and an optional already-open file handle
1689 Accepts an integer revision and an optional already-open file handle
1690 to be used for reading. If used, the seek position of the file will not
1690 to be used for reading. If used, the seek position of the file will not
1691 be preserved.
1691 be preserved.
1692
1692
1693 Returns a str holding uncompressed data for the requested revision.
1693 Returns a str holding uncompressed data for the requested revision.
1694 """
1694 """
1695 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1695 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1696
1696
    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        # hoist attribute lookups out of the hot loops below
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            # sparse-read: split the run into densely populated slices so
            # large unrelated gaps are not read from disk
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    # account for the index entries interleaved with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l
1752 def _chunkclear(self):
1752 def _chunkclear(self):
1753 """Clear the raw chunk cache."""
1753 """Clear the raw chunk cache."""
1754 self._chunkcache = (0, b'')
1754 self._chunkcache = (0, b'')
1755
1755
1756 def deltaparent(self, rev):
1756 def deltaparent(self, rev):
1757 """return deltaparent of the given revision"""
1757 """return deltaparent of the given revision"""
1758 base = self.index[rev][3]
1758 base = self.index[rev][3]
1759 if base == rev:
1759 if base == rev:
1760 return nullrev
1760 return nullrev
1761 elif self._generaldelta:
1761 elif self._generaldelta:
1762 return base
1762 return base
1763 else:
1763 else:
1764 return rev - 1
1764 return rev - 1
1765
1765
1766 def issnapshot(self, rev):
1766 def issnapshot(self, rev):
1767 """tells whether rev is a snapshot"""
1767 """tells whether rev is a snapshot"""
1768 if not self._sparserevlog:
1768 if not self._sparserevlog:
1769 return self.deltaparent(rev) == nullrev
1769 return self.deltaparent(rev) == nullrev
1770 elif util.safehasattr(self.index, b'issnapshot'):
1770 elif util.safehasattr(self.index, b'issnapshot'):
1771 # directly assign the method to cache the testing and access
1771 # directly assign the method to cache the testing and access
1772 self.issnapshot = self.index.issnapshot
1772 self.issnapshot = self.index.issnapshot
1773 return self.issnapshot(rev)
1773 return self.issnapshot(rev)
1774 if rev == nullrev:
1774 if rev == nullrev:
1775 return True
1775 return True
1776 entry = self.index[rev]
1776 entry = self.index[rev]
1777 base = entry[3]
1777 base = entry[3]
1778 if base == rev:
1778 if base == rev:
1779 return True
1779 return True
1780 if base == nullrev:
1780 if base == nullrev:
1781 return True
1781 return True
1782 p1 = entry[5]
1782 p1 = entry[5]
1783 p2 = entry[6]
1783 p2 = entry[6]
1784 if base == p1 or base == p2:
1784 if base == p1 or base == p2:
1785 return False
1785 return False
1786 return self.issnapshot(base)
1786 return self.issnapshot(base)
1787
1787
1788 def snapshotdepth(self, rev):
1788 def snapshotdepth(self, rev):
1789 """number of snapshot in the chain before this one"""
1789 """number of snapshot in the chain before this one"""
1790 if not self.issnapshot(rev):
1790 if not self.issnapshot(rev):
1791 raise error.ProgrammingError(b'revision %d not a snapshot')
1791 raise error.ProgrammingError(b'revision %d not a snapshot')
1792 return len(self._deltachain(rev)[0]) - 1
1792 return len(self._deltachain(rev)[0]) - 1
1793
1793
1794 def revdiff(self, rev1, rev2):
1794 def revdiff(self, rev1, rev2):
1795 """return or calculate a delta between two revisions
1795 """return or calculate a delta between two revisions
1796
1796
1797 The delta calculated is in binary form and is intended to be written to
1797 The delta calculated is in binary form and is intended to be written to
1798 revlog data directly. So this function needs raw revision data.
1798 revlog data directly. So this function needs raw revision data.
1799 """
1799 """
1800 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1800 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1801 return bytes(self._chunk(rev2))
1801 return bytes(self._chunk(rev2))
1802
1802
1803 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1803 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1804
1804
1805 def _processflags(self, text, flags, operation, raw=False):
1805 def _processflags(self, text, flags, operation, raw=False):
1806 """deprecated entry point to access flag processors"""
1806 """deprecated entry point to access flag processors"""
1807 msg = b'_processflag(...) use the specialized variant'
1807 msg = b'_processflag(...) use the specialized variant'
1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1809 if raw:
1809 if raw:
1810 return text, flagutil.processflagsraw(self, text, flags)
1810 return text, flagutil.processflagsraw(self, text, flags)
1811 elif operation == b'read':
1811 elif operation == b'read':
1812 return flagutil.processflagsread(self, text, flags)
1812 return flagutil.processflagsread(self, text, flags)
1813 else: # write operation
1813 else: # write operation
1814 return flagutil.processflagswrite(self, text, flags)
1814 return flagutil.processflagswrite(self, text, flags)
1815
1815
1816 def revision(self, nodeorrev, _df=None, raw=False):
1816 def revision(self, nodeorrev, _df=None, raw=False):
1817 """return an uncompressed revision of a given node or revision
1817 """return an uncompressed revision of a given node or revision
1818 number.
1818 number.
1819
1819
1820 _df - an existing file handle to read from. (internal-only)
1820 _df - an existing file handle to read from. (internal-only)
1821 raw - an optional argument specifying if the revision data is to be
1821 raw - an optional argument specifying if the revision data is to be
1822 treated as raw data when applying flag transforms. 'raw' should be set
1822 treated as raw data when applying flag transforms. 'raw' should be set
1823 to True when generating changegroups or in debug commands.
1823 to True when generating changegroups or in debug commands.
1824 """
1824 """
1825 if raw:
1825 if raw:
1826 msg = (
1826 msg = (
1827 b'revlog.revision(..., raw=True) is deprecated, '
1827 b'revlog.revision(..., raw=True) is deprecated, '
1828 b'use revlog.rawdata(...)'
1828 b'use revlog.rawdata(...)'
1829 )
1829 )
1830 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1830 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1831 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1831 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1832
1832
1833 def sidedata(self, nodeorrev, _df=None):
1833 def sidedata(self, nodeorrev, _df=None):
1834 """a map of extra data related to the changeset but not part of the hash
1834 """a map of extra data related to the changeset but not part of the hash
1835
1835
1836 This function currently return a dictionary. However, more advanced
1836 This function currently return a dictionary. However, more advanced
1837 mapping object will likely be used in the future for a more
1837 mapping object will likely be used in the future for a more
1838 efficient/lazy code.
1838 efficient/lazy code.
1839 """
1839 """
1840 return self._revisiondata(nodeorrev, _df)[1]
1840 return self._revisiondata(nodeorrev, _df)[1]
1841
1841
    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        """Return the ``(text, sidedata)`` pair for a node or revision.

        When ``raw`` is True the stored rawtext is returned without running
        the read flag processors (its hash is still verified when needed).
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if self.hassidedata:
            if rev is None:
                rev = self.rev(node)
            sidedata = self._sidedata(rev)
        else:
            sidedata = {}

        if raw and validated:
            # if we don't want to process the raw text and that raw
            # text is cached, we can exit early.
            return rawtext, sidedata
        if rev is None:
            rev = self.rev(node)
        # the revlog's flag for this revision
        # (usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, sidedata

        if raw:
            # only verify the stored text; do not transform it
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            # cache the now-validated rawtext for subsequent calls
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata
    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)

        _df - an existing file handle to read from. (internal-only)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # cache hit: the cached rawtext is already the full text
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        # walk the delta chain back until a full snapshot, or until the
        # cached revision is reached (whichever comes first)
        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            # chain stopped at the cached revision: use its text as the base
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            # read-ahead bound: 4x the expected uncompressed size
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            # no cached base: the first chunk of the chain is the snapshot
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)
1935
1935
1936 def _sidedata(self, rev):
1936 def _sidedata(self, rev):
1937 """Return the sidedata for a given revision number."""
1937 """Return the sidedata for a given revision number."""
1938 index_entry = self.index[rev]
1938 index_entry = self.index[rev]
1939 sidedata_offset = index_entry[8]
1939 sidedata_offset = index_entry[8]
1940 sidedata_size = index_entry[9]
1940 sidedata_size = index_entry[9]
1941
1941
1942 if self._inline:
1942 if self._inline:
1943 sidedata_offset += self.index.entry_size * (1 + rev)
1943 sidedata_offset += self.index.entry_size * (1 + rev)
1944 if sidedata_size == 0:
1944 if sidedata_size == 0:
1945 return {}
1945 return {}
1946
1946
1947 segment = self._getsegment(sidedata_offset, sidedata_size)
1947 segment = self._getsegment(sidedata_offset, sidedata_size)
1948 sidedata = sidedatautil.deserialize_sidedata(segment)
1948 sidedata = sidedatautil.deserialize_sidedata(segment)
1949 return sidedata
1949 return sidedata
1950
1950
1951 def rawdata(self, nodeorrev, _df=None):
1951 def rawdata(self, nodeorrev, _df=None):
1952 """return an uncompressed raw data of a given node or revision number.
1952 """return an uncompressed raw data of a given node or revision number.
1953
1953
1954 _df - an existing file handle to read from. (internal-only)
1954 _df - an existing file handle to read from. (internal-only)
1955 """
1955 """
1956 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1956 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1957
1957
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        # default: SHA-1 over the sorted parent nodes followed by the text
        return storageutil.hashrevisionsha1(text, p1, p2)
1965
1965
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    # no revision number supplied: show the short node instead
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            # censored content hashes to a different value on purpose; report
            # it as censorship rather than corruption when applicable
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise
1996
1996
    def _enforceinlinesize(self, tr):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            # already split, or still small enough to stay inline
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        trindex = 0
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            # copy every revision's data segment out of the inline index
            # into the fresh standalone data file
            with self._indexfp() as read_ifh:
                for r in self:
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                    # remember the first revision at or past the transaction
                    # offset, so the index can be truncated there on rollback
                    if troffset <= self.start(r):
                        trindex = r
                new_dfh.flush()

            with self.__index_new_fp() as fp:
                self._format_flags &= ~FLAG_INLINE_DATA
                self._inline = False
                for i in self:
                    e = self.index.entry_binary(i)
                    if i == 0 and self._docket is None:
                        # docket-less revlogs embed the version header in the
                        # first index entry
                        header = self._format_flags | self._format_version
                        header = self.index.pack_header(header)
                        e = header + e
                    fp.write(e)
                if self._docket is not None:
                    self._docket.index_end = fp.tell()
                # the temp file replace the real index when we exit the context
                # manager

            tr.replace(self._indexfile, trindex * self.index.entry_size)
            nodemaputil.setup_persistent_nodemap(tr, self)
            self._chunkclear()

            if existing_handles:
                # switched from inline to conventional reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh)
                # ownership of new_dfh moved into _writinghandles; don't close
                new_dfh = None
        finally:
            if new_dfh is not None:
                new_dfh.close()
2064
2064
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored.

        Default implementation is a no-op; subclasses may override it.
        """
2067
2067
    @contextlib.contextmanager
    def _writing(self, transaction):
        """Context manager providing open write handles for this revlog.

        While active, ``self._writinghandles`` holds ``(index_fh, data_fh)``
        (``data_fh`` is None for inline revlogs). Nesting is allowed: an
        already-open context is simply re-used.
        """
        if self._writinghandles is not None:
            # already inside a _writing context: reuse the open handles
            yield
        else:
            r = len(self)
            dsize = 0
            if r:
                dsize = self.end(r - 1)
            dfh = None
            if not self._inline:
                try:
                    dfh = self._datafp(b"r+")
                    dfh.seek(0, os.SEEK_END)
                except IOError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                    # data file does not exist yet: create it
                    dfh = self._datafp(b"w+")
                transaction.add(self._datafile, dsize)
            try:
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    # inline index carries the data too, so register both sizes
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                try:
                    self._writinghandles = (ifh, dfh)
                    try:
                        yield
                        if self._docket is not None:
                            # persist the docket once all writes in this
                            # context have completed
                            self._write_docket(transaction)
                    finally:
                        self._writinghandles = None
                finally:
                    ifh.close()
            finally:
                if dfh is not None:
                    dfh.close()
2107
2107
2108 def _write_docket(self, transaction):
2109 """write the current docket on disk
2110
2111 Exist as a method to help changelog to implement transaction logic
2112
2113 We could also imagine using the same transaction logic for all revlog
2114 since docket are cheap."""
2115 self._docket.write(transaction)
2116
2108 def addrevision(
2117 def addrevision(
2109 self,
2118 self,
2110 text,
2119 text,
2111 transaction,
2120 transaction,
2112 link,
2121 link,
2113 p1,
2122 p1,
2114 p2,
2123 p2,
2115 cachedelta=None,
2124 cachedelta=None,
2116 node=None,
2125 node=None,
2117 flags=REVIDX_DEFAULT_FLAGS,
2126 flags=REVIDX_DEFAULT_FLAGS,
2118 deltacomputer=None,
2127 deltacomputer=None,
2119 sidedata=None,
2128 sidedata=None,
2120 ):
2129 ):
2121 """add a revision to the log
2130 """add a revision to the log
2122
2131
2123 text - the revision data to add
2132 text - the revision data to add
2124 transaction - the transaction object used for rollback
2133 transaction - the transaction object used for rollback
2125 link - the linkrev data to add
2134 link - the linkrev data to add
2126 p1, p2 - the parent nodeids of the revision
2135 p1, p2 - the parent nodeids of the revision
2127 cachedelta - an optional precomputed delta
2136 cachedelta - an optional precomputed delta
2128 node - nodeid of revision; typically node is not specified, and it is
2137 node - nodeid of revision; typically node is not specified, and it is
2129 computed by default as hash(text, p1, p2), however subclasses might
2138 computed by default as hash(text, p1, p2), however subclasses might
2130 use different hashing method (and override checkhash() in such case)
2139 use different hashing method (and override checkhash() in such case)
2131 flags - the known flags to set on the revision
2140 flags - the known flags to set on the revision
2132 deltacomputer - an optional deltacomputer instance shared between
2141 deltacomputer - an optional deltacomputer instance shared between
2133 multiple calls
2142 multiple calls
2134 """
2143 """
2135 if link == nullrev:
2144 if link == nullrev:
2136 raise error.RevlogError(
2145 raise error.RevlogError(
2137 _(b"attempted to add linkrev -1 to %s") % self.display_id
2146 _(b"attempted to add linkrev -1 to %s") % self.display_id
2138 )
2147 )
2139
2148
2140 if sidedata is None:
2149 if sidedata is None:
2141 sidedata = {}
2150 sidedata = {}
2142 elif sidedata and not self.hassidedata:
2151 elif sidedata and not self.hassidedata:
2143 raise error.ProgrammingError(
2152 raise error.ProgrammingError(
2144 _(b"trying to add sidedata to a revlog who don't support them")
2153 _(b"trying to add sidedata to a revlog who don't support them")
2145 )
2154 )
2146
2155
2147 if flags:
2156 if flags:
2148 node = node or self.hash(text, p1, p2)
2157 node = node or self.hash(text, p1, p2)
2149
2158
2150 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2159 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2151
2160
2152 # If the flag processor modifies the revision data, ignore any provided
2161 # If the flag processor modifies the revision data, ignore any provided
2153 # cachedelta.
2162 # cachedelta.
2154 if rawtext != text:
2163 if rawtext != text:
2155 cachedelta = None
2164 cachedelta = None
2156
2165
2157 if len(rawtext) > _maxentrysize:
2166 if len(rawtext) > _maxentrysize:
2158 raise error.RevlogError(
2167 raise error.RevlogError(
2159 _(
2168 _(
2160 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2169 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2161 )
2170 )
2162 % (self.display_id, len(rawtext))
2171 % (self.display_id, len(rawtext))
2163 )
2172 )
2164
2173
2165 node = node or self.hash(rawtext, p1, p2)
2174 node = node or self.hash(rawtext, p1, p2)
2166 rev = self.index.get_rev(node)
2175 rev = self.index.get_rev(node)
2167 if rev is not None:
2176 if rev is not None:
2168 return rev
2177 return rev
2169
2178
2170 if validatehash:
2179 if validatehash:
2171 self.checkhash(rawtext, node, p1=p1, p2=p2)
2180 self.checkhash(rawtext, node, p1=p1, p2=p2)
2172
2181
2173 return self.addrawrevision(
2182 return self.addrawrevision(
2174 rawtext,
2183 rawtext,
2175 transaction,
2184 transaction,
2176 link,
2185 link,
2177 p1,
2186 p1,
2178 p2,
2187 p2,
2179 node,
2188 node,
2180 flags,
2189 flags,
2181 cachedelta=cachedelta,
2190 cachedelta=cachedelta,
2182 deltacomputer=deltacomputer,
2191 deltacomputer=deltacomputer,
2183 sidedata=sidedata,
2192 sidedata=sidedata,
2184 )
2193 )
2185
2194
2186 def addrawrevision(
2195 def addrawrevision(
2187 self,
2196 self,
2188 rawtext,
2197 rawtext,
2189 transaction,
2198 transaction,
2190 link,
2199 link,
2191 p1,
2200 p1,
2192 p2,
2201 p2,
2193 node,
2202 node,
2194 flags,
2203 flags,
2195 cachedelta=None,
2204 cachedelta=None,
2196 deltacomputer=None,
2205 deltacomputer=None,
2197 sidedata=None,
2206 sidedata=None,
2198 ):
2207 ):
2199 """add a raw revision with known flags, node and parents
2208 """add a raw revision with known flags, node and parents
2200 useful when reusing a revision not stored in this revlog (ex: received
2209 useful when reusing a revision not stored in this revlog (ex: received
2201 over wire, or read from an external bundle).
2210 over wire, or read from an external bundle).
2202 """
2211 """
2203 with self._writing(transaction):
2212 with self._writing(transaction):
2204 return self._addrevision(
2213 return self._addrevision(
2205 node,
2214 node,
2206 rawtext,
2215 rawtext,
2207 transaction,
2216 transaction,
2208 link,
2217 link,
2209 p1,
2218 p1,
2210 p2,
2219 p2,
2211 flags,
2220 flags,
2212 cachedelta,
2221 cachedelta,
2213 deltacomputer=deltacomputer,
2222 deltacomputer=deltacomputer,
2214 sidedata=sidedata,
2223 sidedata=sidedata,
2215 )
2224 )
2216
2225
2217 def compress(self, data):
2226 def compress(self, data):
2218 """Generate a possibly-compressed representation of data."""
2227 """Generate a possibly-compressed representation of data."""
2219 if not data:
2228 if not data:
2220 return b'', data
2229 return b'', data
2221
2230
2222 compressed = self._compressor.compress(data)
2231 compressed = self._compressor.compress(data)
2223
2232
2224 if compressed:
2233 if compressed:
2225 # The revlog compressor added the header in the returned data.
2234 # The revlog compressor added the header in the returned data.
2226 return b'', compressed
2235 return b'', compressed
2227
2236
2228 if data[0:1] == b'\0':
2237 if data[0:1] == b'\0':
2229 return b'', data
2238 return b'', data
2230 return b'u', data
2239 return b'u', data
2231
2240
2232 def decompress(self, data):
2241 def decompress(self, data):
2233 """Decompress a revlog chunk.
2242 """Decompress a revlog chunk.
2234
2243
2235 The chunk is expected to begin with a header identifying the
2244 The chunk is expected to begin with a header identifying the
2236 format type so it can be routed to an appropriate decompressor.
2245 format type so it can be routed to an appropriate decompressor.
2237 """
2246 """
2238 if not data:
2247 if not data:
2239 return data
2248 return data
2240
2249
2241 # Revlogs are read much more frequently than they are written and many
2250 # Revlogs are read much more frequently than they are written and many
2242 # chunks only take microseconds to decompress, so performance is
2251 # chunks only take microseconds to decompress, so performance is
2243 # important here.
2252 # important here.
2244 #
2253 #
2245 # We can make a few assumptions about revlogs:
2254 # We can make a few assumptions about revlogs:
2246 #
2255 #
2247 # 1) the majority of chunks will be compressed (as opposed to inline
2256 # 1) the majority of chunks will be compressed (as opposed to inline
2248 # raw data).
2257 # raw data).
2249 # 2) decompressing *any* data will likely by at least 10x slower than
2258 # 2) decompressing *any* data will likely by at least 10x slower than
2250 # returning raw inline data.
2259 # returning raw inline data.
2251 # 3) we want to prioritize common and officially supported compression
2260 # 3) we want to prioritize common and officially supported compression
2252 # engines
2261 # engines
2253 #
2262 #
2254 # It follows that we want to optimize for "decompress compressed data
2263 # It follows that we want to optimize for "decompress compressed data
2255 # when encoded with common and officially supported compression engines"
2264 # when encoded with common and officially supported compression engines"
2256 # case over "raw data" and "data encoded by less common or non-official
2265 # case over "raw data" and "data encoded by less common or non-official
2257 # compression engines." That is why we have the inline lookup first
2266 # compression engines." That is why we have the inline lookup first
2258 # followed by the compengines lookup.
2267 # followed by the compengines lookup.
2259 #
2268 #
2260 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2269 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2261 # compressed chunks. And this matters for changelog and manifest reads.
2270 # compressed chunks. And this matters for changelog and manifest reads.
2262 t = data[0:1]
2271 t = data[0:1]
2263
2272
2264 if t == b'x':
2273 if t == b'x':
2265 try:
2274 try:
2266 return _zlibdecompress(data)
2275 return _zlibdecompress(data)
2267 except zlib.error as e:
2276 except zlib.error as e:
2268 raise error.RevlogError(
2277 raise error.RevlogError(
2269 _(b'revlog decompress error: %s')
2278 _(b'revlog decompress error: %s')
2270 % stringutil.forcebytestr(e)
2279 % stringutil.forcebytestr(e)
2271 )
2280 )
2272 # '\0' is more common than 'u' so it goes first.
2281 # '\0' is more common than 'u' so it goes first.
2273 elif t == b'\0':
2282 elif t == b'\0':
2274 return data
2283 return data
2275 elif t == b'u':
2284 elif t == b'u':
2276 return util.buffer(data, 1)
2285 return util.buffer(data, 1)
2277
2286
2278 try:
2287 try:
2279 compressor = self._decompressors[t]
2288 compressor = self._decompressors[t]
2280 except KeyError:
2289 except KeyError:
2281 try:
2290 try:
2282 engine = util.compengines.forrevlogheader(t)
2291 engine = util.compengines.forrevlogheader(t)
2283 compressor = engine.revlogcompressor(self._compengineopts)
2292 compressor = engine.revlogcompressor(self._compengineopts)
2284 self._decompressors[t] = compressor
2293 self._decompressors[t] = compressor
2285 except KeyError:
2294 except KeyError:
2286 raise error.RevlogError(
2295 raise error.RevlogError(
2287 _(b'unknown compression type %s') % binascii.hexlify(t)
2296 _(b'unknown compression type %s') % binascii.hexlify(t)
2288 )
2297 )
2289
2298
2290 return compressor.decompress(data)
2299 return compressor.decompress(data)
2291
2300
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        # pick the handle the delta computer will read existing data from:
        # inline revlogs keep data in the index file
        if self._inline:
            fh = self._writinghandles[0]
        else:
            fh = self._writinghandles[1]

        # list wrapper so helpers can fill in the text lazily by reference
        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh = self._writinghandles
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        if sidedata and self.hassidedata:
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            # sidedata is appended right after the revision's delta
            sidedata_offset = offset + deltainfo.deltalen
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
            sidedata_offset,
            len(serialized_sidedata),
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            # docket-less revlogs embed the version header in the first entry
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr
2429
2438
2430 def _get_data_offset(self, prev):
2439 def _get_data_offset(self, prev):
2431 """Returns the current offset in the (in-transaction) data file.
2440 """Returns the current offset in the (in-transaction) data file.
2432 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2441 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2433 file to store that information: since sidedata can be rewritten to the
2442 file to store that information: since sidedata can be rewritten to the
2434 end of the data file within a transaction, you can have cases where, for
2443 end of the data file within a transaction, you can have cases where, for
2435 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2444 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2436 to `n - 1`'s sidedata being written after `n`'s data.
2445 to `n - 1`'s sidedata being written after `n`'s data.
2437
2446
2438 TODO cache this in a docket file before getting out of experimental."""
2447 TODO cache this in a docket file before getting out of experimental."""
2439 if self._format_version != REVLOGV2:
2448 if self._format_version != REVLOGV2:
2440 return self.end(prev)
2449 return self.end(prev)
2441
2450
2442 offset = 0
2451 offset = 0
2443 for rev, entry in enumerate(self.index):
2452 for rev, entry in enumerate(self.index):
2444 sidedata_end = entry[8] + entry[9]
2453 sidedata_end = entry[8] + entry[9]
2445 # Sidedata for a previous rev has potentially been written after
2454 # Sidedata for a previous rev has potentially been written after
2446 # this rev's end, so take the max.
2455 # this rev's end, so take the max.
2447 offset = max(self.end(rev), offset, sidedata_end)
2456 offset = max(self.end(rev), offset, sidedata_end)
2448 return offset
2457 return offset
2449
2458
    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
        """Write one revision's index entry and data to disk.

        ``entry`` is the packed index record, ``data`` a (header, body) pair
        of compressed revision data, ``offset`` the expected write position
        in the data file, and ``sidedata`` the serialized sidedata blob
        (possibly empty).  Must be called inside a ``_writing`` context.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            # with a docket, the authoritative end of the index is tracked by
            # the docket rather than by the index file's current size
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            # separate index and data files
            transaction.add(self._datafile, offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                dfh.write(sidedata)
            ifh.write(entry)
        else:
            # inline revlog: index entries and data interleaved in one file
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            if sidedata:
                ifh.write(sidedata)
            # may convert to non-inline format if the file grew too large
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            self._docket.index_end = self._writinghandles[0].tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)
2497
2506
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.

        ``duplicaterevisioncb`` is similarly invoked for deltas whose node is
        already present in this revlog.  Returns True if at least one delta
        was applied or recognized as a duplicate, False otherwise.
        """

        # addgroup() is not reentrant: the delta loop relies on exclusive
        # access to the in-progress write state
        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                deltacomputer = deltautil.deltacomputer(self)
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    # both parents must already be known to this revlog
                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty
2606
2615
2607 def iscensored(self, rev):
2616 def iscensored(self, rev):
2608 """Check if a file revision is censored."""
2617 """Check if a file revision is censored."""
2609 if not self._censorable:
2618 if not self._censorable:
2610 return False
2619 return False
2611
2620
2612 return self.flags(rev) & REVIDX_ISCENSORED
2621 return self.flags(rev) & REVIDX_ISCENSORED
2613
2622
2614 def _peek_iscensored(self, baserev, delta):
2623 def _peek_iscensored(self, baserev, delta):
2615 """Quickly check if a delta produces a censored revision."""
2624 """Quickly check if a delta produces a censored revision."""
2616 if not self._censorable:
2625 if not self._censorable:
2617 return False
2626 return False
2618
2627
2619 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2628 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2620
2629
2621 def getstrippoint(self, minlink):
2630 def getstrippoint(self, minlink):
2622 """find the minimum rev that must be stripped to strip the linkrev
2631 """find the minimum rev that must be stripped to strip the linkrev
2623
2632
2624 Returns a tuple containing the minimum rev and a set of all revs that
2633 Returns a tuple containing the minimum rev and a set of all revs that
2625 have linkrevs that will be broken by this strip.
2634 have linkrevs that will be broken by this strip.
2626 """
2635 """
2627 return storageutil.resolvestripinfo(
2636 return storageutil.resolvestripinfo(
2628 minlink,
2637 minlink,
2629 len(self) - 1,
2638 len(self) - 1,
2630 self.headrevs(),
2639 self.headrevs(),
2631 self.linkrev,
2640 self.linkrev,
2632 self.parentrevs,
2641 self.parentrevs,
2633 )
2642 )
2634
2643
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        # nothing to strip from an empty revlog
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            # no revision is affected: leave everything in place
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, end)
            # index truncation point is expressed in index-entry units
            end = rev * self.index.entry_size
        else:
            end += rev * self.index.entry_size

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could, leverage the docket while stripping. However it is
            # not powerfull enough at the time of this comment
            self._docket.index_end = end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        del self.index[rev:-1]
2677
2686
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        # expected size of the data, from the last revision's end offset
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # a missing data file counts as "no extra data"
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            # number of whole index entries actually present on disk
            i = max(0, actual // s)
            # di: trailing partial-entry bytes, if any
            di = actual - (i * s)
            if self._inline:
                # inline revlogs interleave data with the index, so recompute
                # both numbers from per-revision chunk lengths
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
2721
2730
2722 def files(self):
2731 def files(self):
2723 res = [self._indexfile]
2732 res = [self._indexfile]
2724 if not self._inline:
2733 if not self._inline:
2725 res.append(self._datafile)
2734 res.append(self._datafile)
2726 return res
2735 return res
2727
2736
2728 def emitrevisions(
2737 def emitrevisions(
2729 self,
2738 self,
2730 nodes,
2739 nodes,
2731 nodesorder=None,
2740 nodesorder=None,
2732 revisiondata=False,
2741 revisiondata=False,
2733 assumehaveparentrevisions=False,
2742 assumehaveparentrevisions=False,
2734 deltamode=repository.CG_DELTAMODE_STD,
2743 deltamode=repository.CG_DELTAMODE_STD,
2735 sidedata_helpers=None,
2744 sidedata_helpers=None,
2736 ):
2745 ):
2737 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2746 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2738 raise error.ProgrammingError(
2747 raise error.ProgrammingError(
2739 b'unhandled value for nodesorder: %s' % nodesorder
2748 b'unhandled value for nodesorder: %s' % nodesorder
2740 )
2749 )
2741
2750
2742 if nodesorder is None and not self._generaldelta:
2751 if nodesorder is None and not self._generaldelta:
2743 nodesorder = b'storage'
2752 nodesorder = b'storage'
2744
2753
2745 if (
2754 if (
2746 not self._storedeltachains
2755 not self._storedeltachains
2747 and deltamode != repository.CG_DELTAMODE_PREV
2756 and deltamode != repository.CG_DELTAMODE_PREV
2748 ):
2757 ):
2749 deltamode = repository.CG_DELTAMODE_FULL
2758 deltamode = repository.CG_DELTAMODE_FULL
2750
2759
2751 return storageutil.emitrevisions(
2760 return storageutil.emitrevisions(
2752 self,
2761 self,
2753 nodes,
2762 nodes,
2754 nodesorder,
2763 nodesorder,
2755 revlogrevisiondelta,
2764 revlogrevisiondelta,
2756 deltaparentfn=self.deltaparent,
2765 deltaparentfn=self.deltaparent,
2757 candeltafn=self.candelta,
2766 candeltafn=self.candelta,
2758 rawsizefn=self.rawsize,
2767 rawsizefn=self.rawsize,
2759 revdifffn=self.revdiff,
2768 revdifffn=self.revdiff,
2760 flagsfn=self.flags,
2769 flagsfn=self.flags,
2761 deltamode=deltamode,
2770 deltamode=deltamode,
2762 revisiondata=revisiondata,
2771 revisiondata=revisiondata,
2763 assumehaveparentrevisions=assumehaveparentrevisions,
2772 assumehaveparentrevisions=assumehaveparentrevisions,
2764 sidedata_helpers=sidedata_helpers,
2773 sidedata_helpers=sidedata_helpers,
2765 )
2774 )
2766
2775
    # Delta-reuse policies accepted by ``clone``; see ``clone`` docstring
    # for the exact semantics of each value.
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    # every accepted value for the ``deltareuse`` argument of ``clone``
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2774
2783
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument control how deltas from the existing revlog
        are preserved in the destination revlog. The argument can have the
        following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revision will be re-added as if their were new content. This is
          slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
          eg: large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        # Save the destination's tuning so it can be restored afterwards:
        # the policy below is only in effect for the duration of the clone.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            # always restore the destination revlog's original tuning
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
2873
2882
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                # full re-add: go through the normal addrevision() path so
                # mechanisms like large-file detection can kick in
                text, sidedata = self._revisiondata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        # reuse the stored delta chunk as-is
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext, sidedata = self._revisiondata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)
2956
2965
2957 def censorrevision(self, tr, censornode, tombstone=b''):
2966 def censorrevision(self, tr, censornode, tombstone=b''):
2958 if self._format_version == REVLOGV0:
2967 if self._format_version == REVLOGV0:
2959 raise error.RevlogError(
2968 raise error.RevlogError(
2960 _(b'cannot censor with version %d revlogs')
2969 _(b'cannot censor with version %d revlogs')
2961 % self._format_version
2970 % self._format_version
2962 )
2971 )
2963
2972
2964 censorrev = self.rev(censornode)
2973 censorrev = self.rev(censornode)
2965 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2974 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2966
2975
2967 if len(tombstone) > self.rawsize(censorrev):
2976 if len(tombstone) > self.rawsize(censorrev):
2968 raise error.Abort(
2977 raise error.Abort(
2969 _(b'censor tombstone must be no longer than censored data')
2978 _(b'censor tombstone must be no longer than censored data')
2970 )
2979 )
2971
2980
2972 # Rewriting the revlog in place is hard. Our strategy for censoring is
2981 # Rewriting the revlog in place is hard. Our strategy for censoring is
2973 # to create a new revlog, copy all revisions to it, then replace the
2982 # to create a new revlog, copy all revisions to it, then replace the
2974 # revlogs on transaction close.
2983 # revlogs on transaction close.
2975 #
2984 #
2976 # This is a bit dangerous. We could easily have a mismatch of state.
2985 # This is a bit dangerous. We could easily have a mismatch of state.
2977 newrl = revlog(
2986 newrl = revlog(
2978 self.opener,
2987 self.opener,
2979 target=self.target,
2988 target=self.target,
2980 radix=self.radix,
2989 radix=self.radix,
2981 postfix=b'tmpcensored',
2990 postfix=b'tmpcensored',
2982 censorable=True,
2991 censorable=True,
2983 )
2992 )
2984 newrl._format_version = self._format_version
2993 newrl._format_version = self._format_version
2985 newrl._format_flags = self._format_flags
2994 newrl._format_flags = self._format_flags
2986 newrl._generaldelta = self._generaldelta
2995 newrl._generaldelta = self._generaldelta
2987 newrl._parse_index = self._parse_index
2996 newrl._parse_index = self._parse_index
2988
2997
2989 for rev in self.revs():
2998 for rev in self.revs():
2990 node = self.node(rev)
2999 node = self.node(rev)
2991 p1, p2 = self.parents(node)
3000 p1, p2 = self.parents(node)
2992
3001
2993 if rev == censorrev:
3002 if rev == censorrev:
2994 newrl.addrawrevision(
3003 newrl.addrawrevision(
2995 tombstone,
3004 tombstone,
2996 tr,
3005 tr,
2997 self.linkrev(censorrev),
3006 self.linkrev(censorrev),
2998 p1,
3007 p1,
2999 p2,
3008 p2,
3000 censornode,
3009 censornode,
3001 REVIDX_ISCENSORED,
3010 REVIDX_ISCENSORED,
3002 )
3011 )
3003
3012
3004 if newrl.deltaparent(rev) != nullrev:
3013 if newrl.deltaparent(rev) != nullrev:
3005 raise error.Abort(
3014 raise error.Abort(
3006 _(
3015 _(
3007 b'censored revision stored as delta; '
3016 b'censored revision stored as delta; '
3008 b'cannot censor'
3017 b'cannot censor'
3009 ),
3018 ),
3010 hint=_(
3019 hint=_(
3011 b'censoring of revlogs is not '
3020 b'censoring of revlogs is not '
3012 b'fully implemented; please report '
3021 b'fully implemented; please report '
3013 b'this bug'
3022 b'this bug'
3014 ),
3023 ),
3015 )
3024 )
3016 continue
3025 continue
3017
3026
3018 if self.iscensored(rev):
3027 if self.iscensored(rev):
3019 if self.deltaparent(rev) != nullrev:
3028 if self.deltaparent(rev) != nullrev:
3020 raise error.Abort(
3029 raise error.Abort(
3021 _(
3030 _(
3022 b'cannot censor due to censored '
3031 b'cannot censor due to censored '
3023 b'revision having delta stored'
3032 b'revision having delta stored'
3024 )
3033 )
3025 )
3034 )
3026 rawtext = self._chunk(rev)
3035 rawtext = self._chunk(rev)
3027 else:
3036 else:
3028 rawtext = self.rawdata(rev)
3037 rawtext = self.rawdata(rev)
3029
3038
3030 newrl.addrawrevision(
3039 newrl.addrawrevision(
3031 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3040 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3032 )
3041 )
3033
3042
3034 tr.addbackup(self._indexfile, location=b'store')
3043 tr.addbackup(self._indexfile, location=b'store')
3035 if not self._inline:
3044 if not self._inline:
3036 tr.addbackup(self._datafile, location=b'store')
3045 tr.addbackup(self._datafile, location=b'store')
3037
3046
3038 self.opener.rename(newrl._indexfile, self._indexfile)
3047 self.opener.rename(newrl._indexfile, self._indexfile)
3039 if not self._inline:
3048 if not self._inline:
3040 self.opener.rename(newrl._datafile, self._datafile)
3049 self.opener.rename(newrl._datafile, self._datafile)
3041
3050
3042 self.clearcaches()
3051 self.clearcaches()
3043 self._loadindex()
3052 self._loadindex()
3044
3053
3045 def verifyintegrity(self, state):
3054 def verifyintegrity(self, state):
3046 """Verifies the integrity of the revlog.
3055 """Verifies the integrity of the revlog.
3047
3056
3048 Yields ``revlogproblem`` instances describing problems that are
3057 Yields ``revlogproblem`` instances describing problems that are
3049 found.
3058 found.
3050 """
3059 """
3051 dd, di = self.checksize()
3060 dd, di = self.checksize()
3052 if dd:
3061 if dd:
3053 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3062 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3054 if di:
3063 if di:
3055 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3064 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3056
3065
3057 version = self._format_version
3066 version = self._format_version
3058
3067
3059 # The verifier tells us what version revlog we should be.
3068 # The verifier tells us what version revlog we should be.
3060 if version != state[b'expectedversion']:
3069 if version != state[b'expectedversion']:
3061 yield revlogproblem(
3070 yield revlogproblem(
3062 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3071 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3063 % (self.display_id, version, state[b'expectedversion'])
3072 % (self.display_id, version, state[b'expectedversion'])
3064 )
3073 )
3065
3074
3066 state[b'skipread'] = set()
3075 state[b'skipread'] = set()
3067 state[b'safe_renamed'] = set()
3076 state[b'safe_renamed'] = set()
3068
3077
3069 for rev in self:
3078 for rev in self:
3070 node = self.node(rev)
3079 node = self.node(rev)
3071
3080
3072 # Verify contents. 4 cases to care about:
3081 # Verify contents. 4 cases to care about:
3073 #
3082 #
3074 # common: the most common case
3083 # common: the most common case
3075 # rename: with a rename
3084 # rename: with a rename
3076 # meta: file content starts with b'\1\n', the metadata
3085 # meta: file content starts with b'\1\n', the metadata
3077 # header defined in filelog.py, but without a rename
3086 # header defined in filelog.py, but without a rename
3078 # ext: content stored externally
3087 # ext: content stored externally
3079 #
3088 #
3080 # More formally, their differences are shown below:
3089 # More formally, their differences are shown below:
3081 #
3090 #
3082 # | common | rename | meta | ext
3091 # | common | rename | meta | ext
3083 # -------------------------------------------------------
3092 # -------------------------------------------------------
3084 # flags() | 0 | 0 | 0 | not 0
3093 # flags() | 0 | 0 | 0 | not 0
3085 # renamed() | False | True | False | ?
3094 # renamed() | False | True | False | ?
3086 # rawtext[0:2]=='\1\n'| False | True | True | ?
3095 # rawtext[0:2]=='\1\n'| False | True | True | ?
3087 #
3096 #
3088 # "rawtext" means the raw text stored in revlog data, which
3097 # "rawtext" means the raw text stored in revlog data, which
3089 # could be retrieved by "rawdata(rev)". "text"
3098 # could be retrieved by "rawdata(rev)". "text"
3090 # mentioned below is "revision(rev)".
3099 # mentioned below is "revision(rev)".
3091 #
3100 #
3092 # There are 3 different lengths stored physically:
3101 # There are 3 different lengths stored physically:
3093 # 1. L1: rawsize, stored in revlog index
3102 # 1. L1: rawsize, stored in revlog index
3094 # 2. L2: len(rawtext), stored in revlog data
3103 # 2. L2: len(rawtext), stored in revlog data
3095 # 3. L3: len(text), stored in revlog data if flags==0, or
3104 # 3. L3: len(text), stored in revlog data if flags==0, or
3096 # possibly somewhere else if flags!=0
3105 # possibly somewhere else if flags!=0
3097 #
3106 #
3098 # L1 should be equal to L2. L3 could be different from them.
3107 # L1 should be equal to L2. L3 could be different from them.
3099 # "text" may or may not affect commit hash depending on flag
3108 # "text" may or may not affect commit hash depending on flag
3100 # processors (see flagutil.addflagprocessor).
3109 # processors (see flagutil.addflagprocessor).
3101 #
3110 #
3102 # | common | rename | meta | ext
3111 # | common | rename | meta | ext
3103 # -------------------------------------------------
3112 # -------------------------------------------------
3104 # rawsize() | L1 | L1 | L1 | L1
3113 # rawsize() | L1 | L1 | L1 | L1
3105 # size() | L1 | L2-LM | L1(*) | L1 (?)
3114 # size() | L1 | L2-LM | L1(*) | L1 (?)
3106 # len(rawtext) | L2 | L2 | L2 | L2
3115 # len(rawtext) | L2 | L2 | L2 | L2
3107 # len(text) | L2 | L2 | L2 | L3
3116 # len(text) | L2 | L2 | L2 | L3
3108 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3117 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3109 #
3118 #
3110 # LM: length of metadata, depending on rawtext
3119 # LM: length of metadata, depending on rawtext
3111 # (*): not ideal, see comment in filelog.size
3120 # (*): not ideal, see comment in filelog.size
3112 # (?): could be "- len(meta)" if the resolved content has
3121 # (?): could be "- len(meta)" if the resolved content has
3113 # rename metadata
3122 # rename metadata
3114 #
3123 #
3115 # Checks needed to be done:
3124 # Checks needed to be done:
3116 # 1. length check: L1 == L2, in all cases.
3125 # 1. length check: L1 == L2, in all cases.
3117 # 2. hash check: depending on flag processor, we may need to
3126 # 2. hash check: depending on flag processor, we may need to
3118 # use either "text" (external), or "rawtext" (in revlog).
3127 # use either "text" (external), or "rawtext" (in revlog).
3119
3128
3120 try:
3129 try:
3121 skipflags = state.get(b'skipflags', 0)
3130 skipflags = state.get(b'skipflags', 0)
3122 if skipflags:
3131 if skipflags:
3123 skipflags &= self.flags(rev)
3132 skipflags &= self.flags(rev)
3124
3133
3125 _verify_revision(self, skipflags, state, node)
3134 _verify_revision(self, skipflags, state, node)
3126
3135
3127 l1 = self.rawsize(rev)
3136 l1 = self.rawsize(rev)
3128 l2 = len(self.rawdata(node))
3137 l2 = len(self.rawdata(node))
3129
3138
3130 if l1 != l2:
3139 if l1 != l2:
3131 yield revlogproblem(
3140 yield revlogproblem(
3132 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3141 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3133 node=node,
3142 node=node,
3134 )
3143 )
3135
3144
3136 except error.CensoredNodeError:
3145 except error.CensoredNodeError:
3137 if state[b'erroroncensored']:
3146 if state[b'erroroncensored']:
3138 yield revlogproblem(
3147 yield revlogproblem(
3139 error=_(b'censored file data'), node=node
3148 error=_(b'censored file data'), node=node
3140 )
3149 )
3141 state[b'skipread'].add(node)
3150 state[b'skipread'].add(node)
3142 except Exception as e:
3151 except Exception as e:
3143 yield revlogproblem(
3152 yield revlogproblem(
3144 error=_(b'unpacking %s: %s')
3153 error=_(b'unpacking %s: %s')
3145 % (short(node), stringutil.forcebytestr(e)),
3154 % (short(node), stringutil.forcebytestr(e)),
3146 node=node,
3155 node=node,
3147 )
3156 )
3148 state[b'skipread'].add(node)
3157 state[b'skipread'].add(node)
3149
3158
3150 def storageinfo(
3159 def storageinfo(
3151 self,
3160 self,
3152 exclusivefiles=False,
3161 exclusivefiles=False,
3153 sharedfiles=False,
3162 sharedfiles=False,
3154 revisionscount=False,
3163 revisionscount=False,
3155 trackedsize=False,
3164 trackedsize=False,
3156 storedsize=False,
3165 storedsize=False,
3157 ):
3166 ):
3158 d = {}
3167 d = {}
3159
3168
3160 if exclusivefiles:
3169 if exclusivefiles:
3161 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3170 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3162 if not self._inline:
3171 if not self._inline:
3163 d[b'exclusivefiles'].append((self.opener, self._datafile))
3172 d[b'exclusivefiles'].append((self.opener, self._datafile))
3164
3173
3165 if sharedfiles:
3174 if sharedfiles:
3166 d[b'sharedfiles'] = []
3175 d[b'sharedfiles'] = []
3167
3176
3168 if revisionscount:
3177 if revisionscount:
3169 d[b'revisionscount'] = len(self)
3178 d[b'revisionscount'] = len(self)
3170
3179
3171 if trackedsize:
3180 if trackedsize:
3172 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3181 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3173
3182
3174 if storedsize:
3183 if storedsize:
3175 d[b'storedsize'] = sum(
3184 d[b'storedsize'] = sum(
3176 self.opener.stat(path).st_size for path in self.files()
3185 self.opener.stat(path).st_size for path in self.files()
3177 )
3186 )
3178
3187
3179 return d
3188 return d
3180
3189
3181 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3190 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3182 if not self.hassidedata:
3191 if not self.hassidedata:
3183 return
3192 return
3184 # revlog formats with sidedata support does not support inline
3193 # revlog formats with sidedata support does not support inline
3185 assert not self._inline
3194 assert not self._inline
3186 if not helpers[1] and not helpers[2]:
3195 if not helpers[1] and not helpers[2]:
3187 # Nothing to generate or remove
3196 # Nothing to generate or remove
3188 return
3197 return
3189
3198
3190 # changelog implement some "delayed" writing mechanism that assume that
3191 # all index data is writen in append mode and is therefor incompatible
3192 # with the seeked write done in this method. The use of such "delayed"
3193 # writing will soon be removed for revlog version that support side
3194 # data, so for now, we only keep this simple assert to highlight the
3195 # situation.
3196 delayed = getattr(self, '_delayed', False)
3197 diverted = getattr(self, '_divert', False)
3198 if delayed and not diverted:
3199 msg = "cannot rewrite_sidedata of a delayed revlog"
3200 raise error.ProgrammingError(msg)
3201
3202 new_entries = []
3199 new_entries = []
3203 # append the new sidedata
3200 # append the new sidedata
3204 with self._writing(transaction):
3201 with self._writing(transaction):
3205 ifh, dfh = self._writinghandles
3202 ifh, dfh = self._writinghandles
3206 dfh.seek(0, os.SEEK_END)
3203 dfh.seek(0, os.SEEK_END)
3207 current_offset = dfh.tell()
3204 current_offset = dfh.tell()
3208 for rev in range(startrev, endrev + 1):
3205 for rev in range(startrev, endrev + 1):
3209 entry = self.index[rev]
3206 entry = self.index[rev]
3210 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3207 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3211 store=self,
3208 store=self,
3212 sidedata_helpers=helpers,
3209 sidedata_helpers=helpers,
3213 sidedata={},
3210 sidedata={},
3214 rev=rev,
3211 rev=rev,
3215 )
3212 )
3216
3213
3217 serialized_sidedata = sidedatautil.serialize_sidedata(
3214 serialized_sidedata = sidedatautil.serialize_sidedata(
3218 new_sidedata
3215 new_sidedata
3219 )
3216 )
3220 if entry[8] != 0 or entry[9] != 0:
3217 if entry[8] != 0 or entry[9] != 0:
3221 # rewriting entries that already have sidedata is not
3218 # rewriting entries that already have sidedata is not
3222 # supported yet, because it introduces garbage data in the
3219 # supported yet, because it introduces garbage data in the
3223 # revlog.
3220 # revlog.
3224 msg = b"rewriting existing sidedata is not supported yet"
3221 msg = b"rewriting existing sidedata is not supported yet"
3225 raise error.Abort(msg)
3222 raise error.Abort(msg)
3226
3223
3227 # Apply (potential) flags to add and to remove after running
3224 # Apply (potential) flags to add and to remove after running
3228 # the sidedata helpers
3225 # the sidedata helpers
3229 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3226 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3230 entry = (new_offset_flags,) + entry[1:8]
3227 entry = (new_offset_flags,) + entry[1:8]
3231 entry += (current_offset, len(serialized_sidedata))
3228 entry += (current_offset, len(serialized_sidedata))
3232
3229
3233 # the sidedata computation might have move the file cursors around
3230 # the sidedata computation might have move the file cursors around
3234 dfh.seek(current_offset, os.SEEK_SET)
3231 dfh.seek(current_offset, os.SEEK_SET)
3235 dfh.write(serialized_sidedata)
3232 dfh.write(serialized_sidedata)
3236 new_entries.append(entry)
3233 new_entries.append(entry)
3237 current_offset += len(serialized_sidedata)
3234 current_offset += len(serialized_sidedata)
3238
3235
3239 # rewrite the new index entries
3236 # rewrite the new index entries
3240 ifh.seek(startrev * self.index.entry_size)
3237 ifh.seek(startrev * self.index.entry_size)
3241 for i, e in enumerate(new_entries):
3238 for i, e in enumerate(new_entries):
3242 rev = startrev + i
3239 rev = startrev + i
3243 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3240 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3244 packed = self.index.entry_binary(rev)
3241 packed = self.index.entry_binary(rev)
3245 if rev == 0 and self._docket is None:
3242 if rev == 0 and self._docket is None:
3246 header = self._format_flags | self._format_version
3243 header = self._format_flags | self._format_version
3247 header = self.index.pack_header(header)
3244 header = self.index.pack_header(header)
3248 packed = header + packed
3245 packed = header + packed
3249 ifh.write(packed)
3246 ifh.write(packed)
@@ -1,268 +1,268 b''
1 Test transaction safety
1 Test transaction safety
2 =======================
2 =======================
3
3
4 #testcases revlogv1 revlogv2
4 #testcases revlogv1 revlogv2
5
5
6 #if revlogv1
6 #if revlogv1
7
7
8 $ cat << EOF >> $HGRCPATH
8 $ cat << EOF >> $HGRCPATH
9 > [experimental]
9 > [experimental]
10 > revlogv2=no
10 > revlogv2=no
11 > EOF
11 > EOF
12
12
13 #endif
13 #endif
14
14
15 #if revlogv2
15 #if revlogv2
16
16
17 $ cat << EOF >> $HGRCPATH
17 $ cat << EOF >> $HGRCPATH
18 > [experimental]
18 > [experimental]
19 > revlogv2=enable-unstable-format-and-corrupt-my-data
19 > revlogv2=enable-unstable-format-and-corrupt-my-data
20 > EOF
20 > EOF
21
21
22 #endif
22 #endif
23
23
24 This test basic case to make sure external process do not see transaction
24 This test basic case to make sure external process do not see transaction
25 content until it is committed.
25 content until it is committed.
26
26
27 # TODO: also add an external reader accessing revlog files while they are written
27 # TODO: also add an external reader accessing revlog files while they are written
28 # (instead of during transaction finalisation)
28 # (instead of during transaction finalisation)
29
29
30 # TODO: also add stream clone and hardlink clone happening during these transaction.
30 # TODO: also add stream clone and hardlink clone happening during these transaction.
31
31
32 setup
32 setup
33 -----
33 -----
34
34
35 synchronisation+output script:
35 synchronisation+output script:
36
36
37 $ mkdir sync
37 $ mkdir sync
38 $ mkdir output
38 $ mkdir output
39 $ mkdir script
39 $ mkdir script
40 $ HG_TEST_FILE_EXT_WAITING=$TESTTMP/sync/ext_waiting
40 $ HG_TEST_FILE_EXT_WAITING=$TESTTMP/sync/ext_waiting
41 $ export HG_TEST_FILE_EXT_WAITING
41 $ export HG_TEST_FILE_EXT_WAITING
42 $ HG_TEST_FILE_EXT_UNLOCK=$TESTTMP/sync/ext_unlock
42 $ HG_TEST_FILE_EXT_UNLOCK=$TESTTMP/sync/ext_unlock
43 $ export HG_TEST_FILE_EXT_UNLOCK
43 $ export HG_TEST_FILE_EXT_UNLOCK
44 $ HG_TEST_FILE_EXT_DONE=$TESTTMP/sync/ext_done
44 $ HG_TEST_FILE_EXT_DONE=$TESTTMP/sync/ext_done
45 $ export HG_TEST_FILE_EXT_DONE
45 $ export HG_TEST_FILE_EXT_DONE
46 $ cat << EOF > script/external.sh
46 $ cat << EOF > script/external.sh
47 > #!/bin/sh
47 > #!/bin/sh
48 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_UNLOCK $HG_TEST_FILE_EXT_WAITING
48 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_UNLOCK $HG_TEST_FILE_EXT_WAITING
49 > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > $TESTTMP/output/external.out
49 > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > $TESTTMP/output/external.out 2>/dev/null
50 > touch $HG_TEST_FILE_EXT_DONE
50 > touch $HG_TEST_FILE_EXT_DONE
51 > EOF
51 > EOF
52 $ chmod +x script/external.sh
52 $ chmod +x script/external.sh
53 $ cat << EOF > script/internal.sh
53 $ cat << EOF > script/internal.sh
54 > #!/bin/sh
54 > #!/bin/sh
55 > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > $TESTTMP/output/internal.out
55 > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > $TESTTMP/output/internal.out 2>/dev/null
56 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_DONE $HG_TEST_FILE_EXT_UNLOCK
56 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_DONE $HG_TEST_FILE_EXT_UNLOCK
57 > EOF
57 > EOF
58 $ chmod +x script/internal.sh
58 $ chmod +x script/internal.sh
59
59
60
60
61 Automated commands:
61 Automated commands:
62
62
63 $ make_one_commit() {
63 $ make_one_commit() {
64 > rm -f $TESTTMP/sync/*
64 > rm -f $TESTTMP/sync/*
65 > rm -f $TESTTMP/output/*
65 > rm -f $TESTTMP/output/*
66 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
66 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
67 > echo x >> a
67 > echo x >> a
68 > $TESTTMP/script/external.sh & hg commit -m "$1"
68 > $TESTTMP/script/external.sh & hg commit -m "$1"
69 > cat $TESTTMP/output/external.out
69 > cat $TESTTMP/output/external.out
70 > cat $TESTTMP/output/internal.out
70 > cat $TESTTMP/output/internal.out
71 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
71 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
72 > }
72 > }
73
73
74
74
75 $ make_one_pull() {
75 $ make_one_pull() {
76 > rm -f $TESTTMP/sync/*
76 > rm -f $TESTTMP/sync/*
77 > rm -f $TESTTMP/output/*
77 > rm -f $TESTTMP/output/*
78 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
78 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
79 > echo x >> a
79 > echo x >> a
80 > $TESTTMP/script/external.sh & hg pull ../other-repo/ --rev "$1" --force --quiet
80 > $TESTTMP/script/external.sh & hg pull ../other-repo/ --rev "$1" --force --quiet
81 > cat $TESTTMP/output/external.out
81 > cat $TESTTMP/output/external.out
82 > cat $TESTTMP/output/internal.out
82 > cat $TESTTMP/output/internal.out
83 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
83 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
84 > }
84 > }
85
85
86 prepare a large source to which to pull from:
86 prepare a large source to which to pull from:
87
87
88 The source is large to unsure we don't use inline more after the pull
88 The source is large to unsure we don't use inline more after the pull
89
89
90 $ hg init other-repo
90 $ hg init other-repo
91 $ hg -R other-repo debugbuilddag .+500
91 $ hg -R other-repo debugbuilddag .+500
92
92
93
93
94 prepare an empty repository where to make test:
94 prepare an empty repository where to make test:
95
95
96 $ hg init repo
96 $ hg init repo
97 $ cd repo
97 $ cd repo
98 $ touch a
98 $ touch a
99 $ hg add a
99 $ hg add a
100
100
101 prepare a small extension to controll inline size
101 prepare a small extension to controll inline size
102
102
103 $ mkdir $TESTTMP/ext
103 $ mkdir $TESTTMP/ext
104 $ cat << EOF > $TESTTMP/ext/small_inline.py
104 $ cat << EOF > $TESTTMP/ext/small_inline.py
105 > from mercurial import revlog
105 > from mercurial import revlog
106 > revlog._maxinline = 64 * 100
106 > revlog._maxinline = 64 * 100
107 > EOF
107 > EOF
108
108
109
109
110
110
111
111
112 $ cat << EOF >> $HGRCPATH
112 $ cat << EOF >> $HGRCPATH
113 > [extensions]
113 > [extensions]
114 > small_inline=$TESTTMP/ext/small_inline.py
114 > small_inline=$TESTTMP/ext/small_inline.py
115 > [hooks]
115 > [hooks]
116 > pretxnclose = $TESTTMP/script/internal.sh
116 > pretxnclose = $TESTTMP/script/internal.sh
117 > EOF
117 > EOF
118
118
119 check this is true for the initial commit (inline → inline)
119 check this is true for the initial commit (inline → inline)
120 -----------------------------------------------------------
120 -----------------------------------------------------------
121
121
122 the repository should still be inline (for relevant format)
122 the repository should still be inline (for relevant format)
123
123
124 $ make_one_commit first
124 $ make_one_commit first
125 pre-commit: -1
125 pre-commit: -1
126 external: -1 (revlogv1 !)
126 external: -1
127 external: 0 first (revlogv2 known-bad-output !)
127 internal: 0 first (revlogv1 !)
128 internal: 0 first
128 internal: -1 (revlogv2 known-bad-output !)
129 post-tr: 0 first
129 post-tr: 0 first
130
130
131 #if revlogv1
131 #if revlogv1
132
132
133 $ hg debugrevlog -c | grep inline
133 $ hg debugrevlog -c | grep inline
134 flags : inline
134 flags : inline
135
135
136 #endif
136 #endif
137
137
138 check this is true for extra commit (inline → inline)
138 check this is true for extra commit (inline → inline)
139 -----------------------------------------------------
139 -----------------------------------------------------
140
140
141 the repository should still be inline (for relevant format)
141 the repository should still be inline (for relevant format)
142
142
143 #if revlogv1
143 #if revlogv1
144
144
145 $ hg debugrevlog -c | grep inline
145 $ hg debugrevlog -c | grep inline
146 flags : inline
146 flags : inline
147
147
148 #endif
148 #endif
149
149
150 $ make_one_commit second
150 $ make_one_commit second
151 pre-commit: 0 first
151 pre-commit: 0 first
152 external: 0 first (revlogv1 !)
152 external: 0 first
153 external: 1 second (revlogv2 known-bad-output !)
153 internal: 1 second (revlogv1 !)
154 internal: 1 second
154 internal: 0 first (revlogv2 known-bad-output !)
155 post-tr: 1 second
155 post-tr: 1 second
156
156
157 #if revlogv1
157 #if revlogv1
158
158
159 $ hg debugrevlog -c | grep inline
159 $ hg debugrevlog -c | grep inline
160 flags : inline
160 flags : inline
161
161
162 #endif
162 #endif
163
163
164 check this is true for a small pull (inline → inline)
164 check this is true for a small pull (inline → inline)
165 -----------------------------------------------------
165 -----------------------------------------------------
166
166
167 the repository should still be inline (for relevant format)
167 the repository should still be inline (for relevant format)
168
168
169 #if revlogv1
169 #if revlogv1
170
170
171 $ hg debugrevlog -c | grep inline
171 $ hg debugrevlog -c | grep inline
172 flags : inline
172 flags : inline
173
173
174 #endif
174 #endif
175
175
176 $ make_one_pull 3
176 $ make_one_pull 3
177 pre-commit: 1 second
177 pre-commit: 1 second
178 warning: repository is unrelated
178 warning: repository is unrelated
179 external: 1 second (revlogv1 !)
179 external: 1 second
180 external: 5 r3 (revlogv2 known-bad-output !)
180 internal: 5 r3 (revlogv1 !)
181 internal: 5 r3
181 internal: 1 second (revlogv2 known-bad-output !)
182 post-tr: 5 r3
182 post-tr: 5 r3
183
183
184 #if revlogv1
184 #if revlogv1
185
185
186 $ hg debugrevlog -c | grep inline
186 $ hg debugrevlog -c | grep inline
187 flags : inline
187 flags : inline
188
188
189 #endif
189 #endif
190
190
191 Make a large pull (inline → no-inline)
191 Make a large pull (inline → no-inline)
192 ---------------------------------------
192 ---------------------------------------
193
193
194 the repository should no longer be inline (for relevant format)
194 the repository should no longer be inline (for relevant format)
195
195
196 #if revlogv1
196 #if revlogv1
197
197
198 $ hg debugrevlog -c | grep inline
198 $ hg debugrevlog -c | grep inline
199 flags : inline
199 flags : inline
200
200
201 #endif
201 #endif
202
202
203 $ make_one_pull 400
203 $ make_one_pull 400
204 pre-commit: 5 r3
204 pre-commit: 5 r3
205 external: 5 r3 (revlogv1 !)
205 external: 5 r3
206 external: 402 r400 (revlogv2 known-bad-output !)
206 internal: 402 r400 (revlogv1 !)
207 internal: 402 r400
207 internal: 5 r3 (revlogv2 known-bad-output !)
208 post-tr: 402 r400
208 post-tr: 402 r400
209
209
210 #if revlogv1
210 #if revlogv1
211
211
212 $ hg debugrevlog -c | grep inline
212 $ hg debugrevlog -c | grep inline
213 [1]
213 [1]
214
214
215 #endif
215 #endif
216
216
217 check this is true for extra commit (no-inline → no-inline)
217 check this is true for extra commit (no-inline → no-inline)
218 -----------------------------------------------------------
218 -----------------------------------------------------------
219
219
220 the repository should no longer be inline (for relevant format)
220 the repository should no longer be inline (for relevant format)
221
221
222 #if revlogv1
222 #if revlogv1
223
223
224 $ hg debugrevlog -c | grep inline
224 $ hg debugrevlog -c | grep inline
225 [1]
225 [1]
226
226
227 #endif
227 #endif
228
228
229 $ make_one_commit third
229 $ make_one_commit third
230 pre-commit: 402 r400
230 pre-commit: 402 r400
231 external: 402 r400 (revlogv1 !)
231 external: 402 r400
232 external: 403 third (revlogv2 known-bad-output !)
232 internal: 403 third (revlogv1 !)
233 internal: 403 third
233 internal: 402 r400 (revlogv2 known-bad-output !)
234 post-tr: 403 third
234 post-tr: 403 third
235
235
236 #if revlogv1
236 #if revlogv1
237
237
238 $ hg debugrevlog -c | grep inline
238 $ hg debugrevlog -c | grep inline
239 [1]
239 [1]
240
240
241 #endif
241 #endif
242
242
243
243
244 Make a pull (not-inline → no-inline)
244 Make a pull (not-inline → no-inline)
245 -------------------------------------
245 -------------------------------------
246
246
247 the repository should no longer be inline (for relevant format)
247 the repository should no longer be inline (for relevant format)
248
248
249 #if revlogv1
249 #if revlogv1
250
250
251 $ hg debugrevlog -c | grep inline
251 $ hg debugrevlog -c | grep inline
252 [1]
252 [1]
253
253
254 #endif
254 #endif
255
255
256 $ make_one_pull tip
256 $ make_one_pull tip
257 pre-commit: 403 third
257 pre-commit: 403 third
258 external: 403 third (revlogv1 !)
258 external: 403 third
259 external: 503 r500 (revlogv2 known-bad-output !)
259 internal: 503 r500 (revlogv1 !)
260 internal: 503 r500
260 internal: 403 third (revlogv2 known-bad-output !)
261 post-tr: 503 r500
261 post-tr: 503 r500
262
262
263 #if revlogv1
263 #if revlogv1
264
264
265 $ hg debugrevlog -c | grep inline
265 $ hg debugrevlog -c | grep inline
266 [1]
266 [1]
267
267
268 #endif
268 #endif
General Comments 0
You need to be logged in to leave comments. Login now