revlogv2: introduce a very basic docket file...
marmoute
r48008:616b8f41 default
@@ -0,0 +1,80 b''
# docket - code related to revlog "docket"
#
# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

### Revlog docket file
#
# The revlog is stored on disk using multiple files:
#
# * a small docket file, containing metadata and a pointer,
#
# * an index file, containing fixed width information about revisions,
#
# * a data file, containing variable width data for these revisions,

from __future__ import absolute_import

import struct

from . import (
    constants,
)

# Docket format
#
# * 4 bytes: revlog version
# |   This is mandatory as docket must be compatible with the previous
# |   revlog index header.
S_HEADER = struct.Struct(constants.INDEX_HEADER.format)


class RevlogDocket(object):
    """metadata associated with revlog"""

    def __init__(self, revlog, version_header=None):
        self._version_header = version_header
        self._dirty = False
        self._radix = revlog.radix
        self._path = revlog._docket_file
        self._opener = revlog.opener

    def index_filepath(self):
        """file path to the current index file associated with this docket"""
        # very simplistic version at first
        return b"%s.idx" % self._radix

    def write(self, transaction):
        """write the modifications to disk, if any

        This makes the new content visible to all processes"""
        if self._dirty:
            transaction.addbackup(self._path, location=b'store')
            with self._opener(self._path, mode=b'w', atomictemp=True) as f:
                f.write(self._serialize())
            self._dirty = False

    def _serialize(self):
        return S_HEADER.pack(self._version_header)


def default_docket(revlog, version_header):
    """given a revlog version header, return a new docket object for the
    given revlog"""
    if (version_header & 0xFFFF) != constants.REVLOGV2:
        return None
    docket = RevlogDocket(revlog, version_header=version_header)
    docket._dirty = True
    return docket


def parse_docket(revlog, data):
    """given some docket data, return a docket object for the given revlog"""
    header = S_HEADER.unpack(data[: S_HEADER.size])
    (version_header,) = header
    docket = RevlogDocket(
        revlog,
        version_header=version_header,
    )
    return docket
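
To make the docket round-trip concrete, here is a minimal self-contained sketch of the same pack/unpack dance. It assumes a 4-byte big-endian header in the style of the revlog index header; the real format string comes from constants.INDEX_HEADER (not shown here), and REVLOGV2 below is a stand-in value, not the real constant from mercurial/revlogutils/constants.py.

import struct

# Assumption: the header is a single big-endian 32-bit field, with format
# flags in the high 16 bits and the revlog version in the low 16 bits.
S_HEADER = struct.Struct(b">I")
REVLOGV2 = 0xDEAD  # illustrative stand-in value only

def serialize(version_header):
    # mirrors RevlogDocket._serialize()
    return S_HEADER.pack(version_header)

def parse(data):
    # mirrors parse_docket(): read back the leading fixed-width header
    (version_header,) = S_HEADER.unpack(data[: S_HEADER.size])
    return version_header

header = (0x0001 << 16) | REVLOGV2
assert parse(serialize(header)) == header
assert (header & 0xFFFF) == REVLOGV2  # the check performed by default_docket()
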
@@ -1,625 +1,627 b''
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
)
from .thirdparty import attr

from . import (
    encoding,
    error,
    metadata,
    pycompat,
    revlog,
)
from .utils import (
    dateutil,
    stringutil,
)
from .revlogutils import (
    constants as revlog_constants,
    flagutil,
)

_defaultextra = {b'branch': b'default'}


def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == _string_unescape(res)
    True
    """
    # subset of the string_escape codec
    text = (
        text.replace(b'\\', b'\\\\')
        .replace(b'\n', b'\\n')
        .replace(b'\r', b'\\r')
    )
    return text.replace(b'\0', b'\\0')


def _string_unescape(text):
    if b'\\0' in text:
        # fix up \0 without getting into trouble with \\0
        text = text.replace(b'\\\\', b'\\\\\n')
        text = text.replace(b'\\0', b'\0')
        text = text.replace(b'\n', b'')
    return stringutil.unescapestr(text)


def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split(b'\0'):
        if l:
            k, v = _string_unescape(l).split(b':', 1)
            extra[k] = v
    return extra


def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
    return b"\0".join(items)


def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


class appender(object):
    """the changelog index must be updated last on disk, so we use this class
    to delay writes to it"""

    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)


class _divertopener(object):
    def __init__(self, opener, target):
        self._opener = opener
        self._target = target

    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
        if name != self._target:
            return self._opener(name, mode, **kwargs)
        return self._opener(name + b".a", mode, **kwargs)

    def __getattr__(self, attr):
        return getattr(self._opener, attr)


def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""

    def _delay(name, mode=b'r', checkambig=False, **kwargs):
        if name != target:
            return opener(name, mode, **kwargs)
        assert not kwargs
        return appender(opener, name, mode, buf)

    return _delay


@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib()
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')
    branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))


class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
        '_changes',
    )

    def __new__(cls, cl, text, sidedata, cpsd):
        if not text:
            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd
        self._changes = None

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def changes(self):
        if self._changes is not None:
            return self._changes
        if self._cpsd:
            changes = metadata.decode_files_sidedata(self._sidedata)
        else:
            changes = metadata.ChangingFiles(
                touched=self.files or (),
                added=self.filesadded or (),
                removed=self.filesremoved or (),
                p1_copies=self.p1copies or {},
                p2_copies=self.p2copies or {},
            )
        self._changes = changes
        return changes

    @property
    def files(self):
        if self._cpsd:
            return sorted(self.changes.touched)
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            return self.changes.added
        else:
            rawindices = self.extra.get(b'filesadded')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            return self.changes.removed
        else:
            rawindices = self.extra.get(b'filesremoved')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            return self.changes.copied_from_p1
        else:
            rawcopies = self.extra.get(b'p1copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            return self.changes.copied_from_p2
        else:
            rawcopies = self.extra.get(b'p2copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])

    @property
    def branchinfo(self):
        extra = self.extra
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra


class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False, concurrencychecker=None):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hook processes) accessing data before a transaction is finalized.

        ``concurrencychecker`` will be passed to the revlog init function, see
        the documentation there.
        """

        if trypending and opener.exists(b'00changelog.i.a'):
            postfix = b'a'
        else:
            postfix = None

        revlog.revlog.__init__(
            self,
            opener,
            target=(revlog_constants.KIND_CHANGELOG, None),
            radix=b'00changelog',
            postfix=postfix,
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
            concurrencychecker=concurrencychecker,
        )

        if self._initempty and (self._format_version == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self._format_flags &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self._filteredrevs = frozenset()
        self._filteredrevs_hashcache = {}
        self._copiesstorage = opener.options.get(b'copies-storage')

    @property
    def filteredrevs(self):
        return self._filteredrevs

    @filteredrevs.setter
    def filteredrevs(self, val):
        # Ensure all updates go through this function
        assert isinstance(val, frozenset)
        self._filteredrevs = val
        self._filteredrevs_hashcache = {}

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""
        if self._docket is not None:
            return

        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self._indexfile + b'.a'):
                    self._realopener.unlink(self._indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self._indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self._indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self._indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self._indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self._indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self._indexfile)
            pendingfilename = self._indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, b"w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write(b"".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self._indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr):
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr)

    def read(self, nodeorrev):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

           - manifest node in binary
           - author/user as a localstr
           - date as a 2-tuple of (time, timezone)
           - list of files
           - commit message as a localstr
           - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d, s = self._revisiondata(nodeorrev)
        c = changelogrevision(
            self, d, s, self._copiesstorage == b'changeset-sidedata'
        )
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, nodeorrev):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(nodeorrev)
        if not text:
            return []
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
    ):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files.touched)
        flags = 0
        sidedata = None
        if self._copiesstorage == b'changeset-sidedata':
            if files.has_copies_info:
                flags |= flagutil.REVIDX_HASCOPIESINFO
            sidedata = metadata.encode_files_sidedata(files)

        if extra:
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        rev = self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
        )
        return self.node(rev)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        return self.changelogrevision(rev).branchinfo

    def _nodeduplicatecallback(self, transaction, rev):
        # keep track of revisions that got "re-added", e.g. unbundle of a
        # known rev.
        #
        # We track them in a list to preserve their order from the source
        # bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(rev)
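
For reference, here is a minimal standalone sketch (not Mercurial API; all values below are made up) of the changelog entry layout that changelogrevision.__new__ parses above: manifest node, user, "time tz [extra]", the touched files, then a blank line and the description.

entry = (
    b"0123456789abcdef0123456789abcdef01234567\n"  # manifest node, hex
    b"Alice <alice@example.com>\n"                 # user
    b"1625000000 0\n"                              # time + timezone, no extra
    b"a/file.txt\n"                                # touched files, one per line
    b"b/other.txt\n"
    b"\n"                                          # blank line before the description
    b"my commit message"
)

nl1 = entry.index(b'\n')
nl2 = entry.index(b'\n', nl1 + 1)
nl3 = entry.index(b'\n', nl2 + 1)
# with a non-empty file list, the double newline comes after the files
doublenl = entry.index(b'\n\n', nl3 + 1)

assert entry[:nl1] == b"0123456789abcdef0123456789abcdef01234567"
assert entry[nl1 + 1 : nl2] == b"Alice <alice@example.com>"
assert entry[nl2 + 1 : nl3].split(b' ', 2)[0:2] == [b"1625000000", b"0"]
assert entry[nl3 + 1 : doublenl].split(b'\n') == [b"a/file.txt", b"b/other.txt"]
assert entry[doublenl + 2 :] == b"my commit message"
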
@@ -1,2686 +1,2699 b''
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrites config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)


class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted to the start of the string will produce less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contain "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some matches to avoid the need to prefix most patterns with
            # "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None


coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)


def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )


coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
527 coreconfigitem(
528 b'convert',
528 b'convert',
529 b'localtimezone',
529 b'localtimezone',
530 default=False,
530 default=False,
531 )
531 )
532 coreconfigitem(
532 coreconfigitem(
533 b'convert',
533 b'convert',
534 b'p4.encoding',
534 b'p4.encoding',
535 default=dynamicdefault,
535 default=dynamicdefault,
536 )
536 )
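# A minimal sketch, assuming `dynamicdefault` is a sentinel rather than a
# value (helper names below are ours, not Mercurial's): a reader of such an
# item supplies the real default at the call site instead of at
# registration time.
_dynamicdefault_demo = object()

def _resolve(value, registered_default, call_site_default=None):
    """Return the effective value for an item registered with dynamicdefault."""
    if value is not None:
        return value
    if registered_default is _dynamicdefault_demo:
        return call_site_default
    return registered_default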
537 coreconfigitem(
537 coreconfigitem(
538 b'convert',
538 b'convert',
539 b'p4.startrev',
539 b'p4.startrev',
540 default=0,
540 default=0,
541 )
541 )
542 coreconfigitem(
542 coreconfigitem(
543 b'convert',
543 b'convert',
544 b'skiptags',
544 b'skiptags',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem(
547 coreconfigitem(
548 b'convert',
548 b'convert',
549 b'svn.debugsvnlog',
549 b'svn.debugsvnlog',
550 default=True,
550 default=True,
551 )
551 )
552 coreconfigitem(
552 coreconfigitem(
553 b'convert',
553 b'convert',
554 b'svn.trunk',
554 b'svn.trunk',
555 default=None,
555 default=None,
556 )
556 )
557 coreconfigitem(
557 coreconfigitem(
558 b'convert',
558 b'convert',
559 b'svn.tags',
559 b'svn.tags',
560 default=None,
560 default=None,
561 )
561 )
562 coreconfigitem(
562 coreconfigitem(
563 b'convert',
563 b'convert',
564 b'svn.branches',
564 b'svn.branches',
565 default=None,
565 default=None,
566 )
566 )
567 coreconfigitem(
567 coreconfigitem(
568 b'convert',
568 b'convert',
569 b'svn.startrev',
569 b'svn.startrev',
570 default=0,
570 default=0,
571 )
571 )
572 coreconfigitem(
572 coreconfigitem(
573 b'convert',
573 b'convert',
574 b'svn.dangerous-set-commit-dates',
574 b'svn.dangerous-set-commit-dates',
575 default=False,
575 default=False,
576 )
576 )
577 coreconfigitem(
577 coreconfigitem(
578 b'debug',
578 b'debug',
579 b'dirstate.delaywrite',
579 b'dirstate.delaywrite',
580 default=0,
580 default=0,
581 )
581 )
582 coreconfigitem(
582 coreconfigitem(
583 b'debug',
583 b'debug',
584 b'revlog.verifyposition.changelog',
584 b'revlog.verifyposition.changelog',
585 default=b'',
585 default=b'',
586 )
586 )
587 coreconfigitem(
587 coreconfigitem(
588 b'defaults',
588 b'defaults',
589 b'.*',
589 b'.*',
590 default=None,
590 default=None,
591 generic=True,
591 generic=True,
592 )
592 )
593 coreconfigitem(
593 coreconfigitem(
594 b'devel',
594 b'devel',
595 b'all-warnings',
595 b'all-warnings',
596 default=False,
596 default=False,
597 )
597 )
598 coreconfigitem(
598 coreconfigitem(
599 b'devel',
599 b'devel',
600 b'bundle2.debug',
600 b'bundle2.debug',
601 default=False,
601 default=False,
602 )
602 )
603 coreconfigitem(
603 coreconfigitem(
604 b'devel',
604 b'devel',
605 b'bundle.delta',
605 b'bundle.delta',
606 default=b'',
606 default=b'',
607 )
607 )
608 coreconfigitem(
608 coreconfigitem(
609 b'devel',
609 b'devel',
610 b'cache-vfs',
610 b'cache-vfs',
611 default=None,
611 default=None,
612 )
612 )
613 coreconfigitem(
613 coreconfigitem(
614 b'devel',
614 b'devel',
615 b'check-locks',
615 b'check-locks',
616 default=False,
616 default=False,
617 )
617 )
618 coreconfigitem(
618 coreconfigitem(
619 b'devel',
619 b'devel',
620 b'check-relroot',
620 b'check-relroot',
621 default=False,
621 default=False,
622 )
622 )
623 # Track copy information for all files, not just "added" ones (very slow)
623 # Track copy information for all files, not just "added" ones (very slow)
624 coreconfigitem(
624 coreconfigitem(
625 b'devel',
625 b'devel',
626 b'copy-tracing.trace-all-files',
626 b'copy-tracing.trace-all-files',
627 default=False,
627 default=False,
628 )
628 )
629 coreconfigitem(
629 coreconfigitem(
630 b'devel',
630 b'devel',
631 b'default-date',
631 b'default-date',
632 default=None,
632 default=None,
633 )
633 )
634 coreconfigitem(
634 coreconfigitem(
635 b'devel',
635 b'devel',
636 b'deprec-warn',
636 b'deprec-warn',
637 default=False,
637 default=False,
638 )
638 )
639 coreconfigitem(
639 coreconfigitem(
640 b'devel',
640 b'devel',
641 b'disableloaddefaultcerts',
641 b'disableloaddefaultcerts',
642 default=False,
642 default=False,
643 )
643 )
644 coreconfigitem(
644 coreconfigitem(
645 b'devel',
645 b'devel',
646 b'warn-empty-changegroup',
646 b'warn-empty-changegroup',
647 default=False,
647 default=False,
648 )
648 )
649 coreconfigitem(
649 coreconfigitem(
650 b'devel',
650 b'devel',
651 b'legacy.exchange',
651 b'legacy.exchange',
652 default=list,
652 default=list,
653 )
653 )
654 # When True, revlogs use a special reference version of the nodemap, which is
654 # When True, revlogs use a special reference version of the nodemap, which is
655 # not performant but is "known" to behave properly.
655 # not performant but is "known" to behave properly.
656 coreconfigitem(
656 coreconfigitem(
657 b'devel',
657 b'devel',
658 b'persistent-nodemap',
658 b'persistent-nodemap',
659 default=False,
659 default=False,
660 )
660 )
661 coreconfigitem(
661 coreconfigitem(
662 b'devel',
662 b'devel',
663 b'servercafile',
663 b'servercafile',
664 default=b'',
664 default=b'',
665 )
665 )
666 coreconfigitem(
666 coreconfigitem(
667 b'devel',
667 b'devel',
668 b'serverexactprotocol',
668 b'serverexactprotocol',
669 default=b'',
669 default=b'',
670 )
670 )
671 coreconfigitem(
671 coreconfigitem(
672 b'devel',
672 b'devel',
673 b'serverrequirecert',
673 b'serverrequirecert',
674 default=False,
674 default=False,
675 )
675 )
676 coreconfigitem(
676 coreconfigitem(
677 b'devel',
677 b'devel',
678 b'strip-obsmarkers',
678 b'strip-obsmarkers',
679 default=True,
679 default=True,
680 )
680 )
681 coreconfigitem(
681 coreconfigitem(
682 b'devel',
682 b'devel',
683 b'warn-config',
683 b'warn-config',
684 default=None,
684 default=None,
685 )
685 )
686 coreconfigitem(
686 coreconfigitem(
687 b'devel',
687 b'devel',
688 b'warn-config-default',
688 b'warn-config-default',
689 default=None,
689 default=None,
690 )
690 )
691 coreconfigitem(
691 coreconfigitem(
692 b'devel',
692 b'devel',
693 b'user.obsmarker',
693 b'user.obsmarker',
694 default=None,
694 default=None,
695 )
695 )
696 coreconfigitem(
696 coreconfigitem(
697 b'devel',
697 b'devel',
698 b'warn-config-unknown',
698 b'warn-config-unknown',
699 default=None,
699 default=None,
700 )
700 )
701 coreconfigitem(
701 coreconfigitem(
702 b'devel',
702 b'devel',
703 b'debug.copies',
703 b'debug.copies',
704 default=False,
704 default=False,
705 )
705 )
706 coreconfigitem(
706 coreconfigitem(
707 b'devel',
707 b'devel',
708 b'copy-tracing.multi-thread',
708 b'copy-tracing.multi-thread',
709 default=True,
709 default=True,
710 )
710 )
711 coreconfigitem(
711 coreconfigitem(
712 b'devel',
712 b'devel',
713 b'debug.extensions',
713 b'debug.extensions',
714 default=False,
714 default=False,
715 )
715 )
716 coreconfigitem(
716 coreconfigitem(
717 b'devel',
717 b'devel',
718 b'debug.repo-filters',
718 b'debug.repo-filters',
719 default=False,
719 default=False,
720 )
720 )
721 coreconfigitem(
721 coreconfigitem(
722 b'devel',
722 b'devel',
723 b'debug.peer-request',
723 b'debug.peer-request',
724 default=False,
724 default=False,
725 )
725 )
726 # If discovery.exchange-heads is False, discovery will not start by fetching
726 # If discovery.exchange-heads is False, discovery will not start by fetching
727 # remote heads and querying local heads.
727 # remote heads and querying local heads.
728 coreconfigitem(
728 coreconfigitem(
729 b'devel',
729 b'devel',
730 b'discovery.exchange-heads',
730 b'discovery.exchange-heads',
731 default=True,
731 default=True,
732 )
732 )
733 # If discovery.grow-sample is False, the sample size used in set discovery will
733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 # not be increased during the process.
734 # not be increased during the process.
735 coreconfigitem(
735 coreconfigitem(
736 b'devel',
736 b'devel',
737 b'discovery.grow-sample',
737 b'discovery.grow-sample',
738 default=True,
738 default=True,
739 )
739 )
740 # When discovery.grow-sample.dynamic is True (the default), the sample size is
740 # When discovery.grow-sample.dynamic is True (the default), the sample size is
741 # adapted to the shape of the undecided set: it is set to the max of
741 # adapted to the shape of the undecided set: it is set to the max of
742 # <target-size>, len(roots(undecided)), and len(heads(undecided)).
742 # <target-size>, len(roots(undecided)), and len(heads(undecided)).
743 coreconfigitem(
743 coreconfigitem(
744 b'devel',
744 b'devel',
745 b'discovery.grow-sample.dynamic',
745 b'discovery.grow-sample.dynamic',
746 default=True,
746 default=True,
747 )
747 )
748 # discovery.grow-sample.rate controls the rate at which the sample grows (see the sketch below)
748 # discovery.grow-sample.rate controls the rate at which the sample grows (see the sketch below)
749 coreconfigitem(
749 coreconfigitem(
750 b'devel',
750 b'devel',
751 b'discovery.grow-sample.rate',
751 b'discovery.grow-sample.rate',
752 default=1.05,
752 default=1.05,
753 )
753 )
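# A minimal, self-contained sketch (not Mercurial's actual discovery code;
# helper names are ours) of how the knobs above combine: the sample starts
# at devel.discovery.sample-size.initial, grows by grow-sample.rate each
# round, and, when grow-sample.dynamic is set, is floored by the shape of
# the undecided set as described above.
def _next_sample_size(current, undecided_roots, undecided_heads,
                      rate=1.05, dynamic=True):
    """Return the sample size to use for the next discovery round."""
    target = int(current * rate)  # multiplicative growth (grow-sample.rate)
    if dynamic:
        # grow-sample.dynamic: max of target, roots count, and heads count
        target = max(target, len(undecided_roots), len(undecided_heads))
    return target

# e.g. starting from sample-size.initial = 100:
# _next_sample_size(100, (), ()) returns 105 on the first growth step.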
754 # If discovery.randomize is False, random sampling during discovery is
754 # If discovery.randomize is False, random sampling during discovery is
755 # deterministic. This is meant for integration tests.
755 # deterministic. This is meant for integration tests.
756 coreconfigitem(
756 coreconfigitem(
757 b'devel',
757 b'devel',
758 b'discovery.randomize',
758 b'discovery.randomize',
759 default=True,
759 default=True,
760 )
760 )
761 # Control the initial size of the discovery sample
761 # Control the initial size of the discovery sample
762 coreconfigitem(
762 coreconfigitem(
763 b'devel',
763 b'devel',
764 b'discovery.sample-size',
764 b'discovery.sample-size',
765 default=200,
765 default=200,
766 )
766 )
767 # Control the initial size of the discovery sample for the initial round
767 # Control the initial size of the discovery sample for the initial round
768 coreconfigitem(
768 coreconfigitem(
769 b'devel',
769 b'devel',
770 b'discovery.sample-size.initial',
770 b'discovery.sample-size.initial',
771 default=100,
771 default=100,
772 )
772 )
773 _registerdiffopts(section=b'diff')
773 _registerdiffopts(section=b'diff')
774 coreconfigitem(
774 coreconfigitem(
775 b'diff',
775 b'diff',
776 b'merge',
776 b'merge',
777 default=False,
777 default=False,
778 experimental=True,
778 experimental=True,
779 )
779 )
780 coreconfigitem(
780 coreconfigitem(
781 b'email',
781 b'email',
782 b'bcc',
782 b'bcc',
783 default=None,
783 default=None,
784 )
784 )
785 coreconfigitem(
785 coreconfigitem(
786 b'email',
786 b'email',
787 b'cc',
787 b'cc',
788 default=None,
788 default=None,
789 )
789 )
790 coreconfigitem(
790 coreconfigitem(
791 b'email',
791 b'email',
792 b'charsets',
792 b'charsets',
793 default=list,
793 default=list,
794 )
794 )
795 coreconfigitem(
795 coreconfigitem(
796 b'email',
796 b'email',
797 b'from',
797 b'from',
798 default=None,
798 default=None,
799 )
799 )
800 coreconfigitem(
800 coreconfigitem(
801 b'email',
801 b'email',
802 b'method',
802 b'method',
803 default=b'smtp',
803 default=b'smtp',
804 )
804 )
805 coreconfigitem(
805 coreconfigitem(
806 b'email',
806 b'email',
807 b'reply-to',
807 b'reply-to',
808 default=None,
808 default=None,
809 )
809 )
810 coreconfigitem(
810 coreconfigitem(
811 b'email',
811 b'email',
812 b'to',
812 b'to',
813 default=None,
813 default=None,
814 )
814 )
815 coreconfigitem(
815 coreconfigitem(
816 b'experimental',
816 b'experimental',
817 b'archivemetatemplate',
817 b'archivemetatemplate',
818 default=dynamicdefault,
818 default=dynamicdefault,
819 )
819 )
820 coreconfigitem(
820 coreconfigitem(
821 b'experimental',
821 b'experimental',
822 b'auto-publish',
822 b'auto-publish',
823 default=b'publish',
823 default=b'publish',
824 )
824 )
825 coreconfigitem(
825 coreconfigitem(
826 b'experimental',
826 b'experimental',
827 b'bundle-phases',
827 b'bundle-phases',
828 default=False,
828 default=False,
829 )
829 )
830 coreconfigitem(
830 coreconfigitem(
831 b'experimental',
831 b'experimental',
832 b'bundle2-advertise',
832 b'bundle2-advertise',
833 default=True,
833 default=True,
834 )
834 )
835 coreconfigitem(
835 coreconfigitem(
836 b'experimental',
836 b'experimental',
837 b'bundle2-output-capture',
837 b'bundle2-output-capture',
838 default=False,
838 default=False,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'experimental',
841 b'experimental',
842 b'bundle2.pushback',
842 b'bundle2.pushback',
843 default=False,
843 default=False,
844 )
844 )
845 coreconfigitem(
845 coreconfigitem(
846 b'experimental',
846 b'experimental',
847 b'bundle2lazylocking',
847 b'bundle2lazylocking',
848 default=False,
848 default=False,
849 )
849 )
850 coreconfigitem(
850 coreconfigitem(
851 b'experimental',
851 b'experimental',
852 b'bundlecomplevel',
852 b'bundlecomplevel',
853 default=None,
853 default=None,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'experimental',
856 b'experimental',
857 b'bundlecomplevel.bzip2',
857 b'bundlecomplevel.bzip2',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'experimental',
861 b'experimental',
862 b'bundlecomplevel.gzip',
862 b'bundlecomplevel.gzip',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'experimental',
866 b'experimental',
867 b'bundlecomplevel.none',
867 b'bundlecomplevel.none',
868 default=None,
868 default=None,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'experimental',
871 b'experimental',
872 b'bundlecomplevel.zstd',
872 b'bundlecomplevel.zstd',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'bundlecompthreads',
877 b'bundlecompthreads',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'bundlecompthreads.bzip2',
882 b'bundlecompthreads.bzip2',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'bundlecompthreads.gzip',
887 b'bundlecompthreads.gzip',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'bundlecompthreads.none',
892 b'bundlecompthreads.none',
893 default=None,
893 default=None,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'bundlecompthreads.zstd',
897 b'bundlecompthreads.zstd',
898 default=None,
898 default=None,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'changegroup3',
902 b'changegroup3',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'changegroup4',
907 b'changegroup4',
908 default=False,
908 default=False,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'cleanup-as-archived',
912 b'cleanup-as-archived',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'clientcompressionengines',
917 b'clientcompressionengines',
918 default=list,
918 default=list,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'copytrace',
922 b'copytrace',
923 default=b'on',
923 default=b'on',
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'copytrace.movecandidateslimit',
927 b'copytrace.movecandidateslimit',
928 default=100,
928 default=100,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'copytrace.sourcecommitlimit',
932 b'copytrace.sourcecommitlimit',
933 default=100,
933 default=100,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'copies.read-from',
937 b'copies.read-from',
938 default=b"filelog-only",
938 default=b"filelog-only",
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'copies.write-to',
942 b'copies.write-to',
943 default=b'filelog-only',
943 default=b'filelog-only',
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'crecordtest',
947 b'crecordtest',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'directaccess',
952 b'directaccess',
953 default=False,
953 default=False,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'directaccess.revnums',
957 b'directaccess.revnums',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
962 b'dirstate-tree.in-memory',
963 default=False,
963 default=False,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'editortmpinhg',
967 b'editortmpinhg',
968 default=False,
968 default=False,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'evolution',
972 b'evolution',
973 default=list,
973 default=list,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'evolution.allowdivergence',
977 b'evolution.allowdivergence',
978 default=False,
978 default=False,
979 alias=[(b'experimental', b'allowdivergence')],
979 alias=[(b'experimental', b'allowdivergence')],
980 )
980 )
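# A minimal sketch, assuming alias semantics where the old (section, name)
# pair is consulted when the new one is unset (helper names are ours):
def _lookup_with_alias(cfg, section, name, aliases=()):
    for s, n in [(section, name)] + list(aliases):
        if (s, n) in cfg:
            return cfg[(s, n)]
    return None

# e.g. a repo still setting the old name keeps working:
_cfg = {(b'experimental', b'allowdivergence'): b'true'}
assert _lookup_with_alias(
    _cfg, b'experimental', b'evolution.allowdivergence',
    aliases=[(b'experimental', b'allowdivergence')],
) == b'true'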
981 coreconfigitem(
981 coreconfigitem(
982 b'experimental',
982 b'experimental',
983 b'evolution.allowunstable',
983 b'evolution.allowunstable',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem(
986 coreconfigitem(
987 b'experimental',
987 b'experimental',
988 b'evolution.createmarkers',
988 b'evolution.createmarkers',
989 default=None,
989 default=None,
990 )
990 )
991 coreconfigitem(
991 coreconfigitem(
992 b'experimental',
992 b'experimental',
993 b'evolution.effect-flags',
993 b'evolution.effect-flags',
994 default=True,
994 default=True,
995 alias=[(b'experimental', b'effect-flags')],
995 alias=[(b'experimental', b'effect-flags')],
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'evolution.exchange',
999 b'evolution.exchange',
1000 default=None,
1000 default=None,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'evolution.bundle-obsmarker',
1004 b'evolution.bundle-obsmarker',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'evolution.bundle-obsmarker:mandatory',
1009 b'evolution.bundle-obsmarker:mandatory',
1010 default=True,
1010 default=True,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'log.topo',
1014 b'log.topo',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'evolution.report-instabilities',
1019 b'evolution.report-instabilities',
1020 default=True,
1020 default=True,
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'evolution.track-operation',
1024 b'evolution.track-operation',
1025 default=True,
1025 default=True,
1026 )
1026 )
1027 # repo-level config to exclude a revset from visibility
1027 # repo-level config to exclude a revset from visibility
1028 #
1028 #
1029 # The target use case is to use `share` to expose different subsets of the same
1029 # The target use case is to use `share` to expose different subsets of the same
1030 # repository, especially server-side. See also `server.view`.
1030 # repository, especially server-side. See also `server.view`.
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'experimental',
1032 b'experimental',
1033 b'extra-filter-revs',
1033 b'extra-filter-revs',
1034 default=None,
1034 default=None,
1035 )
1035 )
1036 coreconfigitem(
1036 coreconfigitem(
1037 b'experimental',
1037 b'experimental',
1038 b'maxdeltachainspan',
1038 b'maxdeltachainspan',
1039 default=-1,
1039 default=-1,
1040 )
1040 )
1041 # tracks files that were undeleted (a merge might delete them, but we
1041 # tracks files that were undeleted (a merge might delete them, but we
1042 # explicitly kept/undeleted them) and creates new filenodes for them
1042 # explicitly kept/undeleted them) and creates new filenodes for them
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'experimental',
1044 b'experimental',
1045 b'merge-track-salvaged',
1045 b'merge-track-salvaged',
1046 default=False,
1046 default=False,
1047 )
1047 )
1048 coreconfigitem(
1048 coreconfigitem(
1049 b'experimental',
1049 b'experimental',
1050 b'mergetempdirprefix',
1050 b'mergetempdirprefix',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem(
1053 coreconfigitem(
1054 b'experimental',
1054 b'experimental',
1055 b'mmapindexthreshold',
1055 b'mmapindexthreshold',
1056 default=None,
1056 default=None,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'narrow',
1060 b'narrow',
1061 default=False,
1061 default=False,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'nonnormalparanoidcheck',
1065 b'nonnormalparanoidcheck',
1066 default=False,
1066 default=False,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'exportableenviron',
1070 b'exportableenviron',
1071 default=list,
1071 default=list,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'experimental',
1074 b'experimental',
1075 b'extendedheader.index',
1075 b'extendedheader.index',
1076 default=None,
1076 default=None,
1077 )
1077 )
1078 coreconfigitem(
1078 coreconfigitem(
1079 b'experimental',
1079 b'experimental',
1080 b'extendedheader.similarity',
1080 b'extendedheader.similarity',
1081 default=False,
1081 default=False,
1082 )
1082 )
1083 coreconfigitem(
1083 coreconfigitem(
1084 b'experimental',
1084 b'experimental',
1085 b'graphshorten',
1085 b'graphshorten',
1086 default=False,
1086 default=False,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'experimental',
1089 b'experimental',
1090 b'graphstyle.parent',
1090 b'graphstyle.parent',
1091 default=dynamicdefault,
1091 default=dynamicdefault,
1092 )
1092 )
1093 coreconfigitem(
1093 coreconfigitem(
1094 b'experimental',
1094 b'experimental',
1095 b'graphstyle.missing',
1095 b'graphstyle.missing',
1096 default=dynamicdefault,
1096 default=dynamicdefault,
1097 )
1097 )
1098 coreconfigitem(
1098 coreconfigitem(
1099 b'experimental',
1099 b'experimental',
1100 b'graphstyle.grandparent',
1100 b'graphstyle.grandparent',
1101 default=dynamicdefault,
1101 default=dynamicdefault,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'experimental',
1104 b'experimental',
1105 b'hook-track-tags',
1105 b'hook-track-tags',
1106 default=False,
1106 default=False,
1107 )
1107 )
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'httppeer.advertise-v2',
1110 b'httppeer.advertise-v2',
1111 default=False,
1111 default=False,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'httppeer.v2-encoder-order',
1115 b'httppeer.v2-encoder-order',
1116 default=None,
1116 default=None,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'experimental',
1119 b'experimental',
1120 b'httppostargs',
1120 b'httppostargs',
1121 default=False,
1121 default=False,
1122 )
1122 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1125
1126 coreconfigitem(
1126 coreconfigitem(
1127 b'experimental',
1127 b'experimental',
1128 b'obsmarkers-exchange-debug',
1128 b'obsmarkers-exchange-debug',
1129 default=False,
1129 default=False,
1130 )
1130 )
1131 coreconfigitem(
1131 coreconfigitem(
1132 b'experimental',
1132 b'experimental',
1133 b'remotenames',
1133 b'remotenames',
1134 default=False,
1134 default=False,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'experimental',
1137 b'experimental',
1138 b'removeemptydirs',
1138 b'removeemptydirs',
1139 default=True,
1139 default=True,
1140 )
1140 )
1141 coreconfigitem(
1141 coreconfigitem(
1142 b'experimental',
1142 b'experimental',
1143 b'revert.interactive.select-to-keep',
1143 b'revert.interactive.select-to-keep',
1144 default=False,
1144 default=False,
1145 )
1145 )
1146 coreconfigitem(
1146 coreconfigitem(
1147 b'experimental',
1147 b'experimental',
1148 b'revisions.prefixhexnode',
1148 b'revisions.prefixhexnode',
1149 default=False,
1149 default=False,
1150 )
1150 )
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * to grow a docket file to at least store the last offset of the data
1153 # * stop storing version information in the index (it is already in the docket)
1154 # file when rewriting sidedata.
1154 # * properly hide uncommitted content from other processes
1155 # * need a way of dealing with garbage data if we allow rewriting
1155 # * expose transaction content hooks during pre-commit validation
1156 # *existing* sidedata.
1156 # * include management of a persistent nodemap in the main docket
1157 # * enforce a "no-truncate" policy for mmap safety
1158 # - for censoring operations
1159 # - for stripping operations
1160 # - for rollback operations
1161 # * store the data size in the docket to simplify sidedata rewrite.
1162 # * track garbage data to eventually allow rewriting -existing- sidedata.
1157 # * Exchange-wise, we will also need to do something more efficient than
1163 # * Exchange-wise, we will also need to do something more efficient than
1158 # keeping references to the affected revlogs, especially memory-wise when
1164 # keeping references to the affected revlogs, especially memory-wise when
1159 # rewriting sidedata.
1165 # rewriting sidedata.
1160 # * Also... compress the sidedata? (this should be coming very soon)
1166 # * sidedata compression
1167 # * introduce a proper solution to reduce the number of filelog related files.
1168 # * Improvements to consider
1169 # - track compression mode in the index entries instead of the chunks
1170 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1171 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1172 # - keep track of chain base or size (probably not that useful anymore)
1173 # - store data and sidedata in different files
1161 coreconfigitem(
1174 coreconfigitem(
1162 b'experimental',
1175 b'experimental',
1163 b'revlogv2',
1176 b'revlogv2',
1164 default=None,
1177 default=None,
1165 )
1178 )
1166 coreconfigitem(
1179 coreconfigitem(
1167 b'experimental',
1180 b'experimental',
1168 b'revisions.disambiguatewithin',
1181 b'revisions.disambiguatewithin',
1169 default=None,
1182 default=None,
1170 )
1183 )
1171 coreconfigitem(
1184 coreconfigitem(
1172 b'experimental',
1185 b'experimental',
1173 b'rust.index',
1186 b'rust.index',
1174 default=False,
1187 default=False,
1175 )
1188 )
1176 coreconfigitem(
1189 coreconfigitem(
1177 b'experimental',
1190 b'experimental',
1178 b'server.filesdata.recommended-batch-size',
1191 b'server.filesdata.recommended-batch-size',
1179 default=50000,
1192 default=50000,
1180 )
1193 )
1181 coreconfigitem(
1194 coreconfigitem(
1182 b'experimental',
1195 b'experimental',
1183 b'server.manifestdata.recommended-batch-size',
1196 b'server.manifestdata.recommended-batch-size',
1184 default=100000,
1197 default=100000,
1185 )
1198 )
1186 coreconfigitem(
1199 coreconfigitem(
1187 b'experimental',
1200 b'experimental',
1188 b'server.stream-narrow-clones',
1201 b'server.stream-narrow-clones',
1189 default=False,
1202 default=False,
1190 )
1203 )
1191 coreconfigitem(
1204 coreconfigitem(
1192 b'experimental',
1205 b'experimental',
1193 b'single-head-per-branch',
1206 b'single-head-per-branch',
1194 default=False,
1207 default=False,
1195 )
1208 )
1196 coreconfigitem(
1209 coreconfigitem(
1197 b'experimental',
1210 b'experimental',
1198 b'single-head-per-branch:account-closed-heads',
1211 b'single-head-per-branch:account-closed-heads',
1199 default=False,
1212 default=False,
1200 )
1213 )
1201 coreconfigitem(
1214 coreconfigitem(
1202 b'experimental',
1215 b'experimental',
1203 b'single-head-per-branch:public-changes-only',
1216 b'single-head-per-branch:public-changes-only',
1204 default=False,
1217 default=False,
1205 )
1218 )
1206 coreconfigitem(
1219 coreconfigitem(
1207 b'experimental',
1220 b'experimental',
1208 b'sshserver.support-v2',
1221 b'sshserver.support-v2',
1209 default=False,
1222 default=False,
1210 )
1223 )
1211 coreconfigitem(
1224 coreconfigitem(
1212 b'experimental',
1225 b'experimental',
1213 b'sparse-read',
1226 b'sparse-read',
1214 default=False,
1227 default=False,
1215 )
1228 )
1216 coreconfigitem(
1229 coreconfigitem(
1217 b'experimental',
1230 b'experimental',
1218 b'sparse-read.density-threshold',
1231 b'sparse-read.density-threshold',
1219 default=0.50,
1232 default=0.50,
1220 )
1233 )
1221 coreconfigitem(
1234 coreconfigitem(
1222 b'experimental',
1235 b'experimental',
1223 b'sparse-read.min-gap-size',
1236 b'sparse-read.min-gap-size',
1224 default=b'65K',
1237 default=b'65K',
1225 )
1238 )
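# A hedged sketch of the human-readable size convention behind defaults such
# as b'65K' (the parser below is illustrative, not Mercurial's own):
_SIZE_UNITS = {b'K': 1 << 10, b'M': 1 << 20, b'G': 1 << 30}

def _parse_size(value):
    unit = _SIZE_UNITS.get(value[-1:].upper())
    if unit is not None:
        return int(value[:-1]) * unit
    return int(value)

assert _parse_size(b'65K') == 65 * 1024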
1226 coreconfigitem(
1239 coreconfigitem(
1227 b'experimental',
1240 b'experimental',
1228 b'treemanifest',
1241 b'treemanifest',
1229 default=False,
1242 default=False,
1230 )
1243 )
1231 coreconfigitem(
1244 coreconfigitem(
1232 b'experimental',
1245 b'experimental',
1233 b'update.atomic-file',
1246 b'update.atomic-file',
1234 default=False,
1247 default=False,
1235 )
1248 )
1236 coreconfigitem(
1249 coreconfigitem(
1237 b'experimental',
1250 b'experimental',
1238 b'sshpeer.advertise-v2',
1251 b'sshpeer.advertise-v2',
1239 default=False,
1252 default=False,
1240 )
1253 )
1241 coreconfigitem(
1254 coreconfigitem(
1242 b'experimental',
1255 b'experimental',
1243 b'web.apiserver',
1256 b'web.apiserver',
1244 default=False,
1257 default=False,
1245 )
1258 )
1246 coreconfigitem(
1259 coreconfigitem(
1247 b'experimental',
1260 b'experimental',
1248 b'web.api.http-v2',
1261 b'web.api.http-v2',
1249 default=False,
1262 default=False,
1250 )
1263 )
1251 coreconfigitem(
1264 coreconfigitem(
1252 b'experimental',
1265 b'experimental',
1253 b'web.api.debugreflect',
1266 b'web.api.debugreflect',
1254 default=False,
1267 default=False,
1255 )
1268 )
1256 coreconfigitem(
1269 coreconfigitem(
1257 b'experimental',
1270 b'experimental',
1258 b'worker.wdir-get-thread-safe',
1271 b'worker.wdir-get-thread-safe',
1259 default=False,
1272 default=False,
1260 )
1273 )
1261 coreconfigitem(
1274 coreconfigitem(
1262 b'experimental',
1275 b'experimental',
1263 b'worker.repository-upgrade',
1276 b'worker.repository-upgrade',
1264 default=False,
1277 default=False,
1265 )
1278 )
1266 coreconfigitem(
1279 coreconfigitem(
1267 b'experimental',
1280 b'experimental',
1268 b'xdiff',
1281 b'xdiff',
1269 default=False,
1282 default=False,
1270 )
1283 )
1271 coreconfigitem(
1284 coreconfigitem(
1272 b'extensions',
1285 b'extensions',
1273 b'.*',
1286 b'.*',
1274 default=None,
1287 default=None,
1275 generic=True,
1288 generic=True,
1276 )
1289 )
1277 coreconfigitem(
1290 coreconfigitem(
1278 b'extdata',
1291 b'extdata',
1279 b'.*',
1292 b'.*',
1280 default=None,
1293 default=None,
1281 generic=True,
1294 generic=True,
1282 )
1295 )
1283 coreconfigitem(
1296 coreconfigitem(
1284 b'format',
1297 b'format',
1285 b'bookmarks-in-store',
1298 b'bookmarks-in-store',
1286 default=False,
1299 default=False,
1287 )
1300 )
1288 coreconfigitem(
1301 coreconfigitem(
1289 b'format',
1302 b'format',
1290 b'chunkcachesize',
1303 b'chunkcachesize',
1291 default=None,
1304 default=None,
1292 experimental=True,
1305 experimental=True,
1293 )
1306 )
1294 coreconfigitem(
1307 coreconfigitem(
1295 b'format',
1308 b'format',
1296 b'dotencode',
1309 b'dotencode',
1297 default=True,
1310 default=True,
1298 )
1311 )
1299 coreconfigitem(
1312 coreconfigitem(
1300 b'format',
1313 b'format',
1301 b'generaldelta',
1314 b'generaldelta',
1302 default=False,
1315 default=False,
1303 experimental=True,
1316 experimental=True,
1304 )
1317 )
1305 coreconfigitem(
1318 coreconfigitem(
1306 b'format',
1319 b'format',
1307 b'manifestcachesize',
1320 b'manifestcachesize',
1308 default=None,
1321 default=None,
1309 experimental=True,
1322 experimental=True,
1310 )
1323 )
1311 coreconfigitem(
1324 coreconfigitem(
1312 b'format',
1325 b'format',
1313 b'maxchainlen',
1326 b'maxchainlen',
1314 default=dynamicdefault,
1327 default=dynamicdefault,
1315 experimental=True,
1328 experimental=True,
1316 )
1329 )
1317 coreconfigitem(
1330 coreconfigitem(
1318 b'format',
1331 b'format',
1319 b'obsstore-version',
1332 b'obsstore-version',
1320 default=None,
1333 default=None,
1321 )
1334 )
1322 coreconfigitem(
1335 coreconfigitem(
1323 b'format',
1336 b'format',
1324 b'sparse-revlog',
1337 b'sparse-revlog',
1325 default=True,
1338 default=True,
1326 )
1339 )
1327 coreconfigitem(
1340 coreconfigitem(
1328 b'format',
1341 b'format',
1329 b'revlog-compression',
1342 b'revlog-compression',
1330 default=lambda: [b'zstd', b'zlib'],
1343 default=lambda: [b'zstd', b'zlib'],
1331 alias=[(b'experimental', b'format.compression')],
1344 alias=[(b'experimental', b'format.compression')],
1332 )
1345 )
1333 coreconfigitem(
1346 coreconfigitem(
1334 b'format',
1347 b'format',
1335 b'usefncache',
1348 b'usefncache',
1336 default=True,
1349 default=True,
1337 )
1350 )
1338 coreconfigitem(
1351 coreconfigitem(
1339 b'format',
1352 b'format',
1340 b'usegeneraldelta',
1353 b'usegeneraldelta',
1341 default=True,
1354 default=True,
1342 )
1355 )
1343 coreconfigitem(
1356 coreconfigitem(
1344 b'format',
1357 b'format',
1345 b'usestore',
1358 b'usestore',
1346 default=True,
1359 default=True,
1347 )
1360 )
1348
1361
1349
1362
1350 def _persistent_nodemap_default():
1363 def _persistent_nodemap_default():
1351 """compute `use-persistent-nodemap` default value
1364 """compute `use-persistent-nodemap` default value
1352
1365
1353 The feature is disabled unless a fast implementation is available.
1366 The feature is disabled unless a fast implementation is available.
1354 """
1367 """
1355 from . import policy
1368 from . import policy
1356
1369
1357 return policy.importrust('revlog') is not None
1370 return policy.importrust('revlog') is not None
1358
1371
1359
1372
1360 coreconfigitem(
1373 coreconfigitem(
1361 b'format',
1374 b'format',
1362 b'use-persistent-nodemap',
1375 b'use-persistent-nodemap',
1363 default=_persistent_nodemap_default,
1376 default=_persistent_nodemap_default,
1364 )
1377 )
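# A minimal sketch, assuming callable defaults are evaluated lazily (the
# helper name is ours): this is why _persistent_nodemap_default's Rust
# probe runs on first lookup rather than at import time, like the `list`
# and lambda defaults used elsewhere in this file.
def _effective_default(default):
    return default() if callable(default) else default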
1365 coreconfigitem(
1378 coreconfigitem(
1366 b'format',
1379 b'format',
1367 b'exp-use-copies-side-data-changeset',
1380 b'exp-use-copies-side-data-changeset',
1368 default=False,
1381 default=False,
1369 experimental=True,
1382 experimental=True,
1370 )
1383 )
1371 coreconfigitem(
1384 coreconfigitem(
1372 b'format',
1385 b'format',
1373 b'use-share-safe',
1386 b'use-share-safe',
1374 default=False,
1387 default=False,
1375 )
1388 )
1376 coreconfigitem(
1389 coreconfigitem(
1377 b'format',
1390 b'format',
1378 b'internal-phase',
1391 b'internal-phase',
1379 default=False,
1392 default=False,
1380 experimental=True,
1393 experimental=True,
1381 )
1394 )
1382 coreconfigitem(
1395 coreconfigitem(
1383 b'fsmonitor',
1396 b'fsmonitor',
1384 b'warn_when_unused',
1397 b'warn_when_unused',
1385 default=True,
1398 default=True,
1386 )
1399 )
1387 coreconfigitem(
1400 coreconfigitem(
1388 b'fsmonitor',
1401 b'fsmonitor',
1389 b'warn_update_file_count',
1402 b'warn_update_file_count',
1390 default=50000,
1403 default=50000,
1391 )
1404 )
1392 coreconfigitem(
1405 coreconfigitem(
1393 b'fsmonitor',
1406 b'fsmonitor',
1394 b'warn_update_file_count_rust',
1407 b'warn_update_file_count_rust',
1395 default=400000,
1408 default=400000,
1396 )
1409 )
1397 coreconfigitem(
1410 coreconfigitem(
1398 b'help',
1411 b'help',
1399 br'hidden-command\..*',
1412 br'hidden-command\..*',
1400 default=False,
1413 default=False,
1401 generic=True,
1414 generic=True,
1402 )
1415 )
1403 coreconfigitem(
1416 coreconfigitem(
1404 b'help',
1417 b'help',
1405 br'hidden-topic\..*',
1418 br'hidden-topic\..*',
1406 default=False,
1419 default=False,
1407 generic=True,
1420 generic=True,
1408 )
1421 )
1409 coreconfigitem(
1422 coreconfigitem(
1410 b'hooks',
1423 b'hooks',
1411 b'[^:]*',
1424 b'[^:]*',
1412 default=dynamicdefault,
1425 default=dynamicdefault,
1413 generic=True,
1426 generic=True,
1414 )
1427 )
1415 coreconfigitem(
1428 coreconfigitem(
1416 b'hooks',
1429 b'hooks',
1417 b'.*:run-with-plain',
1430 b'.*:run-with-plain',
1418 default=True,
1431 default=True,
1419 generic=True,
1432 generic=True,
1420 )
1433 )
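# A self-contained sketch of how generic=True items are assumed to work:
# the registered name is a regular expression covering a family of keys
# (helper name and example hook name are ours).
import re

def _covers(pattern, key):
    return re.fullmatch(pattern, key) is not None

# any colon-free hook name falls under the b'[^:]*' item above, while the
# b'.*:run-with-plain' item catches the suffixed per-hook variant:
assert _covers(b'[^:]*', b'pretxncommit.check-style')
assert _covers(b'.*:run-with-plain', b'pretxncommit.check-style:run-with-plain')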
1421 coreconfigitem(
1434 coreconfigitem(
1422 b'hgweb-paths',
1435 b'hgweb-paths',
1423 b'.*',
1436 b'.*',
1424 default=list,
1437 default=list,
1425 generic=True,
1438 generic=True,
1426 )
1439 )
1427 coreconfigitem(
1440 coreconfigitem(
1428 b'hostfingerprints',
1441 b'hostfingerprints',
1429 b'.*',
1442 b'.*',
1430 default=list,
1443 default=list,
1431 generic=True,
1444 generic=True,
1432 )
1445 )
1433 coreconfigitem(
1446 coreconfigitem(
1434 b'hostsecurity',
1447 b'hostsecurity',
1435 b'ciphers',
1448 b'ciphers',
1436 default=None,
1449 default=None,
1437 )
1450 )
1438 coreconfigitem(
1451 coreconfigitem(
1439 b'hostsecurity',
1452 b'hostsecurity',
1440 b'minimumprotocol',
1453 b'minimumprotocol',
1441 default=dynamicdefault,
1454 default=dynamicdefault,
1442 )
1455 )
1443 coreconfigitem(
1456 coreconfigitem(
1444 b'hostsecurity',
1457 b'hostsecurity',
1445 b'.*:minimumprotocol$',
1458 b'.*:minimumprotocol$',
1446 default=dynamicdefault,
1459 default=dynamicdefault,
1447 generic=True,
1460 generic=True,
1448 )
1461 )
1449 coreconfigitem(
1462 coreconfigitem(
1450 b'hostsecurity',
1463 b'hostsecurity',
1451 b'.*:ciphers$',
1464 b'.*:ciphers$',
1452 default=dynamicdefault,
1465 default=dynamicdefault,
1453 generic=True,
1466 generic=True,
1454 )
1467 )
1455 coreconfigitem(
1468 coreconfigitem(
1456 b'hostsecurity',
1469 b'hostsecurity',
1457 b'.*:fingerprints$',
1470 b'.*:fingerprints$',
1458 default=list,
1471 default=list,
1459 generic=True,
1472 generic=True,
1460 )
1473 )
1461 coreconfigitem(
1474 coreconfigitem(
1462 b'hostsecurity',
1475 b'hostsecurity',
1463 b'.*:verifycertsfile$',
1476 b'.*:verifycertsfile$',
1464 default=None,
1477 default=None,
1465 generic=True,
1478 generic=True,
1466 )
1479 )
1467
1480
1468 coreconfigitem(
1481 coreconfigitem(
1469 b'http_proxy',
1482 b'http_proxy',
1470 b'always',
1483 b'always',
1471 default=False,
1484 default=False,
1472 )
1485 )
1473 coreconfigitem(
1486 coreconfigitem(
1474 b'http_proxy',
1487 b'http_proxy',
1475 b'host',
1488 b'host',
1476 default=None,
1489 default=None,
1477 )
1490 )
1478 coreconfigitem(
1491 coreconfigitem(
1479 b'http_proxy',
1492 b'http_proxy',
1480 b'no',
1493 b'no',
1481 default=list,
1494 default=list,
1482 )
1495 )
1483 coreconfigitem(
1496 coreconfigitem(
1484 b'http_proxy',
1497 b'http_proxy',
1485 b'passwd',
1498 b'passwd',
1486 default=None,
1499 default=None,
1487 )
1500 )
1488 coreconfigitem(
1501 coreconfigitem(
1489 b'http_proxy',
1502 b'http_proxy',
1490 b'user',
1503 b'user',
1491 default=None,
1504 default=None,
1492 )
1505 )
1493
1506
1494 coreconfigitem(
1507 coreconfigitem(
1495 b'http',
1508 b'http',
1496 b'timeout',
1509 b'timeout',
1497 default=None,
1510 default=None,
1498 )
1511 )
1499
1512
1500 coreconfigitem(
1513 coreconfigitem(
1501 b'logtoprocess',
1514 b'logtoprocess',
1502 b'commandexception',
1515 b'commandexception',
1503 default=None,
1516 default=None,
1504 )
1517 )
1505 coreconfigitem(
1518 coreconfigitem(
1506 b'logtoprocess',
1519 b'logtoprocess',
1507 b'commandfinish',
1520 b'commandfinish',
1508 default=None,
1521 default=None,
1509 )
1522 )
1510 coreconfigitem(
1523 coreconfigitem(
1511 b'logtoprocess',
1524 b'logtoprocess',
1512 b'command',
1525 b'command',
1513 default=None,
1526 default=None,
1514 )
1527 )
1515 coreconfigitem(
1528 coreconfigitem(
1516 b'logtoprocess',
1529 b'logtoprocess',
1517 b'develwarn',
1530 b'develwarn',
1518 default=None,
1531 default=None,
1519 )
1532 )
1520 coreconfigitem(
1533 coreconfigitem(
1521 b'logtoprocess',
1534 b'logtoprocess',
1522 b'uiblocked',
1535 b'uiblocked',
1523 default=None,
1536 default=None,
1524 )
1537 )
1525 coreconfigitem(
1538 coreconfigitem(
1526 b'merge',
1539 b'merge',
1527 b'checkunknown',
1540 b'checkunknown',
1528 default=b'abort',
1541 default=b'abort',
1529 )
1542 )
1530 coreconfigitem(
1543 coreconfigitem(
1531 b'merge',
1544 b'merge',
1532 b'checkignored',
1545 b'checkignored',
1533 default=b'abort',
1546 default=b'abort',
1534 )
1547 )
1535 coreconfigitem(
1548 coreconfigitem(
1536 b'experimental',
1549 b'experimental',
1537 b'merge.checkpathconflicts',
1550 b'merge.checkpathconflicts',
1538 default=False,
1551 default=False,
1539 )
1552 )
1540 coreconfigitem(
1553 coreconfigitem(
1541 b'merge',
1554 b'merge',
1542 b'followcopies',
1555 b'followcopies',
1543 default=True,
1556 default=True,
1544 )
1557 )
1545 coreconfigitem(
1558 coreconfigitem(
1546 b'merge',
1559 b'merge',
1547 b'on-failure',
1560 b'on-failure',
1548 default=b'continue',
1561 default=b'continue',
1549 )
1562 )
1550 coreconfigitem(
1563 coreconfigitem(
1551 b'merge',
1564 b'merge',
1552 b'preferancestor',
1565 b'preferancestor',
1553 default=lambda: [b'*'],
1566 default=lambda: [b'*'],
1554 experimental=True,
1567 experimental=True,
1555 )
1568 )
1556 coreconfigitem(
1569 coreconfigitem(
1557 b'merge',
1570 b'merge',
1558 b'strict-capability-check',
1571 b'strict-capability-check',
1559 default=False,
1572 default=False,
1560 )
1573 )
1561 coreconfigitem(
1574 coreconfigitem(
1562 b'merge-tools',
1575 b'merge-tools',
1563 b'.*',
1576 b'.*',
1564 default=None,
1577 default=None,
1565 generic=True,
1578 generic=True,
1566 )
1579 )
1567 coreconfigitem(
1580 coreconfigitem(
1568 b'merge-tools',
1581 b'merge-tools',
1569 br'.*\.args$',
1582 br'.*\.args$',
1570 default=b"$local $base $other",
1583 default=b"$local $base $other",
1571 generic=True,
1584 generic=True,
1572 priority=-1,
1585 priority=-1,
1573 )
1586 )
1574 coreconfigitem(
1587 coreconfigitem(
1575 b'merge-tools',
1588 b'merge-tools',
1576 br'.*\.binary$',
1589 br'.*\.binary$',
1577 default=False,
1590 default=False,
1578 generic=True,
1591 generic=True,
1579 priority=-1,
1592 priority=-1,
1580 )
1593 )
1581 coreconfigitem(
1594 coreconfigitem(
1582 b'merge-tools',
1595 b'merge-tools',
1583 br'.*\.check$',
1596 br'.*\.check$',
1584 default=list,
1597 default=list,
1585 generic=True,
1598 generic=True,
1586 priority=-1,
1599 priority=-1,
1587 )
1600 )
1588 coreconfigitem(
1601 coreconfigitem(
1589 b'merge-tools',
1602 b'merge-tools',
1590 br'.*\.checkchanged$',
1603 br'.*\.checkchanged$',
1591 default=False,
1604 default=False,
1592 generic=True,
1605 generic=True,
1593 priority=-1,
1606 priority=-1,
1594 )
1607 )
1595 coreconfigitem(
1608 coreconfigitem(
1596 b'merge-tools',
1609 b'merge-tools',
1597 br'.*\.executable$',
1610 br'.*\.executable$',
1598 default=dynamicdefault,
1611 default=dynamicdefault,
1599 generic=True,
1612 generic=True,
1600 priority=-1,
1613 priority=-1,
1601 )
1614 )
1602 coreconfigitem(
1615 coreconfigitem(
1603 b'merge-tools',
1616 b'merge-tools',
1604 br'.*\.fixeol$',
1617 br'.*\.fixeol$',
1605 default=False,
1618 default=False,
1606 generic=True,
1619 generic=True,
1607 priority=-1,
1620 priority=-1,
1608 )
1621 )
1609 coreconfigitem(
1622 coreconfigitem(
1610 b'merge-tools',
1623 b'merge-tools',
1611 br'.*\.gui$',
1624 br'.*\.gui$',
1612 default=False,
1625 default=False,
1613 generic=True,
1626 generic=True,
1614 priority=-1,
1627 priority=-1,
1615 )
1628 )
1616 coreconfigitem(
1629 coreconfigitem(
1617 b'merge-tools',
1630 b'merge-tools',
1618 br'.*\.mergemarkers$',
1631 br'.*\.mergemarkers$',
1619 default=b'basic',
1632 default=b'basic',
1620 generic=True,
1633 generic=True,
1621 priority=-1,
1634 priority=-1,
1622 )
1635 )
1623 coreconfigitem(
1636 coreconfigitem(
1624 b'merge-tools',
1637 b'merge-tools',
1625 br'.*\.mergemarkertemplate$',
1638 br'.*\.mergemarkertemplate$',
1626 default=dynamicdefault, # take from command-templates.mergemarker
1639 default=dynamicdefault, # take from command-templates.mergemarker
1627 generic=True,
1640 generic=True,
1628 priority=-1,
1641 priority=-1,
1629 )
1642 )
1630 coreconfigitem(
1643 coreconfigitem(
1631 b'merge-tools',
1644 b'merge-tools',
1632 br'.*\.priority$',
1645 br'.*\.priority$',
1633 default=0,
1646 default=0,
1634 generic=True,
1647 generic=True,
1635 priority=-1,
1648 priority=-1,
1636 )
1649 )
1637 coreconfigitem(
1650 coreconfigitem(
1638 b'merge-tools',
1651 b'merge-tools',
1639 br'.*\.premerge$',
1652 br'.*\.premerge$',
1640 default=dynamicdefault,
1653 default=dynamicdefault,
1641 generic=True,
1654 generic=True,
1642 priority=-1,
1655 priority=-1,
1643 )
1656 )
1644 coreconfigitem(
1657 coreconfigitem(
1645 b'merge-tools',
1658 b'merge-tools',
1646 br'.*\.symlink$',
1659 br'.*\.symlink$',
1647 default=False,
1660 default=False,
1648 generic=True,
1661 generic=True,
1649 priority=-1,
1662 priority=-1,
1650 )
1663 )
1651 coreconfigitem(
1664 coreconfigitem(
1652 b'pager',
1665 b'pager',
1653 b'attend-.*',
1666 b'attend-.*',
1654 default=dynamicdefault,
1667 default=dynamicdefault,
1655 generic=True,
1668 generic=True,
1656 )
1669 )
1657 coreconfigitem(
1670 coreconfigitem(
1658 b'pager',
1671 b'pager',
1659 b'ignore',
1672 b'ignore',
1660 default=list,
1673 default=list,
1661 )
1674 )
1662 coreconfigitem(
1675 coreconfigitem(
1663 b'pager',
1676 b'pager',
1664 b'pager',
1677 b'pager',
1665 default=dynamicdefault,
1678 default=dynamicdefault,
1666 )
1679 )
1667 coreconfigitem(
1680 coreconfigitem(
1668 b'patch',
1681 b'patch',
1669 b'eol',
1682 b'eol',
1670 default=b'strict',
1683 default=b'strict',
1671 )
1684 )
1672 coreconfigitem(
1685 coreconfigitem(
1673 b'patch',
1686 b'patch',
1674 b'fuzz',
1687 b'fuzz',
1675 default=2,
1688 default=2,
1676 )
1689 )
1677 coreconfigitem(
1690 coreconfigitem(
1678 b'paths',
1691 b'paths',
1679 b'default',
1692 b'default',
1680 default=None,
1693 default=None,
1681 )
1694 )
1682 coreconfigitem(
1695 coreconfigitem(
1683 b'paths',
1696 b'paths',
1684 b'default-push',
1697 b'default-push',
1685 default=None,
1698 default=None,
1686 )
1699 )
1687 coreconfigitem(
1700 coreconfigitem(
1688 b'paths',
1701 b'paths',
1689 b'.*',
1702 b'.*',
1690 default=None,
1703 default=None,
1691 generic=True,
1704 generic=True,
1692 )
1705 )
1693 coreconfigitem(
1706 coreconfigitem(
1694 b'phases',
1707 b'phases',
1695 b'checksubrepos',
1708 b'checksubrepos',
1696 default=b'follow',
1709 default=b'follow',
1697 )
1710 )
1698 coreconfigitem(
1711 coreconfigitem(
1699 b'phases',
1712 b'phases',
1700 b'new-commit',
1713 b'new-commit',
1701 default=b'draft',
1714 default=b'draft',
1702 )
1715 )
1703 coreconfigitem(
1716 coreconfigitem(
1704 b'phases',
1717 b'phases',
1705 b'publish',
1718 b'publish',
1706 default=True,
1719 default=True,
1707 )
1720 )
1708 coreconfigitem(
1721 coreconfigitem(
1709 b'profiling',
1722 b'profiling',
1710 b'enabled',
1723 b'enabled',
1711 default=False,
1724 default=False,
1712 )
1725 )
1713 coreconfigitem(
1726 coreconfigitem(
1714 b'profiling',
1727 b'profiling',
1715 b'format',
1728 b'format',
1716 default=b'text',
1729 default=b'text',
1717 )
1730 )
1718 coreconfigitem(
1731 coreconfigitem(
1719 b'profiling',
1732 b'profiling',
1720 b'freq',
1733 b'freq',
1721 default=1000,
1734 default=1000,
1722 )
1735 )
1723 coreconfigitem(
1736 coreconfigitem(
1724 b'profiling',
1737 b'profiling',
1725 b'limit',
1738 b'limit',
1726 default=30,
1739 default=30,
1727 )
1740 )
1728 coreconfigitem(
1741 coreconfigitem(
1729 b'profiling',
1742 b'profiling',
1730 b'nested',
1743 b'nested',
1731 default=0,
1744 default=0,
1732 )
1745 )
1733 coreconfigitem(
1746 coreconfigitem(
1734 b'profiling',
1747 b'profiling',
1735 b'output',
1748 b'output',
1736 default=None,
1749 default=None,
1737 )
1750 )
1738 coreconfigitem(
1751 coreconfigitem(
1739 b'profiling',
1752 b'profiling',
1740 b'showmax',
1753 b'showmax',
1741 default=0.999,
1754 default=0.999,
1742 )
1755 )
1743 coreconfigitem(
1756 coreconfigitem(
1744 b'profiling',
1757 b'profiling',
1745 b'showmin',
1758 b'showmin',
1746 default=dynamicdefault,
1759 default=dynamicdefault,
1747 )
1760 )
1748 coreconfigitem(
1761 coreconfigitem(
1749 b'profiling',
1762 b'profiling',
1750 b'showtime',
1763 b'showtime',
1751 default=True,
1764 default=True,
1752 )
1765 )
1753 coreconfigitem(
1766 coreconfigitem(
1754 b'profiling',
1767 b'profiling',
1755 b'sort',
1768 b'sort',
1756 default=b'inlinetime',
1769 default=b'inlinetime',
1757 )
1770 )
1758 coreconfigitem(
1771 coreconfigitem(
1759 b'profiling',
1772 b'profiling',
1760 b'statformat',
1773 b'statformat',
1761 default=b'hotpath',
1774 default=b'hotpath',
1762 )
1775 )
1763 coreconfigitem(
1776 coreconfigitem(
1764 b'profiling',
1777 b'profiling',
1765 b'time-track',
1778 b'time-track',
1766 default=dynamicdefault,
1779 default=dynamicdefault,
1767 )
1780 )
1768 coreconfigitem(
1781 coreconfigitem(
1769 b'profiling',
1782 b'profiling',
1770 b'type',
1783 b'type',
1771 default=b'stat',
1784 default=b'stat',
1772 )
1785 )
1773 coreconfigitem(
1786 coreconfigitem(
1774 b'progress',
1787 b'progress',
1775 b'assume-tty',
1788 b'assume-tty',
1776 default=False,
1789 default=False,
1777 )
1790 )
1778 coreconfigitem(
1791 coreconfigitem(
1779 b'progress',
1792 b'progress',
1780 b'changedelay',
1793 b'changedelay',
1781 default=1,
1794 default=1,
1782 )
1795 )
1783 coreconfigitem(
1796 coreconfigitem(
1784 b'progress',
1797 b'progress',
1785 b'clear-complete',
1798 b'clear-complete',
1786 default=True,
1799 default=True,
1787 )
1800 )
1788 coreconfigitem(
1801 coreconfigitem(
1789 b'progress',
1802 b'progress',
1790 b'debug',
1803 b'debug',
1791 default=False,
1804 default=False,
1792 )
1805 )
1793 coreconfigitem(
1806 coreconfigitem(
1794 b'progress',
1807 b'progress',
1795 b'delay',
1808 b'delay',
1796 default=3,
1809 default=3,
1797 )
1810 )
1798 coreconfigitem(
1811 coreconfigitem(
1799 b'progress',
1812 b'progress',
1800 b'disable',
1813 b'disable',
1801 default=False,
1814 default=False,
1802 )
1815 )
1803 coreconfigitem(
1816 coreconfigitem(
1804 b'progress',
1817 b'progress',
1805 b'estimateinterval',
1818 b'estimateinterval',
1806 default=60.0,
1819 default=60.0,
1807 )
1820 )
1808 coreconfigitem(
1821 coreconfigitem(
1809 b'progress',
1822 b'progress',
1810 b'format',
1823 b'format',
1811 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1824 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1812 )
1825 )
1813 coreconfigitem(
1826 coreconfigitem(
1814 b'progress',
1827 b'progress',
1815 b'refresh',
1828 b'refresh',
1816 default=0.1,
1829 default=0.1,
1817 )
1830 )
1818 coreconfigitem(
1831 coreconfigitem(
1819 b'progress',
1832 b'progress',
1820 b'width',
1833 b'width',
1821 default=dynamicdefault,
1834 default=dynamicdefault,
1822 )
1835 )
1823 coreconfigitem(
1836 coreconfigitem(
1824 b'pull',
1837 b'pull',
1825 b'confirm',
1838 b'confirm',
1826 default=False,
1839 default=False,
1827 )
1840 )
1828 coreconfigitem(
1841 coreconfigitem(
1829 b'push',
1842 b'push',
1830 b'pushvars.server',
1843 b'pushvars.server',
1831 default=False,
1844 default=False,
1832 )
1845 )
1833 coreconfigitem(
1846 coreconfigitem(
1834 b'rewrite',
1847 b'rewrite',
1835 b'backup-bundle',
1848 b'backup-bundle',
1836 default=True,
1849 default=True,
1837 alias=[(b'ui', b'history-editing-backup')],
1850 alias=[(b'ui', b'history-editing-backup')],
1838 )
1851 )
1839 coreconfigitem(
1852 coreconfigitem(
1840 b'rewrite',
1853 b'rewrite',
1841 b'update-timestamp',
1854 b'update-timestamp',
1842 default=False,
1855 default=False,
1843 )
1856 )
1844 coreconfigitem(
1857 coreconfigitem(
1845 b'rewrite',
1858 b'rewrite',
1846 b'empty-successor',
1859 b'empty-successor',
1847 default=b'skip',
1860 default=b'skip',
1848 experimental=True,
1861 experimental=True,
1849 )
1862 )
1850 coreconfigitem(
1863 coreconfigitem(
1851 b'storage',
1864 b'storage',
1852 b'new-repo-backend',
1865 b'new-repo-backend',
1853 default=b'revlogv1',
1866 default=b'revlogv1',
1854 experimental=True,
1867 experimental=True,
1855 )
1868 )
1856 coreconfigitem(
1869 coreconfigitem(
1857 b'storage',
1870 b'storage',
1858 b'revlog.optimize-delta-parent-choice',
1871 b'revlog.optimize-delta-parent-choice',
1859 default=True,
1872 default=True,
1860 alias=[(b'format', b'aggressivemergedeltas')],
1873 alias=[(b'format', b'aggressivemergedeltas')],
1861 )
1874 )
1862 # experimental as long as rust is experimental (or until a C version is implemented)
1875 # experimental as long as rust is experimental (or until a C version is implemented)
1863 coreconfigitem(
1876 coreconfigitem(
1864 b'storage',
1877 b'storage',
1865 b'revlog.persistent-nodemap.mmap',
1878 b'revlog.persistent-nodemap.mmap',
1866 default=True,
1879 default=True,
1867 )
1880 )
1868 # experimental as long as format.use-persistent-nodemap is.
1881 # experimental as long as format.use-persistent-nodemap is.
1869 coreconfigitem(
1882 coreconfigitem(
1870 b'storage',
1883 b'storage',
1871 b'revlog.persistent-nodemap.slow-path',
1884 b'revlog.persistent-nodemap.slow-path',
1872 default=b"abort",
1885 default=b"abort",
1873 )
1886 )
1874
1887
1875 coreconfigitem(
1888 coreconfigitem(
1876 b'storage',
1889 b'storage',
1877 b'revlog.reuse-external-delta',
1890 b'revlog.reuse-external-delta',
1878 default=True,
1891 default=True,
1879 )
1892 )
1880 coreconfigitem(
1893 coreconfigitem(
1881 b'storage',
1894 b'storage',
1882 b'revlog.reuse-external-delta-parent',
1895 b'revlog.reuse-external-delta-parent',
1883 default=None,
1896 default=None,
1884 )
1897 )
1885 coreconfigitem(
1898 coreconfigitem(
1886 b'storage',
1899 b'storage',
1887 b'revlog.zlib.level',
1900 b'revlog.zlib.level',
1888 default=None,
1901 default=None,
1889 )
1902 )
1890 coreconfigitem(
1903 coreconfigitem(
1891 b'storage',
1904 b'storage',
1892 b'revlog.zstd.level',
1905 b'revlog.zstd.level',
1893 default=None,
1906 default=None,
1894 )
1907 )
1895 coreconfigitem(
1908 coreconfigitem(
1896 b'server',
1909 b'server',
1897 b'bookmarks-pushkey-compat',
1910 b'bookmarks-pushkey-compat',
1898 default=True,
1911 default=True,
1899 )
1912 )
1900 coreconfigitem(
1913 coreconfigitem(
1901 b'server',
1914 b'server',
1902 b'bundle1',
1915 b'bundle1',
1903 default=True,
1916 default=True,
1904 )
1917 )
1905 coreconfigitem(
1918 coreconfigitem(
1906 b'server',
1919 b'server',
1907 b'bundle1gd',
1920 b'bundle1gd',
1908 default=None,
1921 default=None,
1909 )
1922 )
1910 coreconfigitem(
1923 coreconfigitem(
1911 b'server',
1924 b'server',
1912 b'bundle1.pull',
1925 b'bundle1.pull',
1913 default=None,
1926 default=None,
1914 )
1927 )
1915 coreconfigitem(
1928 coreconfigitem(
1916 b'server',
1929 b'server',
1917 b'bundle1gd.pull',
1930 b'bundle1gd.pull',
1918 default=None,
1931 default=None,
1919 )
1932 )
1920 coreconfigitem(
1933 coreconfigitem(
1921 b'server',
1934 b'server',
1922 b'bundle1.push',
1935 b'bundle1.push',
1923 default=None,
1936 default=None,
1924 )
1937 )
1925 coreconfigitem(
1938 coreconfigitem(
1926 b'server',
1939 b'server',
1927 b'bundle1gd.push',
1940 b'bundle1gd.push',
1928 default=None,
1941 default=None,
1929 )
1942 )
1930 coreconfigitem(
1943 coreconfigitem(
1931 b'server',
1944 b'server',
1932 b'bundle2.stream',
1945 b'bundle2.stream',
1933 default=True,
1946 default=True,
1934 alias=[(b'experimental', b'bundle2.stream')],
1947 alias=[(b'experimental', b'bundle2.stream')],
1935 )
1948 )
1936 coreconfigitem(
1949 coreconfigitem(
1937 b'server',
1950 b'server',
1938 b'compressionengines',
1951 b'compressionengines',
1939 default=list,
1952 default=list,
1940 )
1953 )
1941 coreconfigitem(
1954 coreconfigitem(
1942 b'server',
1955 b'server',
1943 b'concurrent-push-mode',
1956 b'concurrent-push-mode',
1944 default=b'check-related',
1957 default=b'check-related',
1945 )
1958 )
1946 coreconfigitem(
1959 coreconfigitem(
1947 b'server',
1960 b'server',
1948 b'disablefullbundle',
1961 b'disablefullbundle',
1949 default=False,
1962 default=False,
1950 )
1963 )
1951 coreconfigitem(
1964 coreconfigitem(
1952 b'server',
1965 b'server',
1953 b'maxhttpheaderlen',
1966 b'maxhttpheaderlen',
1954 default=1024,
1967 default=1024,
1955 )
1968 )
1956 coreconfigitem(
1969 coreconfigitem(
1957 b'server',
1970 b'server',
1958 b'pullbundle',
1971 b'pullbundle',
1959 default=False,
1972 default=False,
1960 )
1973 )
1961 coreconfigitem(
1974 coreconfigitem(
1962 b'server',
1975 b'server',
1963 b'preferuncompressed',
1976 b'preferuncompressed',
1964 default=False,
1977 default=False,
1965 )
1978 )
1966 coreconfigitem(
1979 coreconfigitem(
1967 b'server',
1980 b'server',
1968 b'streamunbundle',
1981 b'streamunbundle',
1969 default=False,
1982 default=False,
1970 )
1983 )
1971 coreconfigitem(
1984 coreconfigitem(
1972 b'server',
1985 b'server',
1973 b'uncompressed',
1986 b'uncompressed',
1974 default=True,
1987 default=True,
1975 )
1988 )
1976 coreconfigitem(
1989 coreconfigitem(
1977 b'server',
1990 b'server',
1978 b'uncompressedallowsecret',
1991 b'uncompressedallowsecret',
1979 default=False,
1992 default=False,
1980 )
1993 )
1981 coreconfigitem(
1994 coreconfigitem(
1982 b'server',
1995 b'server',
1983 b'view',
1996 b'view',
1984 default=b'served',
1997 default=b'served',
1985 )
1998 )
1986 coreconfigitem(
1999 coreconfigitem(
1987 b'server',
2000 b'server',
1988 b'validate',
2001 b'validate',
1989 default=False,
2002 default=False,
1990 )
2003 )
1991 coreconfigitem(
2004 coreconfigitem(
1992 b'server',
2005 b'server',
1993 b'zliblevel',
2006 b'zliblevel',
1994 default=-1,
2007 default=-1,
1995 )
2008 )
1996 coreconfigitem(
2009 coreconfigitem(
1997 b'server',
2010 b'server',
1998 b'zstdlevel',
2011 b'zstdlevel',
1999 default=3,
2012 default=3,
2000 )
2013 )
2001 coreconfigitem(
2014 coreconfigitem(
2002 b'share',
2015 b'share',
2003 b'pool',
2016 b'pool',
2004 default=None,
2017 default=None,
2005 )
2018 )
2006 coreconfigitem(
2019 coreconfigitem(
2007 b'share',
2020 b'share',
2008 b'poolnaming',
2021 b'poolnaming',
2009 default=b'identity',
2022 default=b'identity',
2010 )
2023 )
2011 coreconfigitem(
2024 coreconfigitem(
2012 b'share',
2025 b'share',
2013 b'safe-mismatch.source-not-safe',
2026 b'safe-mismatch.source-not-safe',
2014 default=b'abort',
2027 default=b'abort',
2015 )
2028 )
2016 coreconfigitem(
2029 coreconfigitem(
2017 b'share',
2030 b'share',
2018 b'safe-mismatch.source-safe',
2031 b'safe-mismatch.source-safe',
2019 default=b'abort',
2032 default=b'abort',
2020 )
2033 )
2021 coreconfigitem(
2034 coreconfigitem(
2022 b'share',
2035 b'share',
2023 b'safe-mismatch.source-not-safe.warn',
2036 b'safe-mismatch.source-not-safe.warn',
2024 default=True,
2037 default=True,
2025 )
2038 )
2026 coreconfigitem(
2039 coreconfigitem(
2027 b'share',
2040 b'share',
2028 b'safe-mismatch.source-safe.warn',
2041 b'safe-mismatch.source-safe.warn',
2029 default=True,
2042 default=True,
2030 )
2043 )
2031 coreconfigitem(
2044 coreconfigitem(
2032 b'shelve',
2045 b'shelve',
2033 b'maxbackups',
2046 b'maxbackups',
2034 default=10,
2047 default=10,
2035 )
2048 )
2036 coreconfigitem(
2049 coreconfigitem(
2037 b'smtp',
2050 b'smtp',
2038 b'host',
2051 b'host',
2039 default=None,
2052 default=None,
2040 )
2053 )
2041 coreconfigitem(
2054 coreconfigitem(
2042 b'smtp',
2055 b'smtp',
2043 b'local_hostname',
2056 b'local_hostname',
2044 default=None,
2057 default=None,
2045 )
2058 )
2046 coreconfigitem(
2059 coreconfigitem(
2047 b'smtp',
2060 b'smtp',
2048 b'password',
2061 b'password',
2049 default=None,
2062 default=None,
2050 )
2063 )
2051 coreconfigitem(
2064 coreconfigitem(
2052 b'smtp',
2065 b'smtp',
2053 b'port',
2066 b'port',
2054 default=dynamicdefault,
2067 default=dynamicdefault,
2055 )
2068 )
2056 coreconfigitem(
2069 coreconfigitem(
2057 b'smtp',
2070 b'smtp',
2058 b'tls',
2071 b'tls',
2059 default=b'none',
2072 default=b'none',
2060 )
2073 )
2061 coreconfigitem(
2074 coreconfigitem(
2062 b'smtp',
2075 b'smtp',
2063 b'username',
2076 b'username',
2064 default=None,
2077 default=None,
2065 )
2078 )
2066 coreconfigitem(
2079 coreconfigitem(
2067 b'sparse',
2080 b'sparse',
2068 b'missingwarning',
2081 b'missingwarning',
2069 default=True,
2082 default=True,
2070 experimental=True,
2083 experimental=True,
2071 )
2084 )
2072 coreconfigitem(
2085 coreconfigitem(
2073 b'subrepos',
2086 b'subrepos',
2074 b'allowed',
2087 b'allowed',
2075 default=dynamicdefault, # to make backporting simpler
2088 default=dynamicdefault, # to make backporting simpler
2076 )
2089 )
2077 coreconfigitem(
2090 coreconfigitem(
2078 b'subrepos',
2091 b'subrepos',
2079 b'hg:allowed',
2092 b'hg:allowed',
2080 default=dynamicdefault,
2093 default=dynamicdefault,
2081 )
2094 )
2082 coreconfigitem(
2095 coreconfigitem(
2083 b'subrepos',
2096 b'subrepos',
2084 b'git:allowed',
2097 b'git:allowed',
2085 default=dynamicdefault,
2098 default=dynamicdefault,
2086 )
2099 )
2087 coreconfigitem(
2100 coreconfigitem(
2088 b'subrepos',
2101 b'subrepos',
2089 b'svn:allowed',
2102 b'svn:allowed',
2090 default=dynamicdefault,
2103 default=dynamicdefault,
2091 )
2104 )
2092 coreconfigitem(
2105 coreconfigitem(
2093 b'templates',
2106 b'templates',
2094 b'.*',
2107 b'.*',
2095 default=None,
2108 default=None,
2096 generic=True,
2109 generic=True,
2097 )
2110 )
2098 coreconfigitem(
2111 coreconfigitem(
2099 b'templateconfig',
2112 b'templateconfig',
2100 b'.*',
2113 b'.*',
2101 default=dynamicdefault,
2114 default=dynamicdefault,
2102 generic=True,
2115 generic=True,
2103 )
2116 )
2104 coreconfigitem(
2117 coreconfigitem(
2105 b'trusted',
2118 b'trusted',
2106 b'groups',
2119 b'groups',
2107 default=list,
2120 default=list,
2108 )
2121 )
2109 coreconfigitem(
2122 coreconfigitem(
2110 b'trusted',
2123 b'trusted',
2111 b'users',
2124 b'users',
2112 default=list,
2125 default=list,
2113 )
2126 )
2114 coreconfigitem(
2127 coreconfigitem(
2115 b'ui',
2128 b'ui',
2116 b'_usedassubrepo',
2129 b'_usedassubrepo',
2117 default=False,
2130 default=False,
2118 )
2131 )
2119 coreconfigitem(
2132 coreconfigitem(
2120 b'ui',
2133 b'ui',
2121 b'allowemptycommit',
2134 b'allowemptycommit',
2122 default=False,
2135 default=False,
2123 )
2136 )
2124 coreconfigitem(
2137 coreconfigitem(
2125 b'ui',
2138 b'ui',
2126 b'archivemeta',
2139 b'archivemeta',
2127 default=True,
2140 default=True,
2128 )
2141 )
2129 coreconfigitem(
2142 coreconfigitem(
2130 b'ui',
2143 b'ui',
2131 b'askusername',
2144 b'askusername',
2132 default=False,
2145 default=False,
2133 )
2146 )
2134 coreconfigitem(
2147 coreconfigitem(
2135 b'ui',
2148 b'ui',
2136 b'available-memory',
2149 b'available-memory',
2137 default=None,
2150 default=None,
2138 )
2151 )
2139
2152
2140 coreconfigitem(
2153 coreconfigitem(
2141 b'ui',
2154 b'ui',
2142 b'clonebundlefallback',
2155 b'clonebundlefallback',
2143 default=False,
2156 default=False,
2144 )
2157 )
2145 coreconfigitem(
2158 coreconfigitem(
2146 b'ui',
2159 b'ui',
2147 b'clonebundleprefers',
2160 b'clonebundleprefers',
2148 default=list,
2161 default=list,
2149 )
2162 )
2150 coreconfigitem(
2163 coreconfigitem(
2151 b'ui',
2164 b'ui',
2152 b'clonebundles',
2165 b'clonebundles',
2153 default=True,
2166 default=True,
2154 )
2167 )
2155 coreconfigitem(
2168 coreconfigitem(
2156 b'ui',
2169 b'ui',
2157 b'color',
2170 b'color',
2158 default=b'auto',
2171 default=b'auto',
2159 )
2172 )
2160 coreconfigitem(
2173 coreconfigitem(
2161 b'ui',
2174 b'ui',
2162 b'commitsubrepos',
2175 b'commitsubrepos',
2163 default=False,
2176 default=False,
2164 )
2177 )
2165 coreconfigitem(
2178 coreconfigitem(
2166 b'ui',
2179 b'ui',
2167 b'debug',
2180 b'debug',
2168 default=False,
2181 default=False,
2169 )
2182 )
2170 coreconfigitem(
2183 coreconfigitem(
2171 b'ui',
2184 b'ui',
2172 b'debugger',
2185 b'debugger',
2173 default=None,
2186 default=None,
2174 )
2187 )
2175 coreconfigitem(
2188 coreconfigitem(
2176 b'ui',
2189 b'ui',
2177 b'editor',
2190 b'editor',
2178 default=dynamicdefault,
2191 default=dynamicdefault,
2179 )
2192 )
2180 coreconfigitem(
2193 coreconfigitem(
2181 b'ui',
2194 b'ui',
2182 b'detailed-exit-code',
2195 b'detailed-exit-code',
2183 default=False,
2196 default=False,
2184 experimental=True,
2197 experimental=True,
2185 )
2198 )
2186 coreconfigitem(
2199 coreconfigitem(
2187 b'ui',
2200 b'ui',
2188 b'fallbackencoding',
2201 b'fallbackencoding',
2189 default=None,
2202 default=None,
2190 )
2203 )
2191 coreconfigitem(
2204 coreconfigitem(
2192 b'ui',
2205 b'ui',
2193 b'forcecwd',
2206 b'forcecwd',
2194 default=None,
2207 default=None,
2195 )
2208 )
2196 coreconfigitem(
2209 coreconfigitem(
2197 b'ui',
2210 b'ui',
2198 b'forcemerge',
2211 b'forcemerge',
2199 default=None,
2212 default=None,
2200 )
2213 )
2201 coreconfigitem(
2214 coreconfigitem(
2202 b'ui',
2215 b'ui',
2203 b'formatdebug',
2216 b'formatdebug',
2204 default=False,
2217 default=False,
2205 )
2218 )
2206 coreconfigitem(
2219 coreconfigitem(
2207 b'ui',
2220 b'ui',
2208 b'formatjson',
2221 b'formatjson',
2209 default=False,
2222 default=False,
2210 )
2223 )
2211 coreconfigitem(
2224 coreconfigitem(
2212 b'ui',
2225 b'ui',
2213 b'formatted',
2226 b'formatted',
2214 default=None,
2227 default=None,
2215 )
2228 )
2216 coreconfigitem(
2229 coreconfigitem(
2217 b'ui',
2230 b'ui',
2218 b'interactive',
2231 b'interactive',
2219 default=None,
2232 default=None,
2220 )
2233 )
2221 coreconfigitem(
2234 coreconfigitem(
2222 b'ui',
2235 b'ui',
2223 b'interface',
2236 b'interface',
2224 default=None,
2237 default=None,
2225 )
2238 )
2226 coreconfigitem(
2239 coreconfigitem(
2227 b'ui',
2240 b'ui',
2228 b'interface.chunkselector',
2241 b'interface.chunkselector',
2229 default=None,
2242 default=None,
2230 )
2243 )
2231 coreconfigitem(
2244 coreconfigitem(
2232 b'ui',
2245 b'ui',
2233 b'large-file-limit',
2246 b'large-file-limit',
2234 default=10000000,
2247 default=10000000,
2235 )
2248 )
2236 coreconfigitem(
2249 coreconfigitem(
2237 b'ui',
2250 b'ui',
2238 b'logblockedtimes',
2251 b'logblockedtimes',
2239 default=False,
2252 default=False,
2240 )
2253 )
2241 coreconfigitem(
2254 coreconfigitem(
2242 b'ui',
2255 b'ui',
2243 b'merge',
2256 b'merge',
2244 default=None,
2257 default=None,
2245 )
2258 )
2246 coreconfigitem(
2259 coreconfigitem(
2247 b'ui',
2260 b'ui',
2248 b'mergemarkers',
2261 b'mergemarkers',
2249 default=b'basic',
2262 default=b'basic',
2250 )
2263 )
2251 coreconfigitem(
2264 coreconfigitem(
2252 b'ui',
2265 b'ui',
2253 b'message-output',
2266 b'message-output',
2254 default=b'stdio',
2267 default=b'stdio',
2255 )
2268 )
2256 coreconfigitem(
2269 coreconfigitem(
2257 b'ui',
2270 b'ui',
2258 b'nontty',
2271 b'nontty',
2259 default=False,
2272 default=False,
2260 )
2273 )
2261 coreconfigitem(
2274 coreconfigitem(
2262 b'ui',
2275 b'ui',
2263 b'origbackuppath',
2276 b'origbackuppath',
2264 default=None,
2277 default=None,
2265 )
2278 )
2266 coreconfigitem(
2279 coreconfigitem(
2267 b'ui',
2280 b'ui',
2268 b'paginate',
2281 b'paginate',
2269 default=True,
2282 default=True,
2270 )
2283 )
2271 coreconfigitem(
2284 coreconfigitem(
2272 b'ui',
2285 b'ui',
2273 b'patch',
2286 b'patch',
2274 default=None,
2287 default=None,
2275 )
2288 )
2276 coreconfigitem(
2289 coreconfigitem(
2277 b'ui',
2290 b'ui',
2278 b'portablefilenames',
2291 b'portablefilenames',
2279 default=b'warn',
2292 default=b'warn',
2280 )
2293 )
2281 coreconfigitem(
2294 coreconfigitem(
2282 b'ui',
2295 b'ui',
2283 b'promptecho',
2296 b'promptecho',
2284 default=False,
2297 default=False,
2285 )
2298 )
2286 coreconfigitem(
2299 coreconfigitem(
2287 b'ui',
2300 b'ui',
2288 b'quiet',
2301 b'quiet',
2289 default=False,
2302 default=False,
2290 )
2303 )
2291 coreconfigitem(
2304 coreconfigitem(
2292 b'ui',
2305 b'ui',
2293 b'quietbookmarkmove',
2306 b'quietbookmarkmove',
2294 default=False,
2307 default=False,
2295 )
2308 )
2296 coreconfigitem(
2309 coreconfigitem(
2297 b'ui',
2310 b'ui',
2298 b'relative-paths',
2311 b'relative-paths',
2299 default=b'legacy',
2312 default=b'legacy',
2300 )
2313 )
2301 coreconfigitem(
2314 coreconfigitem(
2302 b'ui',
2315 b'ui',
2303 b'remotecmd',
2316 b'remotecmd',
2304 default=b'hg',
2317 default=b'hg',
2305 )
2318 )
2306 coreconfigitem(
2319 coreconfigitem(
2307 b'ui',
2320 b'ui',
2308 b'report_untrusted',
2321 b'report_untrusted',
2309 default=True,
2322 default=True,
2310 )
2323 )
2311 coreconfigitem(
2324 coreconfigitem(
2312 b'ui',
2325 b'ui',
2313 b'rollback',
2326 b'rollback',
2314 default=True,
2327 default=True,
2315 )
2328 )
2316 coreconfigitem(
2329 coreconfigitem(
2317 b'ui',
2330 b'ui',
2318 b'signal-safe-lock',
2331 b'signal-safe-lock',
2319 default=True,
2332 default=True,
2320 )
2333 )
2321 coreconfigitem(
2334 coreconfigitem(
2322 b'ui',
2335 b'ui',
2323 b'slash',
2336 b'slash',
2324 default=False,
2337 default=False,
2325 )
2338 )
2326 coreconfigitem(
2339 coreconfigitem(
2327 b'ui',
2340 b'ui',
2328 b'ssh',
2341 b'ssh',
2329 default=b'ssh',
2342 default=b'ssh',
2330 )
2343 )
2331 coreconfigitem(
2344 coreconfigitem(
2332 b'ui',
2345 b'ui',
2333 b'ssherrorhint',
2346 b'ssherrorhint',
2334 default=None,
2347 default=None,
2335 )
2348 )
2336 coreconfigitem(
2349 coreconfigitem(
2337 b'ui',
2350 b'ui',
2338 b'statuscopies',
2351 b'statuscopies',
2339 default=False,
2352 default=False,
2340 )
2353 )
2341 coreconfigitem(
2354 coreconfigitem(
2342 b'ui',
2355 b'ui',
2343 b'strict',
2356 b'strict',
2344 default=False,
2357 default=False,
2345 )
2358 )
2346 coreconfigitem(
2359 coreconfigitem(
2347 b'ui',
2360 b'ui',
2348 b'style',
2361 b'style',
2349 default=b'',
2362 default=b'',
2350 )
2363 )
2351 coreconfigitem(
2364 coreconfigitem(
2352 b'ui',
2365 b'ui',
2353 b'supportcontact',
2366 b'supportcontact',
2354 default=None,
2367 default=None,
2355 )
2368 )
2356 coreconfigitem(
2369 coreconfigitem(
2357 b'ui',
2370 b'ui',
2358 b'textwidth',
2371 b'textwidth',
2359 default=78,
2372 default=78,
2360 )
2373 )
2361 coreconfigitem(
2374 coreconfigitem(
2362 b'ui',
2375 b'ui',
2363 b'timeout',
2376 b'timeout',
2364 default=b'600',
2377 default=b'600',
2365 )
2378 )
2366 coreconfigitem(
2379 coreconfigitem(
2367 b'ui',
2380 b'ui',
2368 b'timeout.warn',
2381 b'timeout.warn',
2369 default=0,
2382 default=0,
2370 )
2383 )
2371 coreconfigitem(
2384 coreconfigitem(
2372 b'ui',
2385 b'ui',
2373 b'timestamp-output',
2386 b'timestamp-output',
2374 default=False,
2387 default=False,
2375 )
2388 )
2376 coreconfigitem(
2389 coreconfigitem(
2377 b'ui',
2390 b'ui',
2378 b'traceback',
2391 b'traceback',
2379 default=False,
2392 default=False,
2380 )
2393 )
2381 coreconfigitem(
2394 coreconfigitem(
2382 b'ui',
2395 b'ui',
2383 b'tweakdefaults',
2396 b'tweakdefaults',
2384 default=False,
2397 default=False,
2385 )
2398 )
2386 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2399 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2387 coreconfigitem(
2400 coreconfigitem(
2388 b'ui',
2401 b'ui',
2389 b'verbose',
2402 b'verbose',
2390 default=False,
2403 default=False,
2391 )
2404 )
2392 coreconfigitem(
2405 coreconfigitem(
2393 b'verify',
2406 b'verify',
2394 b'skipflags',
2407 b'skipflags',
2395 default=None,
2408 default=None,
2396 )
2409 )
2397 coreconfigitem(
2410 coreconfigitem(
2398 b'web',
2411 b'web',
2399 b'allowbz2',
2412 b'allowbz2',
2400 default=False,
2413 default=False,
2401 )
2414 )
2402 coreconfigitem(
2415 coreconfigitem(
2403 b'web',
2416 b'web',
2404 b'allowgz',
2417 b'allowgz',
2405 default=False,
2418 default=False,
2406 )
2419 )
2407 coreconfigitem(
2420 coreconfigitem(
2408 b'web',
2421 b'web',
2409 b'allow-pull',
2422 b'allow-pull',
2410 alias=[(b'web', b'allowpull')],
2423 alias=[(b'web', b'allowpull')],
2411 default=True,
2424 default=True,
2412 )
2425 )
2413 coreconfigitem(
2426 coreconfigitem(
2414 b'web',
2427 b'web',
2415 b'allow-push',
2428 b'allow-push',
2416 alias=[(b'web', b'allow_push')],
2429 alias=[(b'web', b'allow_push')],
2417 default=list,
2430 default=list,
2418 )
2431 )
2419 coreconfigitem(
2432 coreconfigitem(
2420 b'web',
2433 b'web',
2421 b'allowzip',
2434 b'allowzip',
2422 default=False,
2435 default=False,
2423 )
2436 )
2424 coreconfigitem(
2437 coreconfigitem(
2425 b'web',
2438 b'web',
2426 b'archivesubrepos',
2439 b'archivesubrepos',
2427 default=False,
2440 default=False,
2428 )
2441 )
2429 coreconfigitem(
2442 coreconfigitem(
2430 b'web',
2443 b'web',
2431 b'cache',
2444 b'cache',
2432 default=True,
2445 default=True,
2433 )
2446 )
2434 coreconfigitem(
2447 coreconfigitem(
2435 b'web',
2448 b'web',
2436 b'comparisoncontext',
2449 b'comparisoncontext',
2437 default=5,
2450 default=5,
2438 )
2451 )
2439 coreconfigitem(
2452 coreconfigitem(
2440 b'web',
2453 b'web',
2441 b'contact',
2454 b'contact',
2442 default=None,
2455 default=None,
2443 )
2456 )
2444 coreconfigitem(
2457 coreconfigitem(
2445 b'web',
2458 b'web',
2446 b'deny_push',
2459 b'deny_push',
2447 default=list,
2460 default=list,
2448 )
2461 )
2449 coreconfigitem(
2462 coreconfigitem(
2450 b'web',
2463 b'web',
2451 b'guessmime',
2464 b'guessmime',
2452 default=False,
2465 default=False,
2453 )
2466 )
2454 coreconfigitem(
2467 coreconfigitem(
2455 b'web',
2468 b'web',
2456 b'hidden',
2469 b'hidden',
2457 default=False,
2470 default=False,
2458 )
2471 )
2459 coreconfigitem(
2472 coreconfigitem(
2460 b'web',
2473 b'web',
2461 b'labels',
2474 b'labels',
2462 default=list,
2475 default=list,
2463 )
2476 )
2464 coreconfigitem(
2477 coreconfigitem(
2465 b'web',
2478 b'web',
2466 b'logoimg',
2479 b'logoimg',
2467 default=b'hglogo.png',
2480 default=b'hglogo.png',
2468 )
2481 )
2469 coreconfigitem(
2482 coreconfigitem(
2470 b'web',
2483 b'web',
2471 b'logourl',
2484 b'logourl',
2472 default=b'https://mercurial-scm.org/',
2485 default=b'https://mercurial-scm.org/',
2473 )
2486 )
2474 coreconfigitem(
2487 coreconfigitem(
2475 b'web',
2488 b'web',
2476 b'accesslog',
2489 b'accesslog',
2477 default=b'-',
2490 default=b'-',
2478 )
2491 )
2479 coreconfigitem(
2492 coreconfigitem(
2480 b'web',
2493 b'web',
2481 b'address',
2494 b'address',
2482 default=b'',
2495 default=b'',
2483 )
2496 )
2484 coreconfigitem(
2497 coreconfigitem(
2485 b'web',
2498 b'web',
2486 b'allow-archive',
2499 b'allow-archive',
2487 alias=[(b'web', b'allow_archive')],
2500 alias=[(b'web', b'allow_archive')],
2488 default=list,
2501 default=list,
2489 )
2502 )
2490 coreconfigitem(
2503 coreconfigitem(
2491 b'web',
2504 b'web',
2492 b'allow_read',
2505 b'allow_read',
2493 default=list,
2506 default=list,
2494 )
2507 )
2495 coreconfigitem(
2508 coreconfigitem(
2496 b'web',
2509 b'web',
2497 b'baseurl',
2510 b'baseurl',
2498 default=None,
2511 default=None,
2499 )
2512 )
2500 coreconfigitem(
2513 coreconfigitem(
2501 b'web',
2514 b'web',
2502 b'cacerts',
2515 b'cacerts',
2503 default=None,
2516 default=None,
2504 )
2517 )
2505 coreconfigitem(
2518 coreconfigitem(
2506 b'web',
2519 b'web',
2507 b'certificate',
2520 b'certificate',
2508 default=None,
2521 default=None,
2509 )
2522 )
2510 coreconfigitem(
2523 coreconfigitem(
2511 b'web',
2524 b'web',
2512 b'collapse',
2525 b'collapse',
2513 default=False,
2526 default=False,
2514 )
2527 )
2515 coreconfigitem(
2528 coreconfigitem(
2516 b'web',
2529 b'web',
2517 b'csp',
2530 b'csp',
2518 default=None,
2531 default=None,
2519 )
2532 )
2520 coreconfigitem(
2533 coreconfigitem(
2521 b'web',
2534 b'web',
2522 b'deny_read',
2535 b'deny_read',
2523 default=list,
2536 default=list,
2524 )
2537 )
2525 coreconfigitem(
2538 coreconfigitem(
2526 b'web',
2539 b'web',
2527 b'descend',
2540 b'descend',
2528 default=True,
2541 default=True,
2529 )
2542 )
2530 coreconfigitem(
2543 coreconfigitem(
2531 b'web',
2544 b'web',
2532 b'description',
2545 b'description',
2533 default=b"",
2546 default=b"",
2534 )
2547 )
2535 coreconfigitem(
2548 coreconfigitem(
2536 b'web',
2549 b'web',
2537 b'encoding',
2550 b'encoding',
2538 default=lambda: encoding.encoding,
2551 default=lambda: encoding.encoding,
2539 )
2552 )
2540 coreconfigitem(
2553 coreconfigitem(
2541 b'web',
2554 b'web',
2542 b'errorlog',
2555 b'errorlog',
2543 default=b'-',
2556 default=b'-',
2544 )
2557 )
2545 coreconfigitem(
2558 coreconfigitem(
2546 b'web',
2559 b'web',
2547 b'ipv6',
2560 b'ipv6',
2548 default=False,
2561 default=False,
2549 )
2562 )
2550 coreconfigitem(
2563 coreconfigitem(
2551 b'web',
2564 b'web',
2552 b'maxchanges',
2565 b'maxchanges',
2553 default=10,
2566 default=10,
2554 )
2567 )
2555 coreconfigitem(
2568 coreconfigitem(
2556 b'web',
2569 b'web',
2557 b'maxfiles',
2570 b'maxfiles',
2558 default=10,
2571 default=10,
2559 )
2572 )
2560 coreconfigitem(
2573 coreconfigitem(
2561 b'web',
2574 b'web',
2562 b'maxshortchanges',
2575 b'maxshortchanges',
2563 default=60,
2576 default=60,
2564 )
2577 )
2565 coreconfigitem(
2578 coreconfigitem(
2566 b'web',
2579 b'web',
2567 b'motd',
2580 b'motd',
2568 default=b'',
2581 default=b'',
2569 )
2582 )
2570 coreconfigitem(
2583 coreconfigitem(
2571 b'web',
2584 b'web',
2572 b'name',
2585 b'name',
2573 default=dynamicdefault,
2586 default=dynamicdefault,
2574 )
2587 )
2575 coreconfigitem(
2588 coreconfigitem(
2576 b'web',
2589 b'web',
2577 b'port',
2590 b'port',
2578 default=8000,
2591 default=8000,
2579 )
2592 )
2580 coreconfigitem(
2593 coreconfigitem(
2581 b'web',
2594 b'web',
2582 b'prefix',
2595 b'prefix',
2583 default=b'',
2596 default=b'',
2584 )
2597 )
2585 coreconfigitem(
2598 coreconfigitem(
2586 b'web',
2599 b'web',
2587 b'push_ssl',
2600 b'push_ssl',
2588 default=True,
2601 default=True,
2589 )
2602 )
2590 coreconfigitem(
2603 coreconfigitem(
2591 b'web',
2604 b'web',
2592 b'refreshinterval',
2605 b'refreshinterval',
2593 default=20,
2606 default=20,
2594 )
2607 )
2595 coreconfigitem(
2608 coreconfigitem(
2596 b'web',
2609 b'web',
2597 b'server-header',
2610 b'server-header',
2598 default=None,
2611 default=None,
2599 )
2612 )
2600 coreconfigitem(
2613 coreconfigitem(
2601 b'web',
2614 b'web',
2602 b'static',
2615 b'static',
2603 default=None,
2616 default=None,
2604 )
2617 )
2605 coreconfigitem(
2618 coreconfigitem(
2606 b'web',
2619 b'web',
2607 b'staticurl',
2620 b'staticurl',
2608 default=None,
2621 default=None,
2609 )
2622 )
2610 coreconfigitem(
2623 coreconfigitem(
2611 b'web',
2624 b'web',
2612 b'stripes',
2625 b'stripes',
2613 default=1,
2626 default=1,
2614 )
2627 )
2615 coreconfigitem(
2628 coreconfigitem(
2616 b'web',
2629 b'web',
2617 b'style',
2630 b'style',
2618 default=b'paper',
2631 default=b'paper',
2619 )
2632 )
2620 coreconfigitem(
2633 coreconfigitem(
2621 b'web',
2634 b'web',
2622 b'templates',
2635 b'templates',
2623 default=None,
2636 default=None,
2624 )
2637 )
2625 coreconfigitem(
2638 coreconfigitem(
2626 b'web',
2639 b'web',
2627 b'view',
2640 b'view',
2628 default=b'served',
2641 default=b'served',
2629 experimental=True,
2642 experimental=True,
2630 )
2643 )
2631 coreconfigitem(
2644 coreconfigitem(
2632 b'worker',
2645 b'worker',
2633 b'backgroundclose',
2646 b'backgroundclose',
2634 default=dynamicdefault,
2647 default=dynamicdefault,
2635 )
2648 )
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
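# (Hence the default just below: 512 - 128 == 384.)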
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem(
    b'worker',
    b'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem(
    b'worker',
    b'backgroundclosethreadcount',
    default=4,
)
coreconfigitem(
    b'worker',
    b'enabled',
    default=True,
)
coreconfigitem(
    b'worker',
    b'numcpus',
    default=None,
)

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)
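# Editor's note (illustration only, not part of this changeset): registering
# an item above is what gives it a default at read time. A caller such as
# `repo.ui.configbool(b'rebase', b'singletransaction')` then gets False
# unless the user overrides the option in a config file.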
@@ -1,3196 +1,3214 b''
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import binascii
import collections
import contextlib
import errno
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    ALL_KINDS,
    FEATURES_BY_VERSION,
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    INDEX_HEADER,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
    SUPPORTED_FLAGS,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_HASCOPIESINFO,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    docket as docketutil,
    flagutil,
    nodemap as nodemaputil,
    revlogv0,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.

REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False


def ellipsiswriteprocessor(rl, text):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)


def offset_type(offset, type):
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)

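# Editor's illustration (not part of the original file): offset_type() packs
# the byte offset into the high bits and the 16-bit flag field into the low
# bits of a single integer, e.g.:
#
#   offset_type(10, 0) == 10 << 16 == 655360
#   offset_type(10, 0) >> 16 == 10      # recover the offset
#   offset_type(10, 0) & 0xFFFF == 0    # recover the flags
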
def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)

# True if a fast implementation for persistent-nodemap is available
#
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance considerations (and a
# wheelbarrow of other slowness sources)
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
    parsers, 'BaseIndexObject'
)


@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


def parse_index_v1(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline)
    return index, cache


def parse_index_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, revlogv2=True)
    return index, cache


if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    parse_index_v1_nodemap = None


def parse_index_v1_mixed(data, inline):
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache


# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF


class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.

    `concurrencychecker` is an optional function that receives 3 arguments: a
    file handle, a filename, and an expected position. It should check whether
    the current position in the file handle is valid, and log/warn/fail (by
    raising).
    """

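    # Editor's sketch (not part of the original source): conceptually,
    # reading a revision walks its delta chain: start from the chain's base
    # fulltext and apply each stored delta in order, something like
    #
    #   text = base_fulltext
    #   for rev in chain:
    #       text = mdiff.patch(text, delta_for(rev))
    #
    # `mdiff.patch` is the real helper; `base_fulltext` and `delta_for` are
    # stand-in names for fetching a revision's stored data. The ~2x length
    # bound on delta runs described above is what keeps this loop short.
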
    _flagserrorclass = error.RevlogError

    def __init__(
        self,
        opener,
        target,
        radix,
        postfix=None,
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
        concurrencychecker=None,
    ):
303 """
304 """
304 create a revlog object
305 create a revlog object
305
306
306 opener is a function that abstracts the file opening operation
307 opener is a function that abstracts the file opening operation
307 and can be used to implement COW semantics or the like.
308 and can be used to implement COW semantics or the like.
308
309
309 `target`: a (KIND, ID) tuple that identify the content stored in
310 `target`: a (KIND, ID) tuple that identify the content stored in
310 this revlog. It help the rest of the code to understand what the revlog
311 this revlog. It help the rest of the code to understand what the revlog
311 is about without having to resort to heuristic and index filename
312 is about without having to resort to heuristic and index filename
312 analysis. Note: that this must be reliably be set by normal code, but
313 analysis. Note: that this must be reliably be set by normal code, but
313 that test, debug, or performance measurement code might not set this to
314 that test, debug, or performance measurement code might not set this to
314 accurate value.
315 accurate value.
315 """
316 """
        self.upperboundcomp = upperboundcomp

        self.radix = radix

        self._docket_file = None
        self._indexfile = None
        self._datafile = None
        self._nodemap_file = None
        self.postfix = postfix
        self.opener = opener
        if persistentnodemap:
            self._nodemap_file = nodemaputil.get_nodemap_file(self)

        assert target[0] in ALL_KINDS
        assert len(target) == 2
        self.target = target
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = None
        self._docket = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._compengine = b'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self.hassidedata = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None
        # prevent nesting of addgroup
        self._adding_group = None

        self._loadindex()

        self._concurrencychecker = concurrencychecker

    def _init_opts(self):
374 """process options (from above/config) to setup associated default revlog mode
377 """process options (from above/config) to setup associated default revlog mode
375
378
376 These values might be affected when actually reading on disk information.
379 These values might be affected when actually reading on disk information.
377
380
378 The relevant values are returned for use in _loadindex().
381 The relevant values are returned for use in _loadindex().
379
382
380 * newversionflags:
383 * newversionflags:
381 version header to use if we need to create a new revlog
384 version header to use if we need to create a new revlog
382
385
383 * mmapindexthreshold:
386 * mmapindexthreshold:
384 minimal index size for start to use mmap
387 minimal index size for start to use mmap
385
388
386 * force_nodemap:
389 * force_nodemap:
387 force the usage of a "development" version of the nodemap code
390 force the usage of a "development" version of the nodemap code
388 """
391 """
        mmapindexthreshold = None
        opts = self.opener.options

        if b'revlogv2' in opts:
            new_header = REVLOGV2 | FLAG_INLINE_DATA
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap

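    # Editor's note (illustration only, not original code): the expression
    # `x & (x - 1)` in the check above is the standard power-of-two test.
    # For the default x == 65536 (1 << 16) it yields 0, so the check passes;
    # for x == 65537 it is non-zero and the RevlogError is raised.
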
453 def _get_data(self, filepath, mmap_threshold):
456 def _get_data(self, filepath, mmap_threshold):
454 """return a file content with or without mmap
457 """return a file content with or without mmap
455
458
456 If the file is missing return the empty string"""
459 If the file is missing return the empty string"""
457 try:
460 try:
458 with self.opener(filepath) as fp:
461 with self.opener(filepath) as fp:
459 if mmap_threshold is not None:
462 if mmap_threshold is not None:
460 file_size = self.opener.fstat(fp).st_size
463 file_size = self.opener.fstat(fp).st_size
461 if file_size >= mmap_threshold:
464 if file_size >= mmap_threshold:
462 # TODO: should .close() to release resources without
465 # TODO: should .close() to release resources without
463 # relying on Python GC
466 # relying on Python GC
464 return util.buffer(util.mmapread(fp))
467 return util.buffer(util.mmapread(fp))
465 return fp.read()
468 return fp.read()
466 except IOError as inst:
469 except IOError as inst:
467 if inst.errno != errno.ENOENT:
470 if inst.errno != errno.ENOENT:
468 raise
471 raise
469 return b''
472 return b''
470
473
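An illustrative standalone version of the _get_data pattern above, assuming a plain filesystem path rather than a Mercurial vfs opener; unlike the method, this sketch copies the mapped bytes so they remain valid once the map is closed.

    import mmap
    import os

    def read_maybe_mmap(path, threshold=None):
        # read a whole file, switching to mmap above a size threshold
        try:
            with open(path, 'rb') as fp:
                size = os.fstat(fp.fileno()).st_size
                if threshold is not None and size >= threshold:
                    with mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) as mm:
                        return bytes(mm)  # copy; the revlog wraps a live buffer
                return fp.read()
        except FileNotFoundError:
            return b''  # a missing file reads as empty, as above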
471 def _loadindex(self):
474 def _loadindex(self):
472
475
473 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
476 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
474
477
475 if self.postfix is None:
478 if self.postfix is None:
476 entry_point = b'%s.i' % self.radix
479 entry_point = b'%s.i' % self.radix
477 else:
480 else:
478 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
481 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
479
482
480 entry_data = b''
483 entry_data = b''
481 self._initempty = True
484 self._initempty = True
482 entry_data = self._get_data(entry_point, mmapindexthreshold)
485 entry_data = self._get_data(entry_point, mmapindexthreshold)
483 if len(entry_data) > 0:
486 if len(entry_data) > 0:
484 header = INDEX_HEADER.unpack(entry_data[:4])[0]
487 header = INDEX_HEADER.unpack(entry_data[:4])[0]
485 self._initempty = False
488 self._initempty = False
486 else:
489 else:
487 header = new_header
490 header = new_header
488
491
489 self._format_flags = header & ~0xFFFF
492 self._format_flags = header & ~0xFFFF
490 self._format_version = header & 0xFFFF
493 self._format_version = header & 0xFFFF
491
494
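The two masks above carve the 4-byte header into a 16-bit format version (low bits) and feature flags (high bits); a toy round-trip with a made-up flag bit:

    HYPOTHETICAL_FLAG = 1 << 16  # stand-in for a real revlog feature flag

    header = HYPOTHETICAL_FLAG | 2                 # flags plus "version 2"
    assert header & 0xFFFF == 2                    # format version
    assert header & ~0xFFFF == HYPOTHETICAL_FLAG   # format flags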
492 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
495 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
493 if supported_flags is None:
496 if supported_flags is None:
494 msg = _(b'unknown version (%d) in revlog %s')
497 msg = _(b'unknown version (%d) in revlog %s')
495 msg %= (self._format_version, self.display_id)
498 msg %= (self._format_version, self.display_id)
496 raise error.RevlogError(msg)
499 raise error.RevlogError(msg)
497 elif self._format_flags & ~supported_flags:
500 elif self._format_flags & ~supported_flags:
498 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
501 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
499 display_flag = self._format_flags >> 16
502 display_flag = self._format_flags >> 16
500 msg %= (display_flag, self._format_version, self.display_id)
503 msg %= (display_flag, self._format_version, self.display_id)
501 raise error.RevlogError(msg)
504 raise error.RevlogError(msg)
502
505
503 features = FEATURES_BY_VERSION[self._format_version]
506 features = FEATURES_BY_VERSION[self._format_version]
504 self._inline = features[b'inline'](self._format_flags)
507 self._inline = features[b'inline'](self._format_flags)
505 self._generaldelta = features[b'generaldelta'](self._format_flags)
508 self._generaldelta = features[b'generaldelta'](self._format_flags)
506 self.hassidedata = features[b'sidedata']
509 self.hassidedata = features[b'sidedata']
507
510
508 index_data = entry_data
511 if not features[b'docket']:
509 self._indexfile = entry_point
512 self._indexfile = entry_point
513 index_data = entry_data
514 else:
515 self._docket_file = entry_point
516 if self._initempty:
517 self._docket = docketutil.default_docket(self, header)
518 else:
519 self._docket = docketutil.parse_docket(self, entry_data)
520 self._indexfile = self._docket.index_filepath()
521 index_data = self._get_data(self._indexfile, mmapindexthreshold)
522 self._inline = False
523 # generaldelta is implied by version 2 revlogs.
524 self._generaldelta = True
525 # the logic for persistent nodemap will be dealt with within the
526 # main docket, so disable it for now.
527 self._nodemap_file = None
510
528
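A hedged sketch of the docket branch above: for a docket-based revlog, the entry file carries just the 4-byte version header, and the index path is derived from the radix the way RevlogDocket.index_filepath() does in the docket module introduced by this changeset.

    import struct

    S_HEADER = struct.Struct('>I')  # assumed to match the 4-byte INDEX_HEADER layout

    def docket_index_path(radix, entry_data):
        # parse the docket's version header, then derive the index file path
        (version_header,) = S_HEADER.unpack(entry_data[:S_HEADER.size])
        return version_header, b'%s.idx' % radix

    assert docket_index_path(b'00changelog', S_HEADER.pack(0xDEAD)) == (
        0xDEAD,
        b'00changelog.idx',
    )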
511 if self.postfix is None or self.postfix == b'a':
529 if self.postfix is None or self.postfix == b'a':
512 self._datafile = b'%s.d' % self.radix
530 self._datafile = b'%s.d' % self.radix
513 else:
531 else:
514 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
532 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
515
533
516 self.nodeconstants = sha1nodeconstants
534 self.nodeconstants = sha1nodeconstants
517 self.nullid = self.nodeconstants.nullid
535 self.nullid = self.nodeconstants.nullid
518
536
519 # sparse-revlog can't be on without general-delta (issue6056)
537 # sparse-revlog can't be on without general-delta (issue6056)
520 if not self._generaldelta:
538 if not self._generaldelta:
521 self._sparserevlog = False
539 self._sparserevlog = False
522
540
523 self._storedeltachains = True
541 self._storedeltachains = True
524
542
525 devel_nodemap = (
543 devel_nodemap = (
526 self._nodemap_file
544 self._nodemap_file
527 and force_nodemap
545 and force_nodemap
528 and parse_index_v1_nodemap is not None
546 and parse_index_v1_nodemap is not None
529 )
547 )
530
548
531 use_rust_index = False
549 use_rust_index = False
532 if rustrevlog is not None:
550 if rustrevlog is not None:
533 if self._nodemap_file is not None:
551 if self._nodemap_file is not None:
534 use_rust_index = True
552 use_rust_index = True
535 else:
553 else:
536 use_rust_index = self.opener.options.get(b'rust.index')
554 use_rust_index = self.opener.options.get(b'rust.index')
537
555
538 self._parse_index = parse_index_v1
556 self._parse_index = parse_index_v1
539 if self._format_version == REVLOGV0:
557 if self._format_version == REVLOGV0:
540 self._parse_index = revlogv0.parse_index_v0
558 self._parse_index = revlogv0.parse_index_v0
541 elif self._format_version == REVLOGV2:
559 elif self._format_version == REVLOGV2:
542 self._parse_index = parse_index_v2
560 self._parse_index = parse_index_v2
543 elif devel_nodemap:
561 elif devel_nodemap:
544 self._parse_index = parse_index_v1_nodemap
562 self._parse_index = parse_index_v1_nodemap
545 elif use_rust_index:
563 elif use_rust_index:
546 self._parse_index = parse_index_v1_mixed
564 self._parse_index = parse_index_v1_mixed
547 try:
565 try:
548 d = self._parse_index(index_data, self._inline)
566 d = self._parse_index(index_data, self._inline)
549 index, _chunkcache = d
567 index, _chunkcache = d
550 use_nodemap = (
568 use_nodemap = (
551 not self._inline
569 not self._inline
552 and self._nodemap_file is not None
570 and self._nodemap_file is not None
553 and util.safehasattr(index, 'update_nodemap_data')
571 and util.safehasattr(index, 'update_nodemap_data')
554 )
572 )
555 if use_nodemap:
573 if use_nodemap:
556 nodemap_data = nodemaputil.persisted_data(self)
574 nodemap_data = nodemaputil.persisted_data(self)
557 if nodemap_data is not None:
575 if nodemap_data is not None:
558 docket = nodemap_data[0]
576 docket = nodemap_data[0]
559 if (
577 if (
560 len(d[0]) > docket.tip_rev
578 len(d[0]) > docket.tip_rev
561 and d[0][docket.tip_rev][7] == docket.tip_node
579 and d[0][docket.tip_rev][7] == docket.tip_node
562 ):
580 ):
563 # no changelog tampering
581 # no changelog tampering
564 self._nodemap_docket = docket
582 self._nodemap_docket = docket
565 index.update_nodemap_data(*nodemap_data)
583 index.update_nodemap_data(*nodemap_data)
566 except (ValueError, IndexError):
584 except (ValueError, IndexError):
567 raise error.RevlogError(
585 raise error.RevlogError(
568 _(b"index %s is corrupted") % self.display_id
586 _(b"index %s is corrupted") % self.display_id
569 )
587 )
570 self.index, self._chunkcache = d
588 self.index, self._chunkcache = d
571 if not self._chunkcache:
589 if not self._chunkcache:
572 self._chunkclear()
590 self._chunkclear()
573 # revnum -> (chain-length, sum-delta-length)
591 # revnum -> (chain-length, sum-delta-length)
574 self._chaininfocache = util.lrucachedict(500)
592 self._chaininfocache = util.lrucachedict(500)
575 # revlog header -> revlog compressor
593 # revlog header -> revlog compressor
576 self._decompressors = {}
594 self._decompressors = {}
577
595
578 @util.propertycache
596 @util.propertycache
579 def revlog_kind(self):
597 def revlog_kind(self):
580 return self.target[0]
598 return self.target[0]
581
599
582 @util.propertycache
600 @util.propertycache
583 def display_id(self):
601 def display_id(self):
584 """The public facing "ID" of the revlog that we use in message"""
602 """The public facing "ID" of the revlog that we use in message"""
585 # Maybe we should build a user facing representation of
603 # Maybe we should build a user facing representation of
586 # revlog.target instead of using `self.radix`
604 # revlog.target instead of using `self.radix`
587 return self.radix
605 return self.radix
588
606
589 @util.propertycache
607 @util.propertycache
590 def _compressor(self):
608 def _compressor(self):
591 engine = util.compengines[self._compengine]
609 engine = util.compengines[self._compengine]
592 return engine.revlogcompressor(self._compengineopts)
610 return engine.revlogcompressor(self._compengineopts)
593
611
594 def _indexfp(self):
612 def _indexfp(self):
595 """file object for the revlog's index file"""
613 """file object for the revlog's index file"""
596 return self.opener(self._indexfile, mode=b"r")
614 return self.opener(self._indexfile, mode=b"r")
597
615
598 def __index_write_fp(self):
616 def __index_write_fp(self):
599 # You should not use this directly; use `_writing` instead
617 # You should not use this directly; use `_writing` instead
600 try:
618 try:
601 f = self.opener(
619 f = self.opener(
602 self._indexfile, mode=b"r+", checkambig=self._checkambig
620 self._indexfile, mode=b"r+", checkambig=self._checkambig
603 )
621 )
604 f.seek(0, os.SEEK_END)
622 f.seek(0, os.SEEK_END)
605 return f
623 return f
606 except IOError as inst:
624 except IOError as inst:
607 if inst.errno != errno.ENOENT:
625 if inst.errno != errno.ENOENT:
608 raise
626 raise
609 return self.opener(
627 return self.opener(
610 self._indexfile, mode=b"w+", checkambig=self._checkambig
628 self._indexfile, mode=b"w+", checkambig=self._checkambig
611 )
629 )
612
630
613 def __index_new_fp(self):
631 def __index_new_fp(self):
614 # You should not use this unless you are upgrading from an inline revlog
632 # You should not use this unless you are upgrading from an inline revlog
615 return self.opener(
633 return self.opener(
616 self._indexfile,
634 self._indexfile,
617 mode=b"w",
635 mode=b"w",
618 checkambig=self._checkambig,
636 checkambig=self._checkambig,
619 atomictemp=True,
637 atomictemp=True,
620 )
638 )
621
639
622 def _datafp(self, mode=b'r'):
640 def _datafp(self, mode=b'r'):
623 """file object for the revlog's data file"""
641 """file object for the revlog's data file"""
624 return self.opener(self._datafile, mode=mode)
642 return self.opener(self._datafile, mode=mode)
625
643
626 @contextlib.contextmanager
644 @contextlib.contextmanager
627 def _datareadfp(self, existingfp=None):
645 def _datareadfp(self, existingfp=None):
628 """file object suitable to read data"""
646 """file object suitable to read data"""
629 # Use explicit file handle, if given.
647 # Use explicit file handle, if given.
630 if existingfp is not None:
648 if existingfp is not None:
631 yield existingfp
649 yield existingfp
632
650
633 # Use a file handle being actively used for writes, if available.
651 # Use a file handle being actively used for writes, if available.
634 # There is some danger in doing this because reads will seek the
652 # There is some danger in doing this because reads will seek the
635 # file. However, _writeentry() performs a SEEK_END before all writes,
653 # file. However, _writeentry() performs a SEEK_END before all writes,
636 # so we should be safe.
654 # so we should be safe.
637 elif self._writinghandles:
655 elif self._writinghandles:
638 if self._inline:
656 if self._inline:
639 yield self._writinghandles[0]
657 yield self._writinghandles[0]
640 else:
658 else:
641 yield self._writinghandles[1]
659 yield self._writinghandles[1]
642
660
643 # Otherwise open a new file handle.
661 # Otherwise open a new file handle.
644 else:
662 else:
645 if self._inline:
663 if self._inline:
646 func = self._indexfp
664 func = self._indexfp
647 else:
665 else:
648 func = self._datafp
666 func = self._datafp
649 with func() as fp:
667 with func() as fp:
650 yield fp
668 yield fp
651
669
652 def tiprev(self):
670 def tiprev(self):
653 return len(self.index) - 1
671 return len(self.index) - 1
654
672
655 def tip(self):
673 def tip(self):
656 return self.node(self.tiprev())
674 return self.node(self.tiprev())
657
675
658 def __contains__(self, rev):
676 def __contains__(self, rev):
659 return 0 <= rev < len(self)
677 return 0 <= rev < len(self)
660
678
661 def __len__(self):
679 def __len__(self):
662 return len(self.index)
680 return len(self.index)
663
681
664 def __iter__(self):
682 def __iter__(self):
665 return iter(pycompat.xrange(len(self)))
683 return iter(pycompat.xrange(len(self)))
666
684
667 def revs(self, start=0, stop=None):
685 def revs(self, start=0, stop=None):
668 """iterate over all rev in this revlog (from start to stop)"""
686 """iterate over all rev in this revlog (from start to stop)"""
669 return storageutil.iterrevs(len(self), start=start, stop=stop)
687 return storageutil.iterrevs(len(self), start=start, stop=stop)
670
688
671 @property
689 @property
672 def nodemap(self):
690 def nodemap(self):
673 msg = (
691 msg = (
674 b"revlog.nodemap is deprecated, "
692 b"revlog.nodemap is deprecated, "
675 b"use revlog.index.[has_node|rev|get_rev]"
693 b"use revlog.index.[has_node|rev|get_rev]"
676 )
694 )
677 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
695 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
678 return self.index.nodemap
696 return self.index.nodemap
679
697
680 @property
698 @property
681 def _nodecache(self):
699 def _nodecache(self):
682 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
700 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
683 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
701 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
684 return self.index.nodemap
702 return self.index.nodemap
685
703
686 def hasnode(self, node):
704 def hasnode(self, node):
687 try:
705 try:
688 self.rev(node)
706 self.rev(node)
689 return True
707 return True
690 except KeyError:
708 except KeyError:
691 return False
709 return False
692
710
693 def candelta(self, baserev, rev):
711 def candelta(self, baserev, rev):
694 """whether two revisions (baserev, rev) can be delta-ed or not"""
712 """whether two revisions (baserev, rev) can be delta-ed or not"""
695 # Disable delta if either rev requires a content-changing flag
713 # Disable delta if either rev requires a content-changing flag
696 # processor (ex. LFS). This is because such a flag processor can alter
714 # processor (ex. LFS). This is because such a flag processor can alter
697 # the rawtext content that the delta will be based on, and two clients
715 # the rawtext content that the delta will be based on, and two clients
698 # could have the same revlog node with different flags (i.e. different
716 # could have the same revlog node with different flags (i.e. different
699 # rawtext contents) and the delta could be incompatible.
717 # rawtext contents) and the delta could be incompatible.
700 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
718 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
701 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
719 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
702 ):
720 ):
703 return False
721 return False
704 return True
722 return True
705
723
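A toy restatement of candelta's rule with a hypothetical stand-in for REVIDX_RAWTEXT_CHANGING_FLAGS: a delta is allowed only when neither revision carries a raw-text-changing flag.

    RAWTEXT_CHANGING = 0b10  # hypothetical flag mask, for illustration only

    def candelta(base_flags, rev_flags):
        # refuse a delta whenever either side may rewrite its rawtext
        return not ((base_flags | rev_flags) & RAWTEXT_CHANGING)

    assert candelta(0, 0)
    assert not candelta(RAWTEXT_CHANGING, 0)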
706 def update_caches(self, transaction):
724 def update_caches(self, transaction):
707 if self._nodemap_file is not None:
725 if self._nodemap_file is not None:
708 if transaction is None:
726 if transaction is None:
709 nodemaputil.update_persistent_nodemap(self)
727 nodemaputil.update_persistent_nodemap(self)
710 else:
728 else:
711 nodemaputil.setup_persistent_nodemap(transaction, self)
729 nodemaputil.setup_persistent_nodemap(transaction, self)
712
730
713 def clearcaches(self):
731 def clearcaches(self):
714 self._revisioncache = None
732 self._revisioncache = None
715 self._chainbasecache.clear()
733 self._chainbasecache.clear()
716 self._chunkcache = (0, b'')
734 self._chunkcache = (0, b'')
717 self._pcache = {}
735 self._pcache = {}
718 self._nodemap_docket = None
736 self._nodemap_docket = None
719 self.index.clearcaches()
737 self.index.clearcaches()
720 # The Python code is the one responsible for validating the docket, so we
738 # The Python code is the one responsible for validating the docket, so we
721 # end up having to refresh it here.
739 # end up having to refresh it here.
722 use_nodemap = (
740 use_nodemap = (
723 not self._inline
741 not self._inline
724 and self._nodemap_file is not None
742 and self._nodemap_file is not None
725 and util.safehasattr(self.index, 'update_nodemap_data')
743 and util.safehasattr(self.index, 'update_nodemap_data')
726 )
744 )
727 if use_nodemap:
745 if use_nodemap:
728 nodemap_data = nodemaputil.persisted_data(self)
746 nodemap_data = nodemaputil.persisted_data(self)
729 if nodemap_data is not None:
747 if nodemap_data is not None:
730 self._nodemap_docket = nodemap_data[0]
748 self._nodemap_docket = nodemap_data[0]
731 self.index.update_nodemap_data(*nodemap_data)
749 self.index.update_nodemap_data(*nodemap_data)
732
750
733 def rev(self, node):
751 def rev(self, node):
734 try:
752 try:
735 return self.index.rev(node)
753 return self.index.rev(node)
736 except TypeError:
754 except TypeError:
737 raise
755 raise
738 except error.RevlogError:
756 except error.RevlogError:
739 # parsers.c radix tree lookup failed
757 # parsers.c radix tree lookup failed
740 if (
758 if (
741 node == self.nodeconstants.wdirid
759 node == self.nodeconstants.wdirid
742 or node in self.nodeconstants.wdirfilenodeids
760 or node in self.nodeconstants.wdirfilenodeids
743 ):
761 ):
744 raise error.WdirUnsupported
762 raise error.WdirUnsupported
745 raise error.LookupError(node, self.display_id, _(b'no node'))
763 raise error.LookupError(node, self.display_id, _(b'no node'))
746
764
747 # Accessors for index entries.
765 # Accessors for index entries.
748
766
749 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
767 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
750 # are flags.
768 # are flags.
751 def start(self, rev):
769 def start(self, rev):
752 return int(self.index[rev][0] >> 16)
770 return int(self.index[rev][0] >> 16)
753
771
754 def flags(self, rev):
772 def flags(self, rev):
755 return self.index[rev][0] & 0xFFFF
773 return self.index[rev][0] & 0xFFFF
756
774
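A small round-trip of the 48/16-bit split described in the comment above, using the same shift and mask as start() and flags():

    def pack_offset_flags(offset, flags):
        # high 48 bits: byte offset into the data file; low 16 bits: flags
        return (offset << 16) | flags

    entry0 = pack_offset_flags(1024, 0x1)
    assert entry0 >> 16 == 1024    # what start() extracts
    assert entry0 & 0xFFFF == 0x1  # what flags() extracts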
757 def length(self, rev):
775 def length(self, rev):
758 return self.index[rev][1]
776 return self.index[rev][1]
759
777
760 def sidedata_length(self, rev):
778 def sidedata_length(self, rev):
761 if not self.hassidedata:
779 if not self.hassidedata:
762 return 0
780 return 0
763 return self.index[rev][9]
781 return self.index[rev][9]
764
782
765 def rawsize(self, rev):
783 def rawsize(self, rev):
766 """return the length of the uncompressed text for a given revision"""
784 """return the length of the uncompressed text for a given revision"""
767 l = self.index[rev][2]
785 l = self.index[rev][2]
768 if l >= 0:
786 if l >= 0:
769 return l
787 return l
770
788
771 t = self.rawdata(rev)
789 t = self.rawdata(rev)
772 return len(t)
790 return len(t)
773
791
774 def size(self, rev):
792 def size(self, rev):
775 """length of non-raw text (processed by a "read" flag processor)"""
793 """length of non-raw text (processed by a "read" flag processor)"""
776 # fast path: if no "read" flag processor could change the content,
794 # fast path: if no "read" flag processor could change the content,
777 # size is rawsize. note: ELLIPSIS is known to not change the content.
795 # size is rawsize. note: ELLIPSIS is known to not change the content.
778 flags = self.flags(rev)
796 flags = self.flags(rev)
779 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
797 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
780 return self.rawsize(rev)
798 return self.rawsize(rev)
781
799
782 return len(self.revision(rev, raw=False))
800 return len(self.revision(rev, raw=False))
783
801
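A sketch of the fast-path test in size() with made-up flag constants: since only flags other than ELLIPSIS can change the text, the raw size is authoritative when none of them are set.

    KNOWN_FLAGS = 0b111  # hypothetical union of all known flag bits
    ELLIPSIS = 0b100     # hypothetical ellipsis bit, known not to alter content

    def can_use_rawsize(flags):
        return flags & (KNOWN_FLAGS ^ ELLIPSIS) == 0

    assert can_use_rawsize(0)
    assert can_use_rawsize(ELLIPSIS)
    assert not can_use_rawsize(0b001)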
784 def chainbase(self, rev):
802 def chainbase(self, rev):
785 base = self._chainbasecache.get(rev)
803 base = self._chainbasecache.get(rev)
786 if base is not None:
804 if base is not None:
787 return base
805 return base
788
806
789 index = self.index
807 index = self.index
790 iterrev = rev
808 iterrev = rev
791 base = index[iterrev][3]
809 base = index[iterrev][3]
792 while base != iterrev:
810 while base != iterrev:
793 iterrev = base
811 iterrev = base
794 base = index[iterrev][3]
812 base = index[iterrev][3]
795
813
796 self._chainbasecache[rev] = base
814 self._chainbasecache[rev] = base
797 return base
815 return base
798
816
799 def linkrev(self, rev):
817 def linkrev(self, rev):
800 return self.index[rev][4]
818 return self.index[rev][4]
801
819
802 def parentrevs(self, rev):
820 def parentrevs(self, rev):
803 try:
821 try:
804 entry = self.index[rev]
822 entry = self.index[rev]
805 except IndexError:
823 except IndexError:
806 if rev == wdirrev:
824 if rev == wdirrev:
807 raise error.WdirUnsupported
825 raise error.WdirUnsupported
808 raise
826 raise
809 if entry[5] == nullrev:
827 if entry[5] == nullrev:
810 return entry[6], entry[5]
828 return entry[6], entry[5]
811 else:
829 else:
812 return entry[5], entry[6]
830 return entry[5], entry[6]
813
831
814 # fast parentrevs(rev) where rev isn't filtered
832 # fast parentrevs(rev) where rev isn't filtered
815 _uncheckedparentrevs = parentrevs
833 _uncheckedparentrevs = parentrevs
816
834
817 def node(self, rev):
835 def node(self, rev):
818 try:
836 try:
819 return self.index[rev][7]
837 return self.index[rev][7]
820 except IndexError:
838 except IndexError:
821 if rev == wdirrev:
839 if rev == wdirrev:
822 raise error.WdirUnsupported
840 raise error.WdirUnsupported
823 raise
841 raise
824
842
825 # Derived from index values.
843 # Derived from index values.
826
844
827 def end(self, rev):
845 def end(self, rev):
828 return self.start(rev) + self.length(rev)
846 return self.start(rev) + self.length(rev)
829
847
830 def parents(self, node):
848 def parents(self, node):
831 i = self.index
849 i = self.index
832 d = i[self.rev(node)]
850 d = i[self.rev(node)]
833 # inline node() to avoid function call overhead
851 # inline node() to avoid function call overhead
834 if d[5] == nullrev:
852 if d[5] == nullrev:
835 return i[d[6]][7], i[d[5]][7]
853 return i[d[6]][7], i[d[5]][7]
836 else:
854 else:
837 return i[d[5]][7], i[d[6]][7]
855 return i[d[5]][7], i[d[6]][7]
838
856
839 def chainlen(self, rev):
857 def chainlen(self, rev):
840 return self._chaininfo(rev)[0]
858 return self._chaininfo(rev)[0]
841
859
842 def _chaininfo(self, rev):
860 def _chaininfo(self, rev):
843 chaininfocache = self._chaininfocache
861 chaininfocache = self._chaininfocache
844 if rev in chaininfocache:
862 if rev in chaininfocache:
845 return chaininfocache[rev]
863 return chaininfocache[rev]
846 index = self.index
864 index = self.index
847 generaldelta = self._generaldelta
865 generaldelta = self._generaldelta
848 iterrev = rev
866 iterrev = rev
849 e = index[iterrev]
867 e = index[iterrev]
850 clen = 0
868 clen = 0
851 compresseddeltalen = 0
869 compresseddeltalen = 0
852 while iterrev != e[3]:
870 while iterrev != e[3]:
853 clen += 1
871 clen += 1
854 compresseddeltalen += e[1]
872 compresseddeltalen += e[1]
855 if generaldelta:
873 if generaldelta:
856 iterrev = e[3]
874 iterrev = e[3]
857 else:
875 else:
858 iterrev -= 1
876 iterrev -= 1
859 if iterrev in chaininfocache:
877 if iterrev in chaininfocache:
860 t = chaininfocache[iterrev]
878 t = chaininfocache[iterrev]
861 clen += t[0]
879 clen += t[0]
862 compresseddeltalen += t[1]
880 compresseddeltalen += t[1]
863 break
881 break
864 e = index[iterrev]
882 e = index[iterrev]
865 else:
883 else:
866 # Add text length of base since decompressing that also takes
884 # Add text length of base since decompressing that also takes
867 # work. For cache hits the length is already included.
885 # work. For cache hits the length is already included.
868 compresseddeltalen += e[1]
886 compresseddeltalen += e[1]
869 r = (clen, compresseddeltalen)
887 r = (clen, compresseddeltalen)
870 chaininfocache[rev] = r
888 chaininfocache[rev] = r
871 return r
889 return r
872
890
873 def _deltachain(self, rev, stoprev=None):
891 def _deltachain(self, rev, stoprev=None):
874 """Obtain the delta chain for a revision.
892 """Obtain the delta chain for a revision.
875
893
876 ``stoprev`` specifies a revision to stop at. If not specified, we
894 ``stoprev`` specifies a revision to stop at. If not specified, we
877 stop at the base of the chain.
895 stop at the base of the chain.
878
896
879 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
897 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
880 revs in ascending order and ``stopped`` is a bool indicating whether
898 revs in ascending order and ``stopped`` is a bool indicating whether
881 ``stoprev`` was hit.
899 ``stoprev`` was hit.
882 """
900 """
883 # Try C implementation.
901 # Try C implementation.
884 try:
902 try:
885 return self.index.deltachain(rev, stoprev, self._generaldelta)
903 return self.index.deltachain(rev, stoprev, self._generaldelta)
886 except AttributeError:
904 except AttributeError:
887 pass
905 pass
888
906
889 chain = []
907 chain = []
890
908
891 # Alias to prevent attribute lookup in tight loop.
909 # Alias to prevent attribute lookup in tight loop.
892 index = self.index
910 index = self.index
893 generaldelta = self._generaldelta
911 generaldelta = self._generaldelta
894
912
895 iterrev = rev
913 iterrev = rev
896 e = index[iterrev]
914 e = index[iterrev]
897 while iterrev != e[3] and iterrev != stoprev:
915 while iterrev != e[3] and iterrev != stoprev:
898 chain.append(iterrev)
916 chain.append(iterrev)
899 if generaldelta:
917 if generaldelta:
900 iterrev = e[3]
918 iterrev = e[3]
901 else:
919 else:
902 iterrev -= 1
920 iterrev -= 1
903 e = index[iterrev]
921 e = index[iterrev]
904
922
905 if iterrev == stoprev:
923 if iterrev == stoprev:
906 stopped = True
924 stopped = True
907 else:
925 else:
908 chain.append(iterrev)
926 chain.append(iterrev)
909 stopped = False
927 stopped = False
910
928
911 chain.reverse()
929 chain.reverse()
912 return chain, stopped
930 return chain, stopped
913
931
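A pure-Python toy of the walk in _deltachain for the general-delta case, where each revision records its delta base and a self-referential base marks a full snapshot:

    def deltachain(bases, rev, stoprev=None):
        # bases[r] is revision r's delta base; bases[r] == r means a snapshot
        chain = []
        iterrev = rev
        while bases[iterrev] != iterrev and iterrev != stoprev:
            chain.append(iterrev)
            iterrev = bases[iterrev]
        stopped = iterrev == stoprev
        if not stopped:
            chain.append(iterrev)
        chain.reverse()
        return chain, stopped

    # rev 3 deltas against rev 2, which deltas against the snapshot rev 0
    assert deltachain([0, 0, 0, 2], 3) == ([0, 2, 3], False)
    assert deltachain([0, 0, 0, 2], 3, stoprev=2) == ([3], True)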
914 def ancestors(self, revs, stoprev=0, inclusive=False):
932 def ancestors(self, revs, stoprev=0, inclusive=False):
915 """Generate the ancestors of 'revs' in reverse revision order.
933 """Generate the ancestors of 'revs' in reverse revision order.
916 Does not generate revs lower than stoprev.
934 Does not generate revs lower than stoprev.
917
935
918 See the documentation for ancestor.lazyancestors for more details."""
936 See the documentation for ancestor.lazyancestors for more details."""
919
937
920 # first, make sure start revisions aren't filtered
938 # first, make sure start revisions aren't filtered
921 revs = list(revs)
939 revs = list(revs)
922 checkrev = self.node
940 checkrev = self.node
923 for r in revs:
941 for r in revs:
924 checkrev(r)
942 checkrev(r)
925 # and we're sure ancestors aren't filtered as well
943 # and we're sure ancestors aren't filtered as well
926
944
927 if rustancestor is not None:
945 if rustancestor is not None:
928 lazyancestors = rustancestor.LazyAncestors
946 lazyancestors = rustancestor.LazyAncestors
929 arg = self.index
947 arg = self.index
930 else:
948 else:
931 lazyancestors = ancestor.lazyancestors
949 lazyancestors = ancestor.lazyancestors
932 arg = self._uncheckedparentrevs
950 arg = self._uncheckedparentrevs
933 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
951 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
934
952
935 def descendants(self, revs):
953 def descendants(self, revs):
936 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
954 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
937
955
938 def findcommonmissing(self, common=None, heads=None):
956 def findcommonmissing(self, common=None, heads=None):
939 """Return a tuple of the ancestors of common and the ancestors of heads
957 """Return a tuple of the ancestors of common and the ancestors of heads
940 that are not ancestors of common. In revset terminology, we return the
958 that are not ancestors of common. In revset terminology, we return the
941 tuple:
959 tuple:
942
960
943 ::common, (::heads) - (::common)
961 ::common, (::heads) - (::common)
944
962
945 The list is sorted by revision number, meaning it is
963 The list is sorted by revision number, meaning it is
946 topologically sorted.
964 topologically sorted.
947
965
948 'heads' and 'common' are both lists of node IDs. If heads is
966 'heads' and 'common' are both lists of node IDs. If heads is
949 not supplied, uses all of the revlog's heads. If common is not
967 not supplied, uses all of the revlog's heads. If common is not
950 supplied, uses nullid."""
968 supplied, uses nullid."""
951 if common is None:
969 if common is None:
952 common = [self.nullid]
970 common = [self.nullid]
953 if heads is None:
971 if heads is None:
954 heads = self.heads()
972 heads = self.heads()
955
973
956 common = [self.rev(n) for n in common]
974 common = [self.rev(n) for n in common]
957 heads = [self.rev(n) for n in heads]
975 heads = [self.rev(n) for n in heads]
958
976
959 # we want the ancestors, but inclusive
977 # we want the ancestors, but inclusive
960 class lazyset(object):
978 class lazyset(object):
961 def __init__(self, lazyvalues):
979 def __init__(self, lazyvalues):
962 self.addedvalues = set()
980 self.addedvalues = set()
963 self.lazyvalues = lazyvalues
981 self.lazyvalues = lazyvalues
964
982
965 def __contains__(self, value):
983 def __contains__(self, value):
966 return value in self.addedvalues or value in self.lazyvalues
984 return value in self.addedvalues or value in self.lazyvalues
967
985
968 def __iter__(self):
986 def __iter__(self):
969 added = self.addedvalues
987 added = self.addedvalues
970 for r in added:
988 for r in added:
971 yield r
989 yield r
972 for r in self.lazyvalues:
990 for r in self.lazyvalues:
973 if not r in added:
991 if not r in added:
974 yield r
992 yield r
975
993
976 def add(self, value):
994 def add(self, value):
977 self.addedvalues.add(value)
995 self.addedvalues.add(value)
978
996
979 def update(self, values):
997 def update(self, values):
980 self.addedvalues.update(values)
998 self.addedvalues.update(values)
981
999
982 has = lazyset(self.ancestors(common))
1000 has = lazyset(self.ancestors(common))
983 has.add(nullrev)
1001 has.add(nullrev)
984 has.update(common)
1002 has.update(common)
985
1003
986 # take all ancestors from heads that aren't in has
1004 # take all ancestors from heads that aren't in has
987 missing = set()
1005 missing = set()
988 visit = collections.deque(r for r in heads if r not in has)
1006 visit = collections.deque(r for r in heads if r not in has)
989 while visit:
1007 while visit:
990 r = visit.popleft()
1008 r = visit.popleft()
991 if r in missing:
1009 if r in missing:
992 continue
1010 continue
993 else:
1011 else:
994 missing.add(r)
1012 missing.add(r)
995 for p in self.parentrevs(r):
1013 for p in self.parentrevs(r):
996 if p not in has:
1014 if p not in has:
997 visit.append(p)
1015 visit.append(p)
998 missing = list(missing)
1016 missing = list(missing)
999 missing.sort()
1017 missing.sort()
1000 return has, [self.node(miss) for miss in missing]
1018 return has, [self.node(miss) for miss in missing]
1001
1019
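An illustrative version of the reverse walk above over a toy parent function, computing the `(::heads) - (::common)` side on a linear history:

    import collections

    def missing_revs(parentrevs, has, heads):
        # walk back from heads, stopping at anything already in 'has'
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            missing.add(r)
            for p in parentrevs(r):
                if p not in has and p not in missing:
                    visit.append(p)
        return sorted(missing)

    # linear history 0-1-2-3; 'has' covers ::1 plus nullrev, heads is [3]
    parents = {0: [-1], 1: [0], 2: [1], 3: [2]}
    assert missing_revs(parents.__getitem__, {-1, 0, 1}, [3]) == [2, 3]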
1002 def incrementalmissingrevs(self, common=None):
1020 def incrementalmissingrevs(self, common=None):
1003 """Return an object that can be used to incrementally compute the
1021 """Return an object that can be used to incrementally compute the
1004 revision numbers of the ancestors of arbitrary sets that are not
1022 revision numbers of the ancestors of arbitrary sets that are not
1005 ancestors of common. This is an ancestor.incrementalmissingancestors
1023 ancestors of common. This is an ancestor.incrementalmissingancestors
1006 object.
1024 object.
1007
1025
1008 'common' is a list of revision numbers. If common is not supplied, uses
1026 'common' is a list of revision numbers. If common is not supplied, uses
1009 nullrev.
1027 nullrev.
1010 """
1028 """
1011 if common is None:
1029 if common is None:
1012 common = [nullrev]
1030 common = [nullrev]
1013
1031
1014 if rustancestor is not None:
1032 if rustancestor is not None:
1015 return rustancestor.MissingAncestors(self.index, common)
1033 return rustancestor.MissingAncestors(self.index, common)
1016 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1034 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1017
1035
1018 def findmissingrevs(self, common=None, heads=None):
1036 def findmissingrevs(self, common=None, heads=None):
1019 """Return the revision numbers of the ancestors of heads that
1037 """Return the revision numbers of the ancestors of heads that
1020 are not ancestors of common.
1038 are not ancestors of common.
1021
1039
1022 More specifically, return a list of revision numbers corresponding to
1040 More specifically, return a list of revision numbers corresponding to
1023 nodes N such that every N satisfies the following constraints:
1041 nodes N such that every N satisfies the following constraints:
1024
1042
1025 1. N is an ancestor of some node in 'heads'
1043 1. N is an ancestor of some node in 'heads'
1026 2. N is not an ancestor of any node in 'common'
1044 2. N is not an ancestor of any node in 'common'
1027
1045
1028 The list is sorted by revision number, meaning it is
1046 The list is sorted by revision number, meaning it is
1029 topologically sorted.
1047 topologically sorted.
1030
1048
1031 'heads' and 'common' are both lists of revision numbers. If heads is
1049 'heads' and 'common' are both lists of revision numbers. If heads is
1032 not supplied, uses all of the revlog's heads. If common is not
1050 not supplied, uses all of the revlog's heads. If common is not
1033 supplied, uses nullid."""
1051 supplied, uses nullid."""
1034 if common is None:
1052 if common is None:
1035 common = [nullrev]
1053 common = [nullrev]
1036 if heads is None:
1054 if heads is None:
1037 heads = self.headrevs()
1055 heads = self.headrevs()
1038
1056
1039 inc = self.incrementalmissingrevs(common=common)
1057 inc = self.incrementalmissingrevs(common=common)
1040 return inc.missingancestors(heads)
1058 return inc.missingancestors(heads)
1041
1059
1042 def findmissing(self, common=None, heads=None):
1060 def findmissing(self, common=None, heads=None):
1043 """Return the ancestors of heads that are not ancestors of common.
1061 """Return the ancestors of heads that are not ancestors of common.
1044
1062
1045 More specifically, return a list of nodes N such that every N
1063 More specifically, return a list of nodes N such that every N
1046 satisfies the following constraints:
1064 satisfies the following constraints:
1047
1065
1048 1. N is an ancestor of some node in 'heads'
1066 1. N is an ancestor of some node in 'heads'
1049 2. N is not an ancestor of any node in 'common'
1067 2. N is not an ancestor of any node in 'common'
1050
1068
1051 The list is sorted by revision number, meaning it is
1069 The list is sorted by revision number, meaning it is
1052 topologically sorted.
1070 topologically sorted.
1053
1071
1054 'heads' and 'common' are both lists of node IDs. If heads is
1072 'heads' and 'common' are both lists of node IDs. If heads is
1055 not supplied, uses all of the revlog's heads. If common is not
1073 not supplied, uses all of the revlog's heads. If common is not
1056 supplied, uses nullid."""
1074 supplied, uses nullid."""
1057 if common is None:
1075 if common is None:
1058 common = [self.nullid]
1076 common = [self.nullid]
1059 if heads is None:
1077 if heads is None:
1060 heads = self.heads()
1078 heads = self.heads()
1061
1079
1062 common = [self.rev(n) for n in common]
1080 common = [self.rev(n) for n in common]
1063 heads = [self.rev(n) for n in heads]
1081 heads = [self.rev(n) for n in heads]
1064
1082
1065 inc = self.incrementalmissingrevs(common=common)
1083 inc = self.incrementalmissingrevs(common=common)
1066 return [self.node(r) for r in inc.missingancestors(heads)]
1084 return [self.node(r) for r in inc.missingancestors(heads)]
1067
1085
1068 def nodesbetween(self, roots=None, heads=None):
1086 def nodesbetween(self, roots=None, heads=None):
1069 """Return a topological path from 'roots' to 'heads'.
1087 """Return a topological path from 'roots' to 'heads'.
1070
1088
1071 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1089 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1072 topologically sorted list of all nodes N that satisfy both of
1090 topologically sorted list of all nodes N that satisfy both of
1073 these constraints:
1091 these constraints:
1074
1092
1075 1. N is a descendant of some node in 'roots'
1093 1. N is a descendant of some node in 'roots'
1076 2. N is an ancestor of some node in 'heads'
1094 2. N is an ancestor of some node in 'heads'
1077
1095
1078 Every node is considered to be both a descendant and an ancestor
1096 Every node is considered to be both a descendant and an ancestor
1079 of itself, so every reachable node in 'roots' and 'heads' will be
1097 of itself, so every reachable node in 'roots' and 'heads' will be
1080 included in 'nodes'.
1098 included in 'nodes'.
1081
1099
1082 'outroots' is the list of reachable nodes in 'roots', i.e., the
1100 'outroots' is the list of reachable nodes in 'roots', i.e., the
1083 subset of 'roots' that is returned in 'nodes'. Likewise,
1101 subset of 'roots' that is returned in 'nodes'. Likewise,
1084 'outheads' is the subset of 'heads' that is also in 'nodes'.
1102 'outheads' is the subset of 'heads' that is also in 'nodes'.
1085
1103
1086 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1104 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1087 unspecified, uses nullid as the only root. If 'heads' is
1105 unspecified, uses nullid as the only root. If 'heads' is
1088 unspecified, uses the list of all of the revlog's heads."""
1106 unspecified, uses the list of all of the revlog's heads."""
1089 nonodes = ([], [], [])
1107 nonodes = ([], [], [])
1090 if roots is not None:
1108 if roots is not None:
1091 roots = list(roots)
1109 roots = list(roots)
1092 if not roots:
1110 if not roots:
1093 return nonodes
1111 return nonodes
1094 lowestrev = min([self.rev(n) for n in roots])
1112 lowestrev = min([self.rev(n) for n in roots])
1095 else:
1113 else:
1096 roots = [self.nullid] # Everybody's a descendant of nullid
1114 roots = [self.nullid] # Everybody's a descendant of nullid
1097 lowestrev = nullrev
1115 lowestrev = nullrev
1098 if (lowestrev == nullrev) and (heads is None):
1116 if (lowestrev == nullrev) and (heads is None):
1099 # We want _all_ the nodes!
1117 # We want _all_ the nodes!
1100 return (
1118 return (
1101 [self.node(r) for r in self],
1119 [self.node(r) for r in self],
1102 [self.nullid],
1120 [self.nullid],
1103 list(self.heads()),
1121 list(self.heads()),
1104 )
1122 )
1105 if heads is None:
1123 if heads is None:
1106 # All nodes are ancestors, so the latest ancestor is the last
1124 # All nodes are ancestors, so the latest ancestor is the last
1107 # node.
1125 # node.
1108 highestrev = len(self) - 1
1126 highestrev = len(self) - 1
1109 # Set ancestors to None to signal that every node is an ancestor.
1127 # Set ancestors to None to signal that every node is an ancestor.
1110 ancestors = None
1128 ancestors = None
1111 # Set heads to an empty dictionary for later discovery of heads
1129 # Set heads to an empty dictionary for later discovery of heads
1112 heads = {}
1130 heads = {}
1113 else:
1131 else:
1114 heads = list(heads)
1132 heads = list(heads)
1115 if not heads:
1133 if not heads:
1116 return nonodes
1134 return nonodes
1117 ancestors = set()
1135 ancestors = set()
1118 # Turn heads into a dictionary so we can remove 'fake' heads.
1136 # Turn heads into a dictionary so we can remove 'fake' heads.
1119 # Also, later we will be using it to filter out the heads we can't
1137 # Also, later we will be using it to filter out the heads we can't
1120 # find from roots.
1138 # find from roots.
1121 heads = dict.fromkeys(heads, False)
1139 heads = dict.fromkeys(heads, False)
1122 # Start at the top and keep marking parents until we're done.
1140 # Start at the top and keep marking parents until we're done.
1123 nodestotag = set(heads)
1141 nodestotag = set(heads)
1124 # Remember where the top was so we can use it as a limit later.
1142 # Remember where the top was so we can use it as a limit later.
1125 highestrev = max([self.rev(n) for n in nodestotag])
1143 highestrev = max([self.rev(n) for n in nodestotag])
1126 while nodestotag:
1144 while nodestotag:
1127 # grab a node to tag
1145 # grab a node to tag
1128 n = nodestotag.pop()
1146 n = nodestotag.pop()
1129 # Never tag nullid
1147 # Never tag nullid
1130 if n == self.nullid:
1148 if n == self.nullid:
1131 continue
1149 continue
1132 # A node's revision number represents its place in a
1150 # A node's revision number represents its place in a
1133 # topologically sorted list of nodes.
1151 # topologically sorted list of nodes.
1134 r = self.rev(n)
1152 r = self.rev(n)
1135 if r >= lowestrev:
1153 if r >= lowestrev:
1136 if n not in ancestors:
1154 if n not in ancestors:
1137 # If we are possibly a descendant of one of the roots
1155 # If we are possibly a descendant of one of the roots
1138 # and we haven't already been marked as an ancestor
1156 # and we haven't already been marked as an ancestor
1139 ancestors.add(n) # Mark as ancestor
1157 ancestors.add(n) # Mark as ancestor
1140 # Add non-nullid parents to list of nodes to tag.
1158 # Add non-nullid parents to list of nodes to tag.
1141 nodestotag.update(
1159 nodestotag.update(
1142 [p for p in self.parents(n) if p != self.nullid]
1160 [p for p in self.parents(n) if p != self.nullid]
1143 )
1161 )
1144 elif n in heads: # We've seen it before, is it a fake head?
1162 elif n in heads: # We've seen it before, is it a fake head?
1145 # So it is; real heads should not be the ancestors of
1163 # So it is; real heads should not be the ancestors of
1146 # any other heads.
1164 # any other heads.
1147 heads.pop(n)
1165 heads.pop(n)
1148 if not ancestors:
1166 if not ancestors:
1149 return nonodes
1167 return nonodes
1150 # Now that we have our set of ancestors, we want to remove any
1168 # Now that we have our set of ancestors, we want to remove any
1151 # roots that are not ancestors.
1169 # roots that are not ancestors.
1152
1170
1153 # If one of the roots was nullid, everything is included anyway.
1171 # If one of the roots was nullid, everything is included anyway.
1154 if lowestrev > nullrev:
1172 if lowestrev > nullrev:
1155 # But, since we weren't, let's recompute the lowest rev to not
1173 # But, since we weren't, let's recompute the lowest rev to not
1156 # include roots that aren't ancestors.
1174 # include roots that aren't ancestors.
1157
1175
1158 # Filter out roots that aren't ancestors of heads
1176 # Filter out roots that aren't ancestors of heads
1159 roots = [root for root in roots if root in ancestors]
1177 roots = [root for root in roots if root in ancestors]
1160 # Recompute the lowest revision
1178 # Recompute the lowest revision
1161 if roots:
1179 if roots:
1162 lowestrev = min([self.rev(root) for root in roots])
1180 lowestrev = min([self.rev(root) for root in roots])
1163 else:
1181 else:
1164 # No more roots? Return empty list
1182 # No more roots? Return empty list
1165 return nonodes
1183 return nonodes
1166 else:
1184 else:
1167 # We are descending from nullid, and don't need to care about
1185 # We are descending from nullid, and don't need to care about
1168 # any other roots.
1186 # any other roots.
1169 lowestrev = nullrev
1187 lowestrev = nullrev
1170 roots = [self.nullid]
1188 roots = [self.nullid]
1171 # Transform our roots list into a set.
1189 # Transform our roots list into a set.
1172 descendants = set(roots)
1190 descendants = set(roots)
1173 # Also, keep the original roots so we can filter out roots that aren't
1191 # Also, keep the original roots so we can filter out roots that aren't
1174 # 'real' roots (i.e. are descended from other roots).
1192 # 'real' roots (i.e. are descended from other roots).
1175 roots = descendants.copy()
1193 roots = descendants.copy()
1176 # Our topologically sorted list of output nodes.
1194 # Our topologically sorted list of output nodes.
1177 orderedout = []
1195 orderedout = []
1178 # Don't start at nullid since we don't want nullid in our output list,
1196 # Don't start at nullid since we don't want nullid in our output list,
1179 # and if nullid shows up in descendants, empty parents will look like
1197 # and if nullid shows up in descendants, empty parents will look like
1180 # they're descendants.
1198 # they're descendants.
1181 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1199 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1182 n = self.node(r)
1200 n = self.node(r)
1183 isdescendant = False
1201 isdescendant = False
1184 if lowestrev == nullrev: # Everybody is a descendant of nullid
1202 if lowestrev == nullrev: # Everybody is a descendant of nullid
1185 isdescendant = True
1203 isdescendant = True
1186 elif n in descendants:
1204 elif n in descendants:
1187 # n is already a descendant
1205 # n is already a descendant
1188 isdescendant = True
1206 isdescendant = True
1189 # This check only needs to be done here because all the roots
1207 # This check only needs to be done here because all the roots
1190 # will start being marked as descendants before the loop.
1208 # will start being marked as descendants before the loop.
1191 if n in roots:
1209 if n in roots:
1192 # If n was a root, check if it's a 'real' root.
1210 # If n was a root, check if it's a 'real' root.
1193 p = tuple(self.parents(n))
1211 p = tuple(self.parents(n))
1194 # If any of its parents are descendants, it's not a root.
1212 # If any of its parents are descendants, it's not a root.
1195 if (p[0] in descendants) or (p[1] in descendants):
1213 if (p[0] in descendants) or (p[1] in descendants):
1196 roots.remove(n)
1214 roots.remove(n)
1197 else:
1215 else:
1198 p = tuple(self.parents(n))
1216 p = tuple(self.parents(n))
1199 # A node is a descendant if either of its parents is a
1217 # A node is a descendant if either of its parents is a
1200 # descendant. (We seeded the descendants set with the roots
1218 # descendant. (We seeded the descendants set with the roots
1201 # up there, remember?)
1219 # up there, remember?)
1202 if (p[0] in descendants) or (p[1] in descendants):
1220 if (p[0] in descendants) or (p[1] in descendants):
1203 descendants.add(n)
1221 descendants.add(n)
1204 isdescendant = True
1222 isdescendant = True
1205 if isdescendant and ((ancestors is None) or (n in ancestors)):
1223 if isdescendant and ((ancestors is None) or (n in ancestors)):
1206 # Only include nodes that are both descendants and ancestors.
1224 # Only include nodes that are both descendants and ancestors.
1207 orderedout.append(n)
1225 orderedout.append(n)
1208 if (ancestors is not None) and (n in heads):
1226 if (ancestors is not None) and (n in heads):
1209 # We're trying to figure out which heads are reachable
1227 # We're trying to figure out which heads are reachable
1210 # from roots.
1228 # from roots.
1211 # Mark this head as having been reached
1229 # Mark this head as having been reached
1212 heads[n] = True
1230 heads[n] = True
1213 elif ancestors is None:
1231 elif ancestors is None:
1214 # Otherwise, we're trying to discover the heads.
1232 # Otherwise, we're trying to discover the heads.
1215 # Assume this is a head because if it isn't, the next step
1233 # Assume this is a head because if it isn't, the next step
1216 # will eventually remove it.
1234 # will eventually remove it.
1217 heads[n] = True
1235 heads[n] = True
1218 # But, obviously its parents aren't.
1236 # But, obviously its parents aren't.
1219 for p in self.parents(n):
1237 for p in self.parents(n):
1220 heads.pop(p, None)
1238 heads.pop(p, None)
1221 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1239 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1222 roots = list(roots)
1240 roots = list(roots)
1223 assert orderedout
1241 assert orderedout
1224 assert roots
1242 assert roots
1225 assert heads
1243 assert heads
1226 return (orderedout, roots, heads)
1244 return (orderedout, roots, heads)
1227
1245
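A brute-force toy check of the nodesbetween contract on a small DAG: the result contains exactly the revisions that are both descendants of some root and ancestors of some head.

    def nodes_between(parentrevs, count, roots, heads):
        # brute force is fine for a toy graph; the method above is incremental
        def ancestors(rev):
            seen, stack = set(), [rev]
            while stack:
                r = stack.pop()
                if r < 0 or r in seen:
                    continue
                seen.add(r)
                stack.extend(parentrevs(r))
            return seen

        reach_head = set().union(*(ancestors(h) for h in heads))
        above_root = {r for r in range(count) if ancestors(r) & set(roots)}
        return sorted(reach_head & above_root)

    # 0 -> 1 -> 2 plus a side branch 0 -> 3
    parents = {0: [-1], 1: [0], 2: [1], 3: [0]}
    assert nodes_between(parents.__getitem__, 4, roots=[1], heads=[2]) == [1, 2]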
1228 def headrevs(self, revs=None):
1246 def headrevs(self, revs=None):
1229 if revs is None:
1247 if revs is None:
1230 try:
1248 try:
1231 return self.index.headrevs()
1249 return self.index.headrevs()
1232 except AttributeError:
1250 except AttributeError:
1233 return self._headrevs()
1251 return self._headrevs()
1234 if rustdagop is not None:
1252 if rustdagop is not None:
1235 return rustdagop.headrevs(self.index, revs)
1253 return rustdagop.headrevs(self.index, revs)
1236 return dagop.headrevs(revs, self._uncheckedparentrevs)
1254 return dagop.headrevs(revs, self._uncheckedparentrevs)
1237
1255
1238 def computephases(self, roots):
1256 def computephases(self, roots):
1239 return self.index.computephasesmapsets(roots)
1257 return self.index.computephasesmapsets(roots)
1240
1258
1241 def _headrevs(self):
1259 def _headrevs(self):
1242 count = len(self)
1260 count = len(self)
1243 if not count:
1261 if not count:
1244 return [nullrev]
1262 return [nullrev]
1245 # we won't iterate over filtered revs, so nobody is a head at start
1263 # we won't iterate over filtered revs, so nobody is a head at start
1246 ishead = [0] * (count + 1)
1264 ishead = [0] * (count + 1)
1247 index = self.index
1265 index = self.index
1248 for r in self:
1266 for r in self:
1249 ishead[r] = 1 # I may be a head
1267 ishead[r] = 1 # I may be a head
1250 e = index[r]
1268 e = index[r]
1251 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1269 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1252 return [r for r, val in enumerate(ishead) if val]
1270 return [r for r, val in enumerate(ishead) if val]
1253
1271
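The marking scheme of _headrevs on a toy parent table: every revision starts as a candidate head and is cleared as soon as it appears as a parent (nullrev is skipped here instead of being absorbed by the spare slot the method uses):

    def headrevs(parentrevs, count):
        ishead = [1] * count  # everyone may be a head until proven otherwise
        for r in range(count):
            for p in parentrevs(r):
                if p >= 0:
                    ishead[p] = 0  # parents are not heads
        return [r for r, flag in enumerate(ishead) if flag]

    # 0 -> 1 -> 2 plus a side branch 0 -> 3: heads are 2 and 3
    parents = {0: [-1], 1: [0], 2: [1], 3: [0]}
    assert headrevs(parents.__getitem__, 4) == [2, 3]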
1254 def heads(self, start=None, stop=None):
1272 def heads(self, start=None, stop=None):
1255 """return the list of all nodes that have no children
1273 """return the list of all nodes that have no children
1256
1274
1257 if start is specified, only heads that are descendants of
1275 if start is specified, only heads that are descendants of
1258 start will be returned
1276 start will be returned
1259 if stop is specified, it will consider all the revs from stop
1277 if stop is specified, it will consider all the revs from stop
1260 as if they had no children
1278 as if they had no children
1261 """
1279 """
1262 if start is None and stop is None:
1280 if start is None and stop is None:
1263 if not len(self):
1281 if not len(self):
1264 return [self.nullid]
1282 return [self.nullid]
1265 return [self.node(r) for r in self.headrevs()]
1283 return [self.node(r) for r in self.headrevs()]
1266
1284
1267 if start is None:
1285 if start is None:
1268 start = nullrev
1286 start = nullrev
1269 else:
1287 else:
1270 start = self.rev(start)
1288 start = self.rev(start)
1271
1289
1272 stoprevs = {self.rev(n) for n in stop or []}
1290 stoprevs = {self.rev(n) for n in stop or []}
1273
1291
1274 revs = dagop.headrevssubset(
1292 revs = dagop.headrevssubset(
1275 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1293 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1276 )
1294 )
1277
1295
1278 return [self.node(rev) for rev in revs]
1296 return [self.node(rev) for rev in revs]
1279
1297
1280 def children(self, node):
1298 def children(self, node):
1281 """find the children of a given node"""
1299 """find the children of a given node"""
1282 c = []
1300 c = []
1283 p = self.rev(node)
1301 p = self.rev(node)
1284 for r in self.revs(start=p + 1):
1302 for r in self.revs(start=p + 1):
1285 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1303 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1286 if prevs:
1304 if prevs:
1287 for pr in prevs:
1305 for pr in prevs:
1288 if pr == p:
1306 if pr == p:
1289 c.append(self.node(r))
1307 c.append(self.node(r))
1290 elif p == nullrev:
1308 elif p == nullrev:
1291 c.append(self.node(r))
1309 c.append(self.node(r))
1292 return c
1310 return c
1293
1311
1294 def commonancestorsheads(self, a, b):
1312 def commonancestorsheads(self, a, b):
1295 """calculate all the heads of the common ancestors of nodes a and b"""
1313 """calculate all the heads of the common ancestors of nodes a and b"""
1296 a, b = self.rev(a), self.rev(b)
1314 a, b = self.rev(a), self.rev(b)
1297 ancs = self._commonancestorsheads(a, b)
1315 ancs = self._commonancestorsheads(a, b)
1298 return pycompat.maplist(self.node, ancs)
1316 return pycompat.maplist(self.node, ancs)
1299
1317
1300 def _commonancestorsheads(self, *revs):
1318 def _commonancestorsheads(self, *revs):
1301 """calculate all the heads of the common ancestors of revs"""
1319 """calculate all the heads of the common ancestors of revs"""
1302 try:
1320 try:
1303 ancs = self.index.commonancestorsheads(*revs)
1321 ancs = self.index.commonancestorsheads(*revs)
1304 except (AttributeError, OverflowError): # C implementation failed
1322 except (AttributeError, OverflowError): # C implementation failed
1305 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1323 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1306 return ancs
1324 return ancs
1307
1325
1308 def isancestor(self, a, b):
1326 def isancestor(self, a, b):
1309 """return True if node a is an ancestor of node b
1327 """return True if node a is an ancestor of node b
1310
1328
1311 A revision is considered an ancestor of itself."""
1329 A revision is considered an ancestor of itself."""
1312 a, b = self.rev(a), self.rev(b)
1330 a, b = self.rev(a), self.rev(b)
1313 return self.isancestorrev(a, b)
1331 return self.isancestorrev(a, b)
1314
1332
1315 def isancestorrev(self, a, b):
1333 def isancestorrev(self, a, b):
1316 """return True if revision a is an ancestor of revision b
1334 """return True if revision a is an ancestor of revision b
1317
1335
1318 A revision is considered an ancestor of itself.
1336 A revision is considered an ancestor of itself.
1319
1337
1320 The implementation of this is trivial but the use of
1338 The implementation of this is trivial but the use of
1321 reachableroots is not."""
1339 reachableroots is not."""
1322 if a == nullrev:
1340 if a == nullrev:
1323 return True
1341 return True
1324 elif a == b:
1342 elif a == b:
1325 return True
1343 return True
1326 elif a > b:
1344 elif a > b:
1327 return False
1345 return False
1328 return bool(self.reachableroots(a, [b], [a], includepath=False))
1346 return bool(self.reachableroots(a, [b], [a], includepath=False))
1329
1347
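The early `a > b` return above relies on the revlog invariant that parents always have smaller revision numbers than their children; a naive walk built on the same invariant (the real method delegates to reachableroots):

    def isancestorrev(parentrevs, a, b):
        # walk b's ancestry; revision numbers only shrink, so prune below a
        stack, seen = [b], set()
        while stack:
            r = stack.pop()
            if r == a:
                return True
            if r < a or r in seen:
                continue
            seen.add(r)
            stack.extend(p for p in parentrevs(r) if p >= 0)
        return False

    parents = {0: [-1], 1: [0], 2: [1], 3: [0]}
    assert isancestorrev(parents.__getitem__, 0, 3)
    assert not isancestorrev(parents.__getitem__, 1, 3)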
1330 def reachableroots(self, minroot, heads, roots, includepath=False):
1348 def reachableroots(self, minroot, heads, roots, includepath=False):
1331 """return (heads(::(<roots> and <roots>::<heads>)))
1349 """return (heads(::(<roots> and <roots>::<heads>)))
1332
1350
1333 If includepath is True, return (<roots>::<heads>)."""
1351 If includepath is True, return (<roots>::<heads>)."""
1334 try:
1352 try:
1335 return self.index.reachableroots2(
1353 return self.index.reachableroots2(
1336 minroot, heads, roots, includepath
1354 minroot, heads, roots, includepath
1337 )
1355 )
1338 except AttributeError:
1356 except AttributeError:
1339 return dagop._reachablerootspure(
1357 return dagop._reachablerootspure(
1340 self.parentrevs, minroot, roots, heads, includepath
1358 self.parentrevs, minroot, roots, heads, includepath
1341 )
1359 )
1342
1360
1343 def ancestor(self, a, b):
1361 def ancestor(self, a, b):
1344 """calculate the "best" common ancestor of nodes a and b"""
1362 """calculate the "best" common ancestor of nodes a and b"""
1345
1363
1346 a, b = self.rev(a), self.rev(b)
1364 a, b = self.rev(a), self.rev(b)
1347 try:
1365 try:
1348 ancs = self.index.ancestors(a, b)
1366 ancs = self.index.ancestors(a, b)
1349 except (AttributeError, OverflowError):
1367 except (AttributeError, OverflowError):
1350 ancs = ancestor.ancestors(self.parentrevs, a, b)
1368 ancs = ancestor.ancestors(self.parentrevs, a, b)
1351 if ancs:
1369 if ancs:
1352 # choose a consistent winner when there's a tie
1370 # choose a consistent winner when there's a tie
1353 return min(map(self.node, ancs))
1371 return min(map(self.node, ancs))
1354 return self.nullid
1372 return self.nullid
1355
1373
1356 def _match(self, id):
1374 def _match(self, id):
1357 if isinstance(id, int):
1375 if isinstance(id, int):
1358 # rev
1376 # rev
1359 return self.node(id)
1377 return self.node(id)
1360 if len(id) == self.nodeconstants.nodelen:
1378 if len(id) == self.nodeconstants.nodelen:
1361 # possibly a binary node
1379 # possibly a binary node
1362 # odds of a binary node being all hex in ASCII are 1 in 10**25
1380 # odds of a binary node being all hex in ASCII are 1 in 10**25
1363 try:
1381 try:
1364 node = id
1382 node = id
1365 self.rev(node) # quick search the index
1383 self.rev(node) # quick search the index
1366 return node
1384 return node
1367 except error.LookupError:
1385 except error.LookupError:
1368 pass # may be partial hex id
1386 pass # may be partial hex id
1369 try:
1387 try:
1370 # str(rev)
1388 # str(rev)
1371 rev = int(id)
1389 rev = int(id)
1372 if b"%d" % rev != id:
1390 if b"%d" % rev != id:
1373 raise ValueError
1391 raise ValueError
1374 if rev < 0:
1392 if rev < 0:
1375 rev = len(self) + rev
1393 rev = len(self) + rev
1376 if rev < 0 or rev >= len(self):
1394 if rev < 0 or rev >= len(self):
1377 raise ValueError
1395 raise ValueError
1378 return self.node(rev)
1396 return self.node(rev)
1379 except (ValueError, OverflowError):
1397 except (ValueError, OverflowError):
1380 pass
1398 pass
1381 if len(id) == 2 * self.nodeconstants.nodelen:
1399 if len(id) == 2 * self.nodeconstants.nodelen:
1382 try:
1400 try:
1383 # a full hex nodeid?
1401 # a full hex nodeid?
1384 node = bin(id)
1402 node = bin(id)
1385 self.rev(node)
1403 self.rev(node)
1386 return node
1404 return node
1387 except (TypeError, error.LookupError):
1405 except (TypeError, error.LookupError):
1388 pass
1406 pass
1389
1407
1390 def _partialmatch(self, id):
1408 def _partialmatch(self, id):
1391 # we don't care about wdirfilenodeids as they should always be full hashes
1409 # we don't care about wdirfilenodeids as they should always be full hashes
1392 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1410 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1393 try:
1411 try:
1394 partial = self.index.partialmatch(id)
1412 partial = self.index.partialmatch(id)
1395 if partial and self.hasnode(partial):
1413 if partial and self.hasnode(partial):
1396 if maybewdir:
1414 if maybewdir:
1397 # single 'ff...' match in radix tree, ambiguous with wdir
1415 # single 'ff...' match in radix tree, ambiguous with wdir
1398 raise error.RevlogError
1416 raise error.RevlogError
1399 return partial
1417 return partial
1400 if maybewdir:
1418 if maybewdir:
1401 # no 'ff...' match in radix tree, wdir identified
1419 # no 'ff...' match in radix tree, wdir identified
1402 raise error.WdirUnsupported
1420 raise error.WdirUnsupported
1403 return None
1421 return None
1404 except error.RevlogError:
1422 except error.RevlogError:
1405 # parsers.c radix tree lookup gave multiple matches
1423 # parsers.c radix tree lookup gave multiple matches
1406 # fast path: for unfiltered changelog, radix tree is accurate
1424 # fast path: for unfiltered changelog, radix tree is accurate
1407 if not getattr(self, 'filteredrevs', None):
1425 if not getattr(self, 'filteredrevs', None):
1408 raise error.AmbiguousPrefixLookupError(
1426 raise error.AmbiguousPrefixLookupError(
1409 id, self.display_id, _(b'ambiguous identifier')
1427 id, self.display_id, _(b'ambiguous identifier')
1410 )
1428 )
1411 # fall through to slow path that filters hidden revisions
1429 # fall through to slow path that filters hidden revisions
1412 except (AttributeError, ValueError):
1430 except (AttributeError, ValueError):
1413 # we are pure python, or key was too short to search radix tree
1431 # we are pure python, or key was too short to search radix tree
1414 pass
1432 pass
1415
1433
1416 if id in self._pcache:
1434 if id in self._pcache:
1417 return self._pcache[id]
1435 return self._pcache[id]
1418
1436
1419 if len(id) <= 40:
1437 if len(id) <= 40:
1420 try:
1438 try:
1421 # hex(node)[:...]
1439 # hex(node)[:...]
1422 l = len(id) // 2 # grab an even number of digits
1440 l = len(id) // 2 # grab an even number of digits
1423 prefix = bin(id[: l * 2])
1441 prefix = bin(id[: l * 2])
1424 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1442 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1425 nl = [
1443 nl = [
1426 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1444 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1427 ]
1445 ]
1428 if self.nodeconstants.nullhex.startswith(id):
1446 if self.nodeconstants.nullhex.startswith(id):
1429 nl.append(self.nullid)
1447 nl.append(self.nullid)
1430 if len(nl) > 0:
1448 if len(nl) > 0:
1431 if len(nl) == 1 and not maybewdir:
1449 if len(nl) == 1 and not maybewdir:
1432 self._pcache[id] = nl[0]
1450 self._pcache[id] = nl[0]
1433 return nl[0]
1451 return nl[0]
1434 raise error.AmbiguousPrefixLookupError(
1452 raise error.AmbiguousPrefixLookupError(
1435 id, self.display_id, _(b'ambiguous identifier')
1453 id, self.display_id, _(b'ambiguous identifier')
1436 )
1454 )
1437 if maybewdir:
1455 if maybewdir:
1438 raise error.WdirUnsupported
1456 raise error.WdirUnsupported
1439 return None
1457 return None
1440 except TypeError:
1458 except TypeError:
1441 pass
1459 pass
1442
1460
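# A condensed sketch of the pure-Python prefix scan above, over a
# hypothetical iterable of binary nodeids and a bytes hex prefix (the
# real code walks index entries and additionally handles the null node,
# the working-directory id, and hidden revisions):

from binascii import hexlify, unhexlify

def resolve_prefix_sketch(nodes, hexprefix):
    l = len(hexprefix) // 2  # grab an even number of hex digits
    binprefix = unhexlify(hexprefix[: l * 2])
    matches = [
        n
        for n in nodes
        if n.startswith(binprefix) and hexlify(n).startswith(hexprefix)
    ]
    if len(matches) > 1:
        raise LookupError('ambiguous identifier: %r' % hexprefix)
    return matches[0] if matches else None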
1443 def lookup(self, id):
1461 def lookup(self, id):
1444 """locate a node based on:
1462 """locate a node based on:
1445 - revision number or str(revision number)
1463 - revision number or str(revision number)
1446 - nodeid or subset of hex nodeid
1464 - nodeid or subset of hex nodeid
1447 """
1465 """
1448 n = self._match(id)
1466 n = self._match(id)
1449 if n is not None:
1467 if n is not None:
1450 return n
1468 return n
1451 n = self._partialmatch(id)
1469 n = self._partialmatch(id)
1452 if n:
1470 if n:
1453 return n
1471 return n
1454
1472
1455 raise error.LookupError(id, self.display_id, _(b'no match found'))
1473 raise error.LookupError(id, self.display_id, _(b'no match found'))
1456
1474
1457 def shortest(self, node, minlength=1):
1475 def shortest(self, node, minlength=1):
1458 """Find the shortest unambiguous prefix that matches node."""
1476 """Find the shortest unambiguous prefix that matches node."""
1459
1477
1460 def isvalid(prefix):
1478 def isvalid(prefix):
1461 try:
1479 try:
1462 matchednode = self._partialmatch(prefix)
1480 matchednode = self._partialmatch(prefix)
1463 except error.AmbiguousPrefixLookupError:
1481 except error.AmbiguousPrefixLookupError:
1464 return False
1482 return False
1465 except error.WdirUnsupported:
1483 except error.WdirUnsupported:
1466 # single 'ff...' match
1484 # single 'ff...' match
1467 return True
1485 return True
1468 if matchednode is None:
1486 if matchednode is None:
1469 raise error.LookupError(node, self.display_id, _(b'no node'))
1487 raise error.LookupError(node, self.display_id, _(b'no node'))
1470 return True
1488 return True
1471
1489
1472 def maybewdir(prefix):
1490 def maybewdir(prefix):
1473 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1491 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1474
1492
1475 hexnode = hex(node)
1493 hexnode = hex(node)
1476
1494
1477 def disambiguate(hexnode, minlength):
1495 def disambiguate(hexnode, minlength):
1478 """Disambiguate against wdirid."""
1496 """Disambiguate against wdirid."""
1479 for length in range(minlength, len(hexnode) + 1):
1497 for length in range(minlength, len(hexnode) + 1):
1480 prefix = hexnode[:length]
1498 prefix = hexnode[:length]
1481 if not maybewdir(prefix):
1499 if not maybewdir(prefix):
1482 return prefix
1500 return prefix
1483
1501
1484 if not getattr(self, 'filteredrevs', None):
1502 if not getattr(self, 'filteredrevs', None):
1485 try:
1503 try:
1486 length = max(self.index.shortest(node), minlength)
1504 length = max(self.index.shortest(node), minlength)
1487 return disambiguate(hexnode, length)
1505 return disambiguate(hexnode, length)
1488 except error.RevlogError:
1506 except error.RevlogError:
1489 if node != self.nodeconstants.wdirid:
1507 if node != self.nodeconstants.wdirid:
1490 raise error.LookupError(
1508 raise error.LookupError(
1491 node, self.display_id, _(b'no node')
1509 node, self.display_id, _(b'no node')
1492 )
1510 )
1493 except AttributeError:
1511 except AttributeError:
1494 # Fall through to pure code
1512 # Fall through to pure code
1495 pass
1513 pass
1496
1514
1497 if node == self.nodeconstants.wdirid:
1515 if node == self.nodeconstants.wdirid:
1498 for length in range(minlength, len(hexnode) + 1):
1516 for length in range(minlength, len(hexnode) + 1):
1499 prefix = hexnode[:length]
1517 prefix = hexnode[:length]
1500 if isvalid(prefix):
1518 if isvalid(prefix):
1501 return prefix
1519 return prefix
1502
1520
1503 for length in range(minlength, len(hexnode) + 1):
1521 for length in range(minlength, len(hexnode) + 1):
1504 prefix = hexnode[:length]
1522 prefix = hexnode[:length]
1505 if isvalid(prefix):
1523 if isvalid(prefix):
1506 return disambiguate(hexnode, length)
1524 return disambiguate(hexnode, length)
1507
1525
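# shortest() above grows a candidate prefix one hex digit at a time
# until exactly one node matches, then keeps growing while the prefix
# could still be mistaken for the all-'f' working-directory id. A
# compact sketch over a plain list of hex node strings (hypothetical
# input; the real code queries the index and handles filtered revs):

def shortest_prefix_sketch(all_hexnodes, hexnode, minlength=1):
    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        hits = [h for h in all_hexnodes if h.startswith(prefix)]
        maybe_wdir = all(c == 'f' for c in prefix)
        if hits == [hexnode] and not maybe_wdir:
            return prefix
    return hexnode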
1508 def cmp(self, node, text):
1526 def cmp(self, node, text):
1509 """compare text with a given file revision
1527 """compare text with a given file revision
1510
1528
1511 returns True if text is different from what is stored.
1529 returns True if text is different from what is stored.
1512 """
1530 """
1513 p1, p2 = self.parents(node)
1531 p1, p2 = self.parents(node)
1514 return storageutil.hashrevisionsha1(text, p1, p2) != node
1532 return storageutil.hashrevisionsha1(text, p1, p2) != node
1515
1533
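# cmp() above works because revlog nodeids are content addresses: the
# node is conventionally the SHA-1 of the two parent nodes in sorted
# order followed by the revision text, so comparing hashes compares
# content without materializing the stored text. A sketch of that
# convention (storageutil.hashrevisionsha1 is the real helper):

import hashlib

def hashrevision_sketch(text, p1, p2):
    s = hashlib.sha1(min(p1, p2))
    s.update(max(p1, p2))
    s.update(text)
    return s.digest()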
1516 def _cachesegment(self, offset, data):
1534 def _cachesegment(self, offset, data):
1517 """Add a segment to the revlog cache.
1535 """Add a segment to the revlog cache.
1518
1536
1519 Accepts an absolute offset and the data that is at that location.
1537 Accepts an absolute offset and the data that is at that location.
1520 """
1538 """
1521 o, d = self._chunkcache
1539 o, d = self._chunkcache
1522 # try to add to existing cache
1540 # try to add to existing cache
1523 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1541 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1524 self._chunkcache = o, d + data
1542 self._chunkcache = o, d + data
1525 else:
1543 else:
1526 self._chunkcache = offset, data
1544 self._chunkcache = offset, data
1527
1545
1528 def _readsegment(self, offset, length, df=None):
1546 def _readsegment(self, offset, length, df=None):
1529 """Load a segment of raw data from the revlog.
1547 """Load a segment of raw data from the revlog.
1530
1548
1531 Accepts an absolute offset, length to read, and an optional existing
1549 Accepts an absolute offset, length to read, and an optional existing
1532 file handle to read from.
1550 file handle to read from.
1533
1551
1534 If an existing file handle is passed, it will be seeked and the
1552 If an existing file handle is passed, it will be seeked and the
1535 original seek position will NOT be restored.
1553 original seek position will NOT be restored.
1536
1554
1537 Returns a str or buffer of raw byte data.
1555 Returns a str or buffer of raw byte data.
1538
1556
1539 Raises if the requested number of bytes could not be read.
1557 Raises if the requested number of bytes could not be read.
1540 """
1558 """
1541 # Cache data both forward and backward around the requested
1559 # Cache data both forward and backward around the requested
1542 # data, in a fixed size window. This helps speed up operations
1560 # data, in a fixed size window. This helps speed up operations
1543 # involving reading the revlog backwards.
1561 # involving reading the revlog backwards.
1544 cachesize = self._chunkcachesize
1562 cachesize = self._chunkcachesize
1545 realoffset = offset & ~(cachesize - 1)
1563 realoffset = offset & ~(cachesize - 1)
1546 reallength = (
1564 reallength = (
1547 (offset + length + cachesize) & ~(cachesize - 1)
1565 (offset + length + cachesize) & ~(cachesize - 1)
1548 ) - realoffset
1566 ) - realoffset
1549 with self._datareadfp(df) as df:
1567 with self._datareadfp(df) as df:
1550 df.seek(realoffset)
1568 df.seek(realoffset)
1551 d = df.read(reallength)
1569 d = df.read(reallength)
1552
1570
1553 self._cachesegment(realoffset, d)
1571 self._cachesegment(realoffset, d)
1554 if offset != realoffset or reallength != length:
1572 if offset != realoffset or reallength != length:
1555 startoffset = offset - realoffset
1573 startoffset = offset - realoffset
1556 if len(d) - startoffset < length:
1574 if len(d) - startoffset < length:
1557 raise error.RevlogError(
1575 raise error.RevlogError(
1558 _(
1576 _(
1559 b'partial read of revlog %s; expected %d bytes from '
1577 b'partial read of revlog %s; expected %d bytes from '
1560 b'offset %d, got %d'
1578 b'offset %d, got %d'
1561 )
1579 )
1562 % (
1580 % (
1563 self._indexfile if self._inline else self._datafile,
1581 self._indexfile if self._inline else self._datafile,
1564 length,
1582 length,
1565 offset,
1583 offset,
1566 len(d) - startoffset,
1584 len(d) - startoffset,
1567 )
1585 )
1568 )
1586 )
1569
1587
1570 return util.buffer(d, startoffset, length)
1588 return util.buffer(d, startoffset, length)
1571
1589
1572 if len(d) < length:
1590 if len(d) < length:
1573 raise error.RevlogError(
1591 raise error.RevlogError(
1574 _(
1592 _(
1575 b'partial read of revlog %s; expected %d bytes from offset '
1593 b'partial read of revlog %s; expected %d bytes from offset '
1576 b'%d, got %d'
1594 b'%d, got %d'
1577 )
1595 )
1578 % (
1596 % (
1579 self._indexfile if self._inline else self._datafile,
1597 self._indexfile if self._inline else self._datafile,
1580 length,
1598 length,
1581 offset,
1599 offset,
1582 len(d),
1600 len(d),
1583 )
1601 )
1584 )
1602 )
1585
1603
1586 return d
1604 return d
1587
1605
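# The read-window arithmetic in _readsegment rounds both ends of the
# requested range out to cache-size-aligned boundaries; the bit mask
# only works because the chunk cache size is a power of two. Worked
# example with a 64KiB window:

def aligned_window(offset, length, cachesize=65536):
    realoffset = offset & ~(cachesize - 1)  # round start down
    realend = (offset + length + cachesize) & ~(cachesize - 1)
    return realoffset, realend - realoffset

# aligned_window(70000, 100) == (65536, 65536): a 100-byte request near
# the start of the second window fetches that whole 64KiB window.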
1588 def _getsegment(self, offset, length, df=None):
1606 def _getsegment(self, offset, length, df=None):
1589 """Obtain a segment of raw data from the revlog.
1607 """Obtain a segment of raw data from the revlog.
1590
1608
1591 Accepts an absolute offset, length of bytes to obtain, and an
1609 Accepts an absolute offset, length of bytes to obtain, and an
1592 optional file handle to the already-opened revlog. If the file
1610 optional file handle to the already-opened revlog. If the file
1593 handle is used, its original seek position will not be preserved.
1611 handle is used, its original seek position will not be preserved.
1594
1612
1595 Requests for data may be returned from a cache.
1613 Requests for data may be returned from a cache.
1596
1614
1597 Returns a str or a buffer instance of raw byte data.
1615 Returns a str or a buffer instance of raw byte data.
1598 """
1616 """
1599 o, d = self._chunkcache
1617 o, d = self._chunkcache
1600 l = len(d)
1618 l = len(d)
1601
1619
1602 # is it in the cache?
1620 # is it in the cache?
1603 cachestart = offset - o
1621 cachestart = offset - o
1604 cacheend = cachestart + length
1622 cacheend = cachestart + length
1605 if cachestart >= 0 and cacheend <= l:
1623 if cachestart >= 0 and cacheend <= l:
1606 if cachestart == 0 and cacheend == l:
1624 if cachestart == 0 and cacheend == l:
1607 return d # avoid a copy
1625 return d # avoid a copy
1608 return util.buffer(d, cachestart, cacheend - cachestart)
1626 return util.buffer(d, cachestart, cacheend - cachestart)
1609
1627
1610 return self._readsegment(offset, length, df=df)
1628 return self._readsegment(offset, length, df=df)
1611
1629
1612 def _getsegmentforrevs(self, startrev, endrev, df=None):
1630 def _getsegmentforrevs(self, startrev, endrev, df=None):
1613 """Obtain a segment of raw data corresponding to a range of revisions.
1631 """Obtain a segment of raw data corresponding to a range of revisions.
1614
1632
1615 Accepts the start and end revisions and an optional already-open
1633 Accepts the start and end revisions and an optional already-open
1616 file handle to be used for reading. If the file handle is used, its
1634 file handle to be used for reading. If the file handle is used, its
1617 seek position will not be preserved.
1635 seek position will not be preserved.
1618
1636
1619 Requests for data may be satisfied by a cache.
1637 Requests for data may be satisfied by a cache.
1620
1638
1621 Returns a 2-tuple of (offset, data) for the requested range of
1639 Returns a 2-tuple of (offset, data) for the requested range of
1622 revisions. Offset is the integer offset from the beginning of the
1640 revisions. Offset is the integer offset from the beginning of the
1623 revlog and data is a str or buffer of the raw byte data.
1641 revlog and data is a str or buffer of the raw byte data.
1624
1642
1625 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1643 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1626 to determine where each revision's data begins and ends.
1644 to determine where each revision's data begins and ends.
1627 """
1645 """
1628 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1646 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1629 # (functions are expensive).
1647 # (functions are expensive).
1630 index = self.index
1648 index = self.index
1631 istart = index[startrev]
1649 istart = index[startrev]
1632 start = int(istart[0] >> 16)
1650 start = int(istart[0] >> 16)
1633 if startrev == endrev:
1651 if startrev == endrev:
1634 end = start + istart[1]
1652 end = start + istart[1]
1635 else:
1653 else:
1636 iend = index[endrev]
1654 iend = index[endrev]
1637 end = int(iend[0] >> 16) + iend[1]
1655 end = int(iend[0] >> 16) + iend[1]
1638
1656
1639 if self._inline:
1657 if self._inline:
1640 start += (startrev + 1) * self.index.entry_size
1658 start += (startrev + 1) * self.index.entry_size
1641 end += (endrev + 1) * self.index.entry_size
1659 end += (endrev + 1) * self.index.entry_size
1642 length = end - start
1660 length = end - start
1643
1661
1644 return start, self._getsegment(start, length, df=df)
1662 return start, self._getsegment(start, length, df=df)
1645
1663
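# In an inline revlog, index entries and revision data are interleaved
# in the one .i file (entry 0, data 0, entry 1, data 1, ...), so a
# pure-data offset must be shifted by one index entry per revision up
# to and including the one being read. A sketch of the adjustment used
# above, assuming the 64-byte revlogv1 entry size:

def inline_data_offset_sketch(dataoffset, rev, entry_size=64):
    """translate a data-only offset into an inline-file offset"""
    return dataoffset + (rev + 1) * entry_size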
1646 def _chunk(self, rev, df=None):
1664 def _chunk(self, rev, df=None):
1647 """Obtain a single decompressed chunk for a revision.
1665 """Obtain a single decompressed chunk for a revision.
1648
1666
1649 Accepts an integer revision and an optional already-open file handle
1667 Accepts an integer revision and an optional already-open file handle
1650 to be used for reading. If used, the seek position of the file will not
1668 to be used for reading. If used, the seek position of the file will not
1651 be preserved.
1669 be preserved.
1652
1670
1653 Returns a str holding uncompressed data for the requested revision.
1671 Returns a str holding uncompressed data for the requested revision.
1654 """
1672 """
1655 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1673 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1656
1674
1657 def _chunks(self, revs, df=None, targetsize=None):
1675 def _chunks(self, revs, df=None, targetsize=None):
1658 """Obtain decompressed chunks for the specified revisions.
1676 """Obtain decompressed chunks for the specified revisions.
1659
1677
1660 Accepts an iterable of numeric revisions that are assumed to be in
1678 Accepts an iterable of numeric revisions that are assumed to be in
1661 ascending order. Also accepts an optional already-open file handle
1679 ascending order. Also accepts an optional already-open file handle
1662 to be used for reading. If used, the seek position of the file will
1680 to be used for reading. If used, the seek position of the file will
1663 not be preserved.
1681 not be preserved.
1664
1682
1665 This function is similar to calling ``self._chunk()`` multiple times,
1683 This function is similar to calling ``self._chunk()`` multiple times,
1666 but is faster.
1684 but is faster.
1667
1685
1668 Returns a list with decompressed data for each requested revision.
1686 Returns a list with decompressed data for each requested revision.
1669 """
1687 """
1670 if not revs:
1688 if not revs:
1671 return []
1689 return []
1672 start = self.start
1690 start = self.start
1673 length = self.length
1691 length = self.length
1674 inline = self._inline
1692 inline = self._inline
1675 iosize = self.index.entry_size
1693 iosize = self.index.entry_size
1676 buffer = util.buffer
1694 buffer = util.buffer
1677
1695
1678 l = []
1696 l = []
1679 ladd = l.append
1697 ladd = l.append
1680
1698
1681 if not self._withsparseread:
1699 if not self._withsparseread:
1682 slicedchunks = (revs,)
1700 slicedchunks = (revs,)
1683 else:
1701 else:
1684 slicedchunks = deltautil.slicechunk(
1702 slicedchunks = deltautil.slicechunk(
1685 self, revs, targetsize=targetsize
1703 self, revs, targetsize=targetsize
1686 )
1704 )
1687
1705
1688 for revschunk in slicedchunks:
1706 for revschunk in slicedchunks:
1689 firstrev = revschunk[0]
1707 firstrev = revschunk[0]
1690 # Skip trailing revisions with empty diff
1708 # Skip trailing revisions with empty diff
1691 for lastrev in revschunk[::-1]:
1709 for lastrev in revschunk[::-1]:
1692 if length(lastrev) != 0:
1710 if length(lastrev) != 0:
1693 break
1711 break
1694
1712
1695 try:
1713 try:
1696 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1714 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1697 except OverflowError:
1715 except OverflowError:
1698 # issue4215 - we can't cache a run of chunks greater than
1716 # issue4215 - we can't cache a run of chunks greater than
1699 # 2G on Windows
1717 # 2G on Windows
1700 return [self._chunk(rev, df=df) for rev in revschunk]
1718 return [self._chunk(rev, df=df) for rev in revschunk]
1701
1719
1702 decomp = self.decompress
1720 decomp = self.decompress
1703 for rev in revschunk:
1721 for rev in revschunk:
1704 chunkstart = start(rev)
1722 chunkstart = start(rev)
1705 if inline:
1723 if inline:
1706 chunkstart += (rev + 1) * iosize
1724 chunkstart += (rev + 1) * iosize
1707 chunklength = length(rev)
1725 chunklength = length(rev)
1708 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1726 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1709
1727
1710 return l
1728 return l
1711
1729
1712 def _chunkclear(self):
1730 def _chunkclear(self):
1713 """Clear the raw chunk cache."""
1731 """Clear the raw chunk cache."""
1714 self._chunkcache = (0, b'')
1732 self._chunkcache = (0, b'')
1715
1733
1716 def deltaparent(self, rev):
1734 def deltaparent(self, rev):
1717 """return deltaparent of the given revision"""
1735 """return deltaparent of the given revision"""
1718 base = self.index[rev][3]
1736 base = self.index[rev][3]
1719 if base == rev:
1737 if base == rev:
1720 return nullrev
1738 return nullrev
1721 elif self._generaldelta:
1739 elif self._generaldelta:
1722 return base
1740 return base
1723 else:
1741 else:
1724 return rev - 1
1742 return rev - 1
1725
1743
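# deltaparent() above is one step of a delta-chain walk: with
# generaldelta the index names each revision's base explicitly,
# otherwise the base is implicitly the previous revision. A sketch of
# recovering a full chain by iterating that step (the real _deltachain
# can also stop early at a cached revision):

def deltachain_sketch(deltaparent, rev, nullrev=-1):
    """return the revisions to patch together, base first"""
    chain = []
    while rev != nullrev:
        chain.append(rev)
        rev = deltaparent(rev)
    chain.reverse()
    return chain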
1726 def issnapshot(self, rev):
1744 def issnapshot(self, rev):
1727 """tells whether rev is a snapshot"""
1745 """tells whether rev is a snapshot"""
1728 if not self._sparserevlog:
1746 if not self._sparserevlog:
1729 return self.deltaparent(rev) == nullrev
1747 return self.deltaparent(rev) == nullrev
1730 elif util.safehasattr(self.index, b'issnapshot'):
1748 elif util.safehasattr(self.index, b'issnapshot'):
1731 # directly assign the method so later calls skip the hasattr test and lookup
1749 # directly assign the method so later calls skip the hasattr test and lookup
1732 self.issnapshot = self.index.issnapshot
1750 self.issnapshot = self.index.issnapshot
1733 return self.issnapshot(rev)
1751 return self.issnapshot(rev)
1734 if rev == nullrev:
1752 if rev == nullrev:
1735 return True
1753 return True
1736 entry = self.index[rev]
1754 entry = self.index[rev]
1737 base = entry[3]
1755 base = entry[3]
1738 if base == rev:
1756 if base == rev:
1739 return True
1757 return True
1740 if base == nullrev:
1758 if base == nullrev:
1741 return True
1759 return True
1742 p1 = entry[5]
1760 p1 = entry[5]
1743 p2 = entry[6]
1761 p2 = entry[6]
1744 if base == p1 or base == p2:
1762 if base == p1 or base == p2:
1745 return False
1763 return False
1746 return self.issnapshot(base)
1764 return self.issnapshot(base)
1747
1765
1748 def snapshotdepth(self, rev):
1766 def snapshotdepth(self, rev):
1749 """number of snapshot in the chain before this one"""
1767 """number of snapshot in the chain before this one"""
1750 if not self.issnapshot(rev):
1768 if not self.issnapshot(rev):
1751 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1769 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1752 return len(self._deltachain(rev)[0]) - 1
1770 return len(self._deltachain(rev)[0]) - 1
1753
1771
1754 def revdiff(self, rev1, rev2):
1772 def revdiff(self, rev1, rev2):
1755 """return or calculate a delta between two revisions
1773 """return or calculate a delta between two revisions
1756
1774
1757 The delta calculated is in binary form and is intended to be written to
1775 The delta calculated is in binary form and is intended to be written to
1758 revlog data directly. So this function needs raw revision data.
1776 revlog data directly. So this function needs raw revision data.
1759 """
1777 """
1760 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1778 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1761 return bytes(self._chunk(rev2))
1779 return bytes(self._chunk(rev2))
1762
1780
1763 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1781 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1764
1782
1765 def _processflags(self, text, flags, operation, raw=False):
1783 def _processflags(self, text, flags, operation, raw=False):
1766 """deprecated entry point to access flag processors"""
1784 """deprecated entry point to access flag processors"""
1767 msg = b'_processflag(...) use the specialized variant'
1785 msg = b'_processflag(...) use the specialized variant'
1768 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1786 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1769 if raw:
1787 if raw:
1770 return text, flagutil.processflagsraw(self, text, flags)
1788 return text, flagutil.processflagsraw(self, text, flags)
1771 elif operation == b'read':
1789 elif operation == b'read':
1772 return flagutil.processflagsread(self, text, flags)
1790 return flagutil.processflagsread(self, text, flags)
1773 else: # write operation
1791 else: # write operation
1774 return flagutil.processflagswrite(self, text, flags)
1792 return flagutil.processflagswrite(self, text, flags)
1775
1793
1776 def revision(self, nodeorrev, _df=None, raw=False):
1794 def revision(self, nodeorrev, _df=None, raw=False):
1777 """return an uncompressed revision of a given node or revision
1795 """return an uncompressed revision of a given node or revision
1778 number.
1796 number.
1779
1797
1780 _df - an existing file handle to read from. (internal-only)
1798 _df - an existing file handle to read from. (internal-only)
1781 raw - an optional argument specifying if the revision data is to be
1799 raw - an optional argument specifying if the revision data is to be
1782 treated as raw data when applying flag transforms. 'raw' should be set
1800 treated as raw data when applying flag transforms. 'raw' should be set
1783 to True when generating changegroups or in debug commands.
1801 to True when generating changegroups or in debug commands.
1784 """
1802 """
1785 if raw:
1803 if raw:
1786 msg = (
1804 msg = (
1787 b'revlog.revision(..., raw=True) is deprecated, '
1805 b'revlog.revision(..., raw=True) is deprecated, '
1788 b'use revlog.rawdata(...)'
1806 b'use revlog.rawdata(...)'
1789 )
1807 )
1790 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1791 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1809 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1792
1810
1793 def sidedata(self, nodeorrev, _df=None):
1811 def sidedata(self, nodeorrev, _df=None):
1794 """a map of extra data related to the changeset but not part of the hash
1812 """a map of extra data related to the changeset but not part of the hash
1795
1813
1796 This function currently returns a dictionary. However, a more advanced
1814 This function currently returns a dictionary. However, a more advanced
1797 mapping object will likely be used in the future for more
1815 mapping object will likely be used in the future for more
1798 efficient/lazy code.
1816 efficient/lazy code.
1799 """
1817 """
1800 return self._revisiondata(nodeorrev, _df)[1]
1818 return self._revisiondata(nodeorrev, _df)[1]
1801
1819
1802 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1820 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1803 # deal with <nodeorrev> argument type
1821 # deal with <nodeorrev> argument type
1804 if isinstance(nodeorrev, int):
1822 if isinstance(nodeorrev, int):
1805 rev = nodeorrev
1823 rev = nodeorrev
1806 node = self.node(rev)
1824 node = self.node(rev)
1807 else:
1825 else:
1808 node = nodeorrev
1826 node = nodeorrev
1809 rev = None
1827 rev = None
1810
1828
1811 # fast path the special `nullid` rev
1829 # fast path the special `nullid` rev
1812 if node == self.nullid:
1830 if node == self.nullid:
1813 return b"", {}
1831 return b"", {}
1814
1832
1815 # ``rawtext`` is the text as stored inside the revlog. Might be the
1833 # ``rawtext`` is the text as stored inside the revlog. Might be the
1816 # revision or might need to be processed to retrieve the revision.
1834 # revision or might need to be processed to retrieve the revision.
1817 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1835 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1818
1836
1819 if self.hassidedata:
1837 if self.hassidedata:
1820 if rev is None:
1838 if rev is None:
1821 rev = self.rev(node)
1839 rev = self.rev(node)
1822 sidedata = self._sidedata(rev)
1840 sidedata = self._sidedata(rev)
1823 else:
1841 else:
1824 sidedata = {}
1842 sidedata = {}
1825
1843
1826 if raw and validated:
1844 if raw and validated:
1827 # if we don't want to process the raw text and the raw
1845 # if we don't want to process the raw text and the raw
1828 # text is already cached, we can exit early.
1846 # text is already cached, we can exit early.
1829 return rawtext, sidedata
1847 return rawtext, sidedata
1830 if rev is None:
1848 if rev is None:
1831 rev = self.rev(node)
1849 rev = self.rev(node)
1832 # the revlog's flags for this revision
1850 # the revlog's flags for this revision
1833 # (they usually alter its state or content)
1851 # (they usually alter its state or content)
1834 flags = self.flags(rev)
1852 flags = self.flags(rev)
1835
1853
1836 if validated and flags == REVIDX_DEFAULT_FLAGS:
1854 if validated and flags == REVIDX_DEFAULT_FLAGS:
1837 # no extra flags set, no flag processor runs, text = rawtext
1855 # no extra flags set, no flag processor runs, text = rawtext
1838 return rawtext, sidedata
1856 return rawtext, sidedata
1839
1857
1840 if raw:
1858 if raw:
1841 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1859 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1842 text = rawtext
1860 text = rawtext
1843 else:
1861 else:
1844 r = flagutil.processflagsread(self, rawtext, flags)
1862 r = flagutil.processflagsread(self, rawtext, flags)
1845 text, validatehash = r
1863 text, validatehash = r
1846 if validatehash:
1864 if validatehash:
1847 self.checkhash(text, node, rev=rev)
1865 self.checkhash(text, node, rev=rev)
1848 if not validated:
1866 if not validated:
1849 self._revisioncache = (node, rev, rawtext)
1867 self._revisioncache = (node, rev, rawtext)
1850
1868
1851 return text, sidedata
1869 return text, sidedata
1852
1870
1853 def _rawtext(self, node, rev, _df=None):
1871 def _rawtext(self, node, rev, _df=None):
1854 """return the possibly unvalidated rawtext for a revision
1872 """return the possibly unvalidated rawtext for a revision
1855
1873
1856 returns (rev, rawtext, validated)
1874 returns (rev, rawtext, validated)
1857 """
1875 """
1858
1876
1859 # revision in the cache (could be useful to apply delta)
1877 # revision in the cache (could be useful to apply delta)
1860 cachedrev = None
1878 cachedrev = None
1861 # An intermediate text to apply deltas to
1879 # An intermediate text to apply deltas to
1862 basetext = None
1880 basetext = None
1863
1881
1864 # Check if we have the entry in cache
1882 # Check if we have the entry in cache
1865 # The cache entry looks like (node, rev, rawtext)
1883 # The cache entry looks like (node, rev, rawtext)
1866 if self._revisioncache:
1884 if self._revisioncache:
1867 if self._revisioncache[0] == node:
1885 if self._revisioncache[0] == node:
1868 return (rev, self._revisioncache[2], True)
1886 return (rev, self._revisioncache[2], True)
1869 cachedrev = self._revisioncache[1]
1887 cachedrev = self._revisioncache[1]
1870
1888
1871 if rev is None:
1889 if rev is None:
1872 rev = self.rev(node)
1890 rev = self.rev(node)
1873
1891
1874 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1892 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1875 if stopped:
1893 if stopped:
1876 basetext = self._revisioncache[2]
1894 basetext = self._revisioncache[2]
1877
1895
1878 # drop cache to save memory, the caller is expected to
1896 # drop cache to save memory, the caller is expected to
1879 # update self._revisioncache after validating the text
1897 # update self._revisioncache after validating the text
1880 self._revisioncache = None
1898 self._revisioncache = None
1881
1899
1882 targetsize = None
1900 targetsize = None
1883 rawsize = self.index[rev][2]
1901 rawsize = self.index[rev][2]
1884 if 0 <= rawsize:
1902 if 0 <= rawsize:
1885 targetsize = 4 * rawsize
1903 targetsize = 4 * rawsize
1886
1904
1887 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1905 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1888 if basetext is None:
1906 if basetext is None:
1889 basetext = bytes(bins[0])
1907 basetext = bytes(bins[0])
1890 bins = bins[1:]
1908 bins = bins[1:]
1891
1909
1892 rawtext = mdiff.patches(basetext, bins)
1910 rawtext = mdiff.patches(basetext, bins)
1893 del basetext # let us have a chance to free memory early
1911 del basetext # let us have a chance to free memory early
1894 return (rev, rawtext, False)
1912 return (rev, rawtext, False)
1895
1913
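# _rawtext above rebuilds a revision by decompressing the chunks of its
# delta chain and patching them, in order, onto the chain's base text
# (mdiff.patches does this in one call). The essence, with a
# hypothetical apply_patch(text, delta) standing in for mdiff:

def build_rawtext_sketch(base_text, deltas, apply_patch):
    text = base_text
    for delta in deltas:  # ordered from the base towards the target
        text = apply_patch(text, delta)
    return text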
1896 def _sidedata(self, rev):
1914 def _sidedata(self, rev):
1897 """Return the sidedata for a given revision number."""
1915 """Return the sidedata for a given revision number."""
1898 index_entry = self.index[rev]
1916 index_entry = self.index[rev]
1899 sidedata_offset = index_entry[8]
1917 sidedata_offset = index_entry[8]
1900 sidedata_size = index_entry[9]
1918 sidedata_size = index_entry[9]
1901
1919
1902 if self._inline:
1920 if self._inline:
1903 sidedata_offset += self.index.entry_size * (1 + rev)
1921 sidedata_offset += self.index.entry_size * (1 + rev)
1904 if sidedata_size == 0:
1922 if sidedata_size == 0:
1905 return {}
1923 return {}
1906
1924
1907 segment = self._getsegment(sidedata_offset, sidedata_size)
1925 segment = self._getsegment(sidedata_offset, sidedata_size)
1908 sidedata = sidedatautil.deserialize_sidedata(segment)
1926 sidedata = sidedatautil.deserialize_sidedata(segment)
1909 return sidedata
1927 return sidedata
1910
1928
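# Sidedata lives outside the hashed revision text: the index entry
# records its absolute offset and size, and a zero size means "no
# sidedata". A sketch of the lookup, assuming tuple positions 8 and 9
# as used above:

def sidedata_span_sketch(index_entry):
    offset, size = index_entry[8], index_entry[9]
    return None if size == 0 else (offset, size)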
1911 def rawdata(self, nodeorrev, _df=None):
1929 def rawdata(self, nodeorrev, _df=None):
1912 """return an uncompressed raw data of a given node or revision number.
1930 """return an uncompressed raw data of a given node or revision number.
1913
1931
1914 _df - an existing file handle to read from. (internal-only)
1932 _df - an existing file handle to read from. (internal-only)
1915 """
1933 """
1916 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1934 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1917
1935
1918 def hash(self, text, p1, p2):
1936 def hash(self, text, p1, p2):
1919 """Compute a node hash.
1937 """Compute a node hash.
1920
1938
1921 Available as a function so that subclasses can replace the hash
1939 Available as a function so that subclasses can replace the hash
1922 as needed.
1940 as needed.
1923 """
1941 """
1924 return storageutil.hashrevisionsha1(text, p1, p2)
1942 return storageutil.hashrevisionsha1(text, p1, p2)
1925
1943
1926 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1944 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1927 """Check node hash integrity.
1945 """Check node hash integrity.
1928
1946
1929 Available as a function so that subclasses can extend hash mismatch
1947 Available as a function so that subclasses can extend hash mismatch
1930 behaviors as needed.
1948 behaviors as needed.
1931 """
1949 """
1932 try:
1950 try:
1933 if p1 is None and p2 is None:
1951 if p1 is None and p2 is None:
1934 p1, p2 = self.parents(node)
1952 p1, p2 = self.parents(node)
1935 if node != self.hash(text, p1, p2):
1953 if node != self.hash(text, p1, p2):
1936 # Clear the revision cache on hash failure. The revision cache
1954 # Clear the revision cache on hash failure. The revision cache
1937 # only stores the raw revision and clearing the cache does have
1955 # only stores the raw revision and clearing the cache does have
1938 # the side-effect that we won't have a cache hit when the raw
1956 # the side-effect that we won't have a cache hit when the raw
1939 # revision data is accessed. But this case should be rare and
1957 # revision data is accessed. But this case should be rare and
1940 # it is extra work to teach the cache about the hash
1958 # it is extra work to teach the cache about the hash
1941 # verification state.
1959 # verification state.
1942 if self._revisioncache and self._revisioncache[0] == node:
1960 if self._revisioncache and self._revisioncache[0] == node:
1943 self._revisioncache = None
1961 self._revisioncache = None
1944
1962
1945 revornode = rev
1963 revornode = rev
1946 if revornode is None:
1964 if revornode is None:
1947 revornode = templatefilters.short(hex(node))
1965 revornode = templatefilters.short(hex(node))
1948 raise error.RevlogError(
1966 raise error.RevlogError(
1949 _(b"integrity check failed on %s:%s")
1967 _(b"integrity check failed on %s:%s")
1950 % (self.display_id, pycompat.bytestr(revornode))
1968 % (self.display_id, pycompat.bytestr(revornode))
1951 )
1969 )
1952 except error.RevlogError:
1970 except error.RevlogError:
1953 if self._censorable and storageutil.iscensoredtext(text):
1971 if self._censorable and storageutil.iscensoredtext(text):
1954 raise error.CensoredNodeError(self.display_id, node, text)
1972 raise error.CensoredNodeError(self.display_id, node, text)
1955 raise
1973 raise
1956
1974
1957 def _enforceinlinesize(self, tr):
1975 def _enforceinlinesize(self, tr):
1958 """Check if the revlog is too big for inline and convert if so.
1976 """Check if the revlog is too big for inline and convert if so.
1959
1977
1960 This should be called after revisions are added to the revlog. If the
1978 This should be called after revisions are added to the revlog. If the
1961 revlog has grown too large to remain inline, it will be converted
1979 revlog has grown too large to remain inline, it will be converted
1962 to use separate index and data files.
1980 to use separate index and data files.
1963 """
1981 """
1964 tiprev = len(self) - 1
1982 tiprev = len(self) - 1
1965 total_size = self.start(tiprev) + self.length(tiprev)
1983 total_size = self.start(tiprev) + self.length(tiprev)
1966 if not self._inline or total_size < _maxinline:
1984 if not self._inline or total_size < _maxinline:
1967 return
1985 return
1968
1986
1969 troffset = tr.findoffset(self._indexfile)
1987 troffset = tr.findoffset(self._indexfile)
1970 if troffset is None:
1988 if troffset is None:
1971 raise error.RevlogError(
1989 raise error.RevlogError(
1972 _(b"%s not found in the transaction") % self._indexfile
1990 _(b"%s not found in the transaction") % self._indexfile
1973 )
1991 )
1974 trindex = 0
1992 trindex = 0
1975 tr.add(self._datafile, 0)
1993 tr.add(self._datafile, 0)
1976
1994
1977 existing_handles = False
1995 existing_handles = False
1978 if self._writinghandles is not None:
1996 if self._writinghandles is not None:
1979 existing_handles = True
1997 existing_handles = True
1980 fp = self._writinghandles[0]
1998 fp = self._writinghandles[0]
1981 fp.flush()
1999 fp.flush()
1982 fp.close()
2000 fp.close()
1983 # We can't use the cached file handle after close(). So prevent
2001 # We can't use the cached file handle after close(). So prevent
1984 # its usage.
2002 # its usage.
1985 self._writinghandles = None
2003 self._writinghandles = None
1986
2004
1987 new_dfh = self._datafp(b'w+')
2005 new_dfh = self._datafp(b'w+')
1988 new_dfh.truncate(0) # drop any potentially existing data
2006 new_dfh.truncate(0) # drop any potentially existing data
1989 try:
2007 try:
1990 with self._indexfp() as read_ifh:
2008 with self._indexfp() as read_ifh:
1991 for r in self:
2009 for r in self:
1992 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2010 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
1993 if troffset <= self.start(r):
2011 if troffset <= self.start(r):
1994 trindex = r
2012 trindex = r
1995 new_dfh.flush()
2013 new_dfh.flush()
1996
2014
1997 with self.__index_new_fp() as fp:
2015 with self.__index_new_fp() as fp:
1998 self._format_flags &= ~FLAG_INLINE_DATA
2016 self._format_flags &= ~FLAG_INLINE_DATA
1999 self._inline = False
2017 self._inline = False
2000 for i in self:
2018 for i in self:
2001 e = self.index.entry_binary(i)
2019 e = self.index.entry_binary(i)
2002 if i == 0:
2020 if i == 0:
2003 header = self._format_flags | self._format_version
2021 header = self._format_flags | self._format_version
2004 header = self.index.pack_header(header)
2022 header = self.index.pack_header(header)
2005 e = header + e
2023 e = header + e
2006 fp.write(e)
2024 fp.write(e)
2007 # the temp file replaces the real index when we exit the context
2025 # the temp file replaces the real index when we exit the context
2008 # manager
2026 # manager
2009
2027
2010 tr.replace(self._indexfile, trindex * self.index.entry_size)
2028 tr.replace(self._indexfile, trindex * self.index.entry_size)
2011 nodemaputil.setup_persistent_nodemap(tr, self)
2029 nodemaputil.setup_persistent_nodemap(tr, self)
2012 self._chunkclear()
2030 self._chunkclear()
2013
2031
2014 if existing_handles:
2032 if existing_handles:
2015 # switched from inline to conventional; reopen the index
2033 # switched from inline to conventional; reopen the index
2016 ifh = self.__index_write_fp()
2034 ifh = self.__index_write_fp()
2017 self._writinghandles = (ifh, new_dfh)
2035 self._writinghandles = (ifh, new_dfh)
2018 new_dfh = None
2036 new_dfh = None
2019 finally:
2037 finally:
2020 if new_dfh is not None:
2038 if new_dfh is not None:
2021 new_dfh.close()
2039 new_dfh.close()
2022
2040
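# _enforceinlinesize above splits an oversized inline revlog in two
# passes: first copy every revision's data into a fresh .d file, then
# rewrite the .i file with index entries only, clearing the inline flag
# in the header packed into entry 0. A schematic of those passes, with
# hypothetical read_data/entry_binary/pack_header callables standing in
# for the revlog internals:

def split_inline_sketch(
    revs, read_data, entry_binary, pack_header, flags, dfh, ifh
):
    for r in revs:  # pass 1: data file
        dfh.write(read_data(r))
    dfh.flush()
    for i, r in enumerate(revs):  # pass 2: index file
        e = entry_binary(r)
        if i == 0:
            e = pack_header(flags) + e  # header flags with INLINE cleared
        ifh.write(e)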
2023 def _nodeduplicatecallback(self, transaction, node):
2041 def _nodeduplicatecallback(self, transaction, node):
2024 """called when trying to add a node already stored."""
2042 """called when trying to add a node already stored."""
2025
2043
2026 @contextlib.contextmanager
2044 @contextlib.contextmanager
2027 def _writing(self, transaction):
2045 def _writing(self, transaction):
2028 if self._writinghandles is not None:
2046 if self._writinghandles is not None:
2029 yield
2047 yield
2030 else:
2048 else:
2031 r = len(self)
2049 r = len(self)
2032 dsize = 0
2050 dsize = 0
2033 if r:
2051 if r:
2034 dsize = self.end(r - 1)
2052 dsize = self.end(r - 1)
2035 dfh = None
2053 dfh = None
2036 if not self._inline:
2054 if not self._inline:
2037 try:
2055 try:
2038 dfh = self._datafp(b"r+")
2056 dfh = self._datafp(b"r+")
2039 dfh.seek(0, os.SEEK_END)
2057 dfh.seek(0, os.SEEK_END)
2040 except IOError as inst:
2058 except IOError as inst:
2041 if inst.errno != errno.ENOENT:
2059 if inst.errno != errno.ENOENT:
2042 raise
2060 raise
2043 dfh = self._datafp(b"w+")
2061 dfh = self._datafp(b"w+")
2044 transaction.add(self._datafile, dsize)
2062 transaction.add(self._datafile, dsize)
2045 try:
2063 try:
2046 isize = r * self.index.entry_size
2064 isize = r * self.index.entry_size
2047 ifh = self.__index_write_fp()
2065 ifh = self.__index_write_fp()
2048 if self._inline:
2066 if self._inline:
2049 transaction.add(self._indexfile, dsize + isize)
2067 transaction.add(self._indexfile, dsize + isize)
2050 else:
2068 else:
2051 transaction.add(self._indexfile, isize)
2069 transaction.add(self._indexfile, isize)
2052 try:
2070 try:
2053 self._writinghandles = (ifh, dfh)
2071 self._writinghandles = (ifh, dfh)
2054 try:
2072 try:
2055 yield
2073 yield
2074 if self._docket is not None:
2075 self._docket.write(transaction)
2056 finally:
2076 finally:
2057 self._writinghandles = None
2077 self._writinghandles = None
2058 finally:
2078 finally:
2059 ifh.close()
2079 ifh.close()
2060 finally:
2080 finally:
2061 if dfh is not None:
2081 if dfh is not None:
2062 dfh.close()
2082 dfh.close()
2063
2083
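# This is where the docket introduced by this change gets persisted:
# writes happen inside the _writing() context, and only on a clean exit
# is the docket flushed, before the handles are released. A standalone
# sketch of that shape (hypothetical open/close callables, not the
# revlog API):

import contextlib

@contextlib.contextmanager
def writing_sketch(open_handles, close_handles, docket=None, tr=None):
    handles = open_handles()
    try:
        yield handles
        if docket is not None:  # flush the docket on success only
            docket.write(tr)
    finally:
        close_handles(handles)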
2064 def addrevision(
2084 def addrevision(
2065 self,
2085 self,
2066 text,
2086 text,
2067 transaction,
2087 transaction,
2068 link,
2088 link,
2069 p1,
2089 p1,
2070 p2,
2090 p2,
2071 cachedelta=None,
2091 cachedelta=None,
2072 node=None,
2092 node=None,
2073 flags=REVIDX_DEFAULT_FLAGS,
2093 flags=REVIDX_DEFAULT_FLAGS,
2074 deltacomputer=None,
2094 deltacomputer=None,
2075 sidedata=None,
2095 sidedata=None,
2076 ):
2096 ):
2077 """add a revision to the log
2097 """add a revision to the log
2078
2098
2079 text - the revision data to add
2099 text - the revision data to add
2080 transaction - the transaction object used for rollback
2100 transaction - the transaction object used for rollback
2081 link - the linkrev data to add
2101 link - the linkrev data to add
2082 p1, p2 - the parent nodeids of the revision
2102 p1, p2 - the parent nodeids of the revision
2083 cachedelta - an optional precomputed delta
2103 cachedelta - an optional precomputed delta
2084 node - nodeid of revision; typically node is not specified, and it is
2104 node - nodeid of revision; typically node is not specified, and it is
2085 computed by default as hash(text, p1, p2), however subclasses might
2105 computed by default as hash(text, p1, p2), however subclasses might
2086 use a different hashing method (and override checkhash() in such cases)
2106 use a different hashing method (and override checkhash() in such cases)
2087 flags - the known flags to set on the revision
2107 flags - the known flags to set on the revision
2088 deltacomputer - an optional deltacomputer instance shared between
2108 deltacomputer - an optional deltacomputer instance shared between
2089 multiple calls
2109 multiple calls
2090 """
2110 """
2091 if link == nullrev:
2111 if link == nullrev:
2092 raise error.RevlogError(
2112 raise error.RevlogError(
2093 _(b"attempted to add linkrev -1 to %s") % self.display_id
2113 _(b"attempted to add linkrev -1 to %s") % self.display_id
2094 )
2114 )
2095
2115
2096 if sidedata is None:
2116 if sidedata is None:
2097 sidedata = {}
2117 sidedata = {}
2098 elif sidedata and not self.hassidedata:
2118 elif sidedata and not self.hassidedata:
2099 raise error.ProgrammingError(
2119 raise error.ProgrammingError(
2100 _(b"trying to add sidedata to a revlog who don't support them")
2120 _(b"trying to add sidedata to a revlog who don't support them")
2101 )
2121 )
2102
2122
2103 if flags:
2123 if flags:
2104 node = node or self.hash(text, p1, p2)
2124 node = node or self.hash(text, p1, p2)
2105
2125
2106 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2126 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2107
2127
2108 # If the flag processor modifies the revision data, ignore any provided
2128 # If the flag processor modifies the revision data, ignore any provided
2109 # cachedelta.
2129 # cachedelta.
2110 if rawtext != text:
2130 if rawtext != text:
2111 cachedelta = None
2131 cachedelta = None
2112
2132
2113 if len(rawtext) > _maxentrysize:
2133 if len(rawtext) > _maxentrysize:
2114 raise error.RevlogError(
2134 raise error.RevlogError(
2115 _(
2135 _(
2116 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2136 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2117 )
2137 )
2118 % (self.display_id, len(rawtext))
2138 % (self.display_id, len(rawtext))
2119 )
2139 )
2120
2140
2121 node = node or self.hash(rawtext, p1, p2)
2141 node = node or self.hash(rawtext, p1, p2)
2122 rev = self.index.get_rev(node)
2142 rev = self.index.get_rev(node)
2123 if rev is not None:
2143 if rev is not None:
2124 return rev
2144 return rev
2125
2145
2126 if validatehash:
2146 if validatehash:
2127 self.checkhash(rawtext, node, p1=p1, p2=p2)
2147 self.checkhash(rawtext, node, p1=p1, p2=p2)
2128
2148
2129 return self.addrawrevision(
2149 return self.addrawrevision(
2130 rawtext,
2150 rawtext,
2131 transaction,
2151 transaction,
2132 link,
2152 link,
2133 p1,
2153 p1,
2134 p2,
2154 p2,
2135 node,
2155 node,
2136 flags,
2156 flags,
2137 cachedelta=cachedelta,
2157 cachedelta=cachedelta,
2138 deltacomputer=deltacomputer,
2158 deltacomputer=deltacomputer,
2139 sidedata=sidedata,
2159 sidedata=sidedata,
2140 )
2160 )
2141
2161
2142 def addrawrevision(
2162 def addrawrevision(
2143 self,
2163 self,
2144 rawtext,
2164 rawtext,
2145 transaction,
2165 transaction,
2146 link,
2166 link,
2147 p1,
2167 p1,
2148 p2,
2168 p2,
2149 node,
2169 node,
2150 flags,
2170 flags,
2151 cachedelta=None,
2171 cachedelta=None,
2152 deltacomputer=None,
2172 deltacomputer=None,
2153 sidedata=None,
2173 sidedata=None,
2154 ):
2174 ):
2155 """add a raw revision with known flags, node and parents
2175 """add a raw revision with known flags, node and parents
2156 useful when reusing a revision not stored in this revlog (e.g. received
2176 useful when reusing a revision not stored in this revlog (e.g. received
2157 over the wire, or read from an external bundle).
2177 over the wire, or read from an external bundle).
2158 """
2178 """
2159 with self._writing(transaction):
2179 with self._writing(transaction):
2160 return self._addrevision(
2180 return self._addrevision(
2161 node,
2181 node,
2162 rawtext,
2182 rawtext,
2163 transaction,
2183 transaction,
2164 link,
2184 link,
2165 p1,
2185 p1,
2166 p2,
2186 p2,
2167 flags,
2187 flags,
2168 cachedelta,
2188 cachedelta,
2169 deltacomputer=deltacomputer,
2189 deltacomputer=deltacomputer,
2170 sidedata=sidedata,
2190 sidedata=sidedata,
2171 )
2191 )
2172
2192
2173 def compress(self, data):
2193 def compress(self, data):
2174 """Generate a possibly-compressed representation of data."""
2194 """Generate a possibly-compressed representation of data."""
2175 if not data:
2195 if not data:
2176 return b'', data
2196 return b'', data
2177
2197
2178 compressed = self._compressor.compress(data)
2198 compressed = self._compressor.compress(data)
2179
2199
2180 if compressed:
2200 if compressed:
2181 # The revlog compressor added the header in the returned data.
2201 # The revlog compressor added the header in the returned data.
2182 return b'', compressed
2202 return b'', compressed
2183
2203
2184 if data[0:1] == b'\0':
2204 if data[0:1] == b'\0':
2185 return b'', data
2205 return b'', data
2186 return b'u', data
2206 return b'u', data
2187
2207
2188 def decompress(self, data):
2208 def decompress(self, data):
2189 """Decompress a revlog chunk.
2209 """Decompress a revlog chunk.
2190
2210
2191 The chunk is expected to begin with a header identifying the
2211 The chunk is expected to begin with a header identifying the
2192 format type so it can be routed to an appropriate decompressor.
2212 format type so it can be routed to an appropriate decompressor.
2193 """
2213 """
2194 if not data:
2214 if not data:
2195 return data
2215 return data
2196
2216
2197 # Revlogs are read much more frequently than they are written and many
2217 # Revlogs are read much more frequently than they are written and many
2198 # chunks only take microseconds to decompress, so performance is
2218 # chunks only take microseconds to decompress, so performance is
2199 # important here.
2219 # important here.
2200 #
2220 #
2201 # We can make a few assumptions about revlogs:
2221 # We can make a few assumptions about revlogs:
2202 #
2222 #
2203 # 1) the majority of chunks will be compressed (as opposed to inline
2223 # 1) the majority of chunks will be compressed (as opposed to inline
2204 # raw data).
2224 # raw data).
2205 # 2) decompressing *any* data will likely be at least 10x slower than
2225 # 2) decompressing *any* data will likely be at least 10x slower than
2206 # returning raw inline data.
2226 # returning raw inline data.
2207 # 3) we want to prioritize common and officially supported compression
2227 # 3) we want to prioritize common and officially supported compression
2208 # engines
2228 # engines
2209 #
2229 #
2210 # It follows that we want to optimize for "decompress compressed data
2230 # It follows that we want to optimize for "decompress compressed data
2211 # when encoded with common and officially supported compression engines"
2231 # when encoded with common and officially supported compression engines"
2212 # case over "raw data" and "data encoded by less common or non-official
2232 # case over "raw data" and "data encoded by less common or non-official
2213 # compression engines." That is why we have the inline lookup first
2233 # compression engines." That is why we have the inline lookup first
2214 # followed by the compengines lookup.
2234 # followed by the compengines lookup.
2215 #
2235 #
2216 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2236 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2217 # compressed chunks. And this matters for changelog and manifest reads.
2237 # compressed chunks. And this matters for changelog and manifest reads.
2218 t = data[0:1]
2238 t = data[0:1]
2219
2239
2220 if t == b'x':
2240 if t == b'x':
2221 try:
2241 try:
2222 return _zlibdecompress(data)
2242 return _zlibdecompress(data)
2223 except zlib.error as e:
2243 except zlib.error as e:
2224 raise error.RevlogError(
2244 raise error.RevlogError(
2225 _(b'revlog decompress error: %s')
2245 _(b'revlog decompress error: %s')
2226 % stringutil.forcebytestr(e)
2246 % stringutil.forcebytestr(e)
2227 )
2247 )
2228 # '\0' is more common than 'u' so it goes first.
2248 # '\0' is more common than 'u' so it goes first.
2229 elif t == b'\0':
2249 elif t == b'\0':
2230 return data
2250 return data
2231 elif t == b'u':
2251 elif t == b'u':
2232 return util.buffer(data, 1)
2252 return util.buffer(data, 1)
2233
2253
2234 try:
2254 try:
2235 compressor = self._decompressors[t]
2255 compressor = self._decompressors[t]
2236 except KeyError:
2256 except KeyError:
2237 try:
2257 try:
2238 engine = util.compengines.forrevlogheader(t)
2258 engine = util.compengines.forrevlogheader(t)
2239 compressor = engine.revlogcompressor(self._compengineopts)
2259 compressor = engine.revlogcompressor(self._compengineopts)
2240 self._decompressors[t] = compressor
2260 self._decompressors[t] = compressor
2241 except KeyError:
2261 except KeyError:
2242 raise error.RevlogError(
2262 raise error.RevlogError(
2243 _(b'unknown compression type %s') % binascii.hexlify(t)
2263 _(b'unknown compression type %s') % binascii.hexlify(t)
2244 )
2264 )
2245
2265
2246 return compressor.decompress(data)
2266 return compressor.decompress(data)
2247
2267
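# compress() and decompress() above agree on a one-byte header
# protocol: b'u' marks data stored uncompressed, a chunk whose first
# byte is NUL is stored raw with no header at all, and any other first
# byte names a compression engine (b'x' is the zlib stream marker). A
# toy dispatcher of the same shape, handling only the zlib and
# uncompressed cases:

import zlib

def decompress_sketch(data):
    if not data:
        return data
    t = data[0:1]
    if t == b'x':  # a zlib stream starts with 0x78 == b'x'
        return zlib.decompress(data)
    if t == b'\0':  # raw chunk beginning with NUL, stored as-is
        return data
    if t == b'u':  # explicit "uncompressed" header byte
        return data[1:]
    raise ValueError('unknown compression type %r' % t)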
2248 def _addrevision(
2268 def _addrevision(
2249 self,
2269 self,
2250 node,
2270 node,
2251 rawtext,
2271 rawtext,
2252 transaction,
2272 transaction,
2253 link,
2273 link,
2254 p1,
2274 p1,
2255 p2,
2275 p2,
2256 flags,
2276 flags,
2257 cachedelta,
2277 cachedelta,
2258 alwayscache=False,
2278 alwayscache=False,
2259 deltacomputer=None,
2279 deltacomputer=None,
2260 sidedata=None,
2280 sidedata=None,
2261 ):
2281 ):
2262 """internal function to add revisions to the log
2282 """internal function to add revisions to the log
2263
2283
2264 see addrevision for argument descriptions.
2284 see addrevision for argument descriptions.
2265
2285
2266 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2286 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2267
2287
2268 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2288 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2269 be used.
2289 be used.
2270
2290
2271 invariants:
2291 invariants:
2272 - rawtext is optional (can be None); if not set, cachedelta must be set.
2292 - rawtext is optional (can be None); if not set, cachedelta must be set.
2273 if both are set, they must correspond to each other.
2293 if both are set, they must correspond to each other.
2274 """
2294 """
2275 if node == self.nullid:
2295 if node == self.nullid:
2276 raise error.RevlogError(
2296 raise error.RevlogError(
2277 _(b"%s: attempt to add null revision") % self.display_id
2297 _(b"%s: attempt to add null revision") % self.display_id
2278 )
2298 )
2279 if (
2299 if (
2280 node == self.nodeconstants.wdirid
2300 node == self.nodeconstants.wdirid
2281 or node in self.nodeconstants.wdirfilenodeids
2301 or node in self.nodeconstants.wdirfilenodeids
2282 ):
2302 ):
2283 raise error.RevlogError(
2303 raise error.RevlogError(
2284 _(b"%s: attempt to add wdir revision") % self.display_id
2304 _(b"%s: attempt to add wdir revision") % self.display_id
2285 )
2305 )
2286 if self._writinghandles is None:
2306 if self._writinghandles is None:
2287 msg = b'adding revision outside `revlog._writing` context'
2307 msg = b'adding revision outside `revlog._writing` context'
2288 raise error.ProgrammingError(msg)
2308 raise error.ProgrammingError(msg)
2289
2309
2290 if self._inline:
2310 if self._inline:
2291 fh = self._writinghandles[0]
2311 fh = self._writinghandles[0]
2292 else:
2312 else:
2293 fh = self._writinghandles[1]
2313 fh = self._writinghandles[1]
2294
2314
2295 btext = [rawtext]
2315 btext = [rawtext]
2296
2316
2297 curr = len(self)
2317 curr = len(self)
2298 prev = curr - 1
2318 prev = curr - 1
2299
2319
2300 offset = self._get_data_offset(prev)
2320 offset = self._get_data_offset(prev)
2301
2321
2302 if self._concurrencychecker:
2322 if self._concurrencychecker:
2303 ifh, dfh = self._writinghandles
2323 ifh, dfh = self._writinghandles
2304 if self._inline:
2324 if self._inline:
2305 # offset is "as if" it were in the .d file, so we need to add on
2325 # offset is "as if" it were in the .d file, so we need to add on
2306 # the size of the entry metadata.
2326 # the size of the entry metadata.
2307 self._concurrencychecker(
2327 self._concurrencychecker(
2308 ifh, self._indexfile, offset + curr * self.index.entry_size
2328 ifh, self._indexfile, offset + curr * self.index.entry_size
2309 )
2329 )
2310 else:
2330 else:
2311 # Entries in the .i are a consistent size.
2331 # Entries in the .i are a consistent size.
2312 self._concurrencychecker(
2332 self._concurrencychecker(
2313 ifh, self._indexfile, curr * self.index.entry_size
2333 ifh, self._indexfile, curr * self.index.entry_size
2314 )
2334 )
2315 self._concurrencychecker(dfh, self._datafile, offset)
2335 self._concurrencychecker(dfh, self._datafile, offset)
2316
2336
2317 p1r, p2r = self.rev(p1), self.rev(p2)
2337 p1r, p2r = self.rev(p1), self.rev(p2)
2318
2338
2319 # full versions are inserted when the needed deltas
2339 # full versions are inserted when the needed deltas
2320 # become comparable to the uncompressed text
2340 # become comparable to the uncompressed text
2321 if rawtext is None:
2341 if rawtext is None:
2322 # need rawtext size, before changed by flag processors, which is
2342 # need rawtext size, before changed by flag processors, which is
2323 # the non-raw size. use revlog explicitly to avoid filelog's extra
2343 # the non-raw size. use revlog explicitly to avoid filelog's extra
2324 # logic that might remove metadata size.
2344 # logic that might remove metadata size.
2325 textlen = mdiff.patchedsize(
2345 textlen = mdiff.patchedsize(
2326 revlog.size(self, cachedelta[0]), cachedelta[1]
2346 revlog.size(self, cachedelta[0]), cachedelta[1]
2327 )
2347 )
2328 else:
2348 else:
2329 textlen = len(rawtext)
2349 textlen = len(rawtext)
2330
2350
2331 if deltacomputer is None:
2351 if deltacomputer is None:
2332 deltacomputer = deltautil.deltacomputer(self)
2352 deltacomputer = deltautil.deltacomputer(self)
2333
2353
2334 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2354 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2335
2355
2336 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2356 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2337
2357
2338 if sidedata and self.hassidedata:
2358 if sidedata and self.hassidedata:
2339 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2359 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2340 sidedata_offset = offset + deltainfo.deltalen
2360 sidedata_offset = offset + deltainfo.deltalen
2341 else:
2361 else:
2342 serialized_sidedata = b""
2362 serialized_sidedata = b""
2343 # Don't store the offset if the sidedata is empty, that way
2363 # Don't store the offset if the sidedata is empty, that way
2344 # we can easily detect empty sidedata and they will be no different
2364 # we can easily detect empty sidedata and they will be no different
2345 # than ones we manually add.
2365 # than ones we manually add.
2346 sidedata_offset = 0
2366 sidedata_offset = 0
2347
2367
2348 e = (
2368 e = (
2349 offset_type(offset, flags),
2369 offset_type(offset, flags),
2350 deltainfo.deltalen,
2370 deltainfo.deltalen,
2351 textlen,
2371 textlen,
2352 deltainfo.base,
2372 deltainfo.base,
2353 link,
2373 link,
2354 p1r,
2374 p1r,
2355 p2r,
2375 p2r,
2356 node,
2376 node,
2357 sidedata_offset,
2377 sidedata_offset,
2358 len(serialized_sidedata),
2378 len(serialized_sidedata),
2359 )
2379 )
2360
2380
2361 self.index.append(e)
2381 self.index.append(e)
2362 entry = self.index.entry_binary(curr)
2382 entry = self.index.entry_binary(curr)
2363 if curr == 0:
2383 if curr == 0:
2364 header = self._format_flags | self._format_version
2384 header = self._format_flags | self._format_version
2365 header = self.index.pack_header(header)
2385 header = self.index.pack_header(header)
2366 entry = header + entry
2386 entry = header + entry
2367 self._writeentry(
2387 self._writeentry(
2368 transaction,
2388 transaction,
2369 entry,
2389 entry,
2370 deltainfo.data,
2390 deltainfo.data,
2371 link,
2391 link,
2372 offset,
2392 offset,
2373 serialized_sidedata,
2393 serialized_sidedata,
2374 )
2394 )
2375
2395
2376 rawtext = btext[0]
2396 rawtext = btext[0]
2377
2397
2378 if alwayscache and rawtext is None:
2398 if alwayscache and rawtext is None:
2379 rawtext = deltacomputer.buildtext(revinfo, fh)
2399 rawtext = deltacomputer.buildtext(revinfo, fh)
2380
2400
2381 if type(rawtext) == bytes: # only accept immutable objects
2401 if type(rawtext) == bytes: # only accept immutable objects
2382 self._revisioncache = (node, curr, rawtext)
2402 self._revisioncache = (node, curr, rawtext)
2383 self._chainbasecache[curr] = deltainfo.chainbase
2403 self._chainbasecache[curr] = deltainfo.chainbase
2384 return curr
2404 return curr

    def _get_data_offset(self, prev):
        """Returns the current offset in the (in-transaction) data file.

        Versions < 2 of the revlog can get this in O(1), while revlog v2
        needs a docket file to store that information: since sidedata can be
        rewritten to the end of the data file within a transaction, you can
        have cases where, for example, rev `n` does not have sidedata while
        rev `n - 1` does, leading to `n - 1`'s sidedata being written after
        `n`'s data.

        TODO cache this in a docket file before getting out of experimental."""
        if self._format_version != REVLOGV2:
            return self.end(prev)

        offset = 0
        for rev, entry in enumerate(self.index):
            sidedata_end = entry[8] + entry[9]
            # Sidedata for a previous rev has potentially been written after
            # this rev's end, so take the max.
            offset = max(self.end(rev), offset, sidedata_end)
        return offset
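
# --- illustrative aside -------------------------------------------------
# Why the O(n) scan above is needed for revlog v2: sidedata may be
# rewritten past the end of later revisions' data, so the true end of the
# data file is the max over both data ends and sidedata ends. Simplified
# 3-field entries, invented for the example (the real index entry has
# more fields).
entries = [
    (100, 0, 0),     # rev 0: data ends at 100, no sidedata
    (180, 220, 40),  # rev 1: sidedata rewritten after rev 2's data
    (220, 0, 0),     # rev 2: data ends at 220
]
offset = 0
for data_end, sd_offset, sd_length in entries:
    offset = max(offset, data_end, sd_offset + sd_length)
assert offset == 260  # rev 1's sidedata ends last (220 + 40)
# -------------------------------------------------------------------------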

    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh = self._writinghandles
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                dfh.write(sidedata)
            ifh.write(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            if sidedata:
                ifh.write(sidedata)
            self._enforceinlinesize(transaction)
        nodemaputil.setup_persistent_nodemap(transaction, self)
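
# --- illustrative aside -------------------------------------------------
# The seek-before-write pattern described above, shown on an in-memory
# file: after any read, explicitly reposition at EOF before appending,
# instead of trusting a+ mode semantics. Minimal sketch only; it does not
# reproduce the platform bugs themselves.
import io
import os

fh = io.BytesIO(b'existing')
fh.read(4)               # a read may leave the cursor mid-file
fh.seek(0, os.SEEK_END)  # explicit seek before writing
fh.write(b'+new')
assert fh.getvalue() == b'existing+new'
# -------------------------------------------------------------------------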

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                deltacomputer = deltautil.deltacomputer(self)
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty
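
# --- illustrative aside -------------------------------------------------
# The shape of one element of ``deltas`` as unpacked in addgroup() above.
# Values are placeholders, not a real changegroup entry.
delta_entry = (
    b'\x11' * 20,  # node being added
    b'\x22' * 20,  # p1
    b'\x00' * 20,  # p2 (null: not a merge)
    b'\x33' * 20,  # linknode, resolved to a local linkrev via linkmapper
    b'\x22' * 20,  # deltabase: the revision the delta applies against
    b'<patch>',    # delta payload
    0,             # flags (0 -> REVIDX_DEFAULT_FLAGS is substituted)
    {},            # sidedata mapping
)
node, p1, p2, linknode, deltabase, delta, flags, sidedata = delta_entry
assert deltabase == p1  # the first delta is typically against a parent
# -------------------------------------------------------------------------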

    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, end)
            end = rev * self.index.entry_size
        else:
            end += rev * self.index.entry_size

        transaction.add(self._indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        del self.index[rev:-1]
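
# --- illustrative aside -------------------------------------------------
# The truncation offsets computed in strip() above, with invented numbers.
# For a separate .d file, index and data are truncated independently; for
# an inline revlog, entries and data interleave in a single file.
entry_size = 64    # index v1 entries are 64 bytes (see the constants below)
rev = 3            # first revision to strip
data_start = 4096  # self.start(rev): offset of rev's data

datafile_end = data_start                   # non-inline: truncate .d here
indexfile_end = rev * entry_size            # non-inline: truncate .i here
inline_end = data_start + rev * entry_size  # inline: one combined offset

assert (indexfile_end, inline_end) == (192, 4288)
# -------------------------------------------------------------------------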

    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
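
# --- illustrative aside -------------------------------------------------
# How `di` above flags a truncated or garbage-extended index: any bytes
# past the last whole entry are reported. Invented sizes for the example.
entry_size = 64
actual = 64 * 10 + 13             # index file with 13 stray trailing bytes
i = max(0, actual // entry_size)  # number of complete entries
di = actual - (i * entry_size)    # extra bytes -> non-zero means damage
assert (i, di) == (10, 13)
# -------------------------------------------------------------------------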

    def files(self):
        res = [self._indexfile]
        if not self._inline:
            res.append(self._datafile)
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions will be re-added as if they were new content. This is
          slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
          e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force computing deltas against both
        parents for merges. By default (``None``), the destination revlog's
        current setting is kept.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
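
# --- illustrative aside -------------------------------------------------
# How each reuse policy maps onto the two laziness switches toggled in
# clone() above (DELTAREUSEFULLADD leaves them untouched and re-adds full
# revisions instead). The dict is a summary for the example, not real API.
POLICY_SWITCHES = {
    b'always':   {'lazydelta': True,  'lazydeltabase': True},
    b'samerevs': {'lazydelta': True,  'lazydeltabase': False},
    b'never':    {'lazydelta': False, 'lazydeltabase': False},
}
assert POLICY_SWITCHES[b'samerevs']['lazydeltabase'] is False
# -------------------------------------------------------------------------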

    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text, sidedata = self._revisiondata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext, sidedata = self._revisiondata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

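# --- illustrative aside -------------------------------------------------
# The flag update expression used in _clone() above: sidedata helpers
# return a (flags_to_add, flags_to_remove) pair that is folded into the
# entry flags. REVIDX_ISCENSORED's value matches the real constant; the
# scenario is made up.
REVIDX_ISCENSORED = 1 << 15
flags = 0
new_flags = (REVIDX_ISCENSORED, 0)  # add the censored bit, remove nothing
flags = flags | new_flags[0] & ~new_flags[1]
assert flags & REVIDX_ISCENSORED
# -------------------------------------------------------------------------
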
    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.
        #
        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(
            self.opener,
            target=self.target,
            radix=self.radix,
            postfix=b'tmpcensored',
            censorable=True,
        )
        newrl._format_version = self._format_version
        newrl._format_flags = self._format_flags
        newrl._generaldelta = self._generaldelta
        newrl._parse_index = self._parse_index

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self._indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self._datafile, location=b'store')

        self.opener.rename(newrl._indexfile, self._indexfile)
        if not self._inline:
            self.opener.rename(newrl._datafile, self._datafile)

        self.clearcaches()
        self._loadindex()
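
# --- illustrative aside -------------------------------------------------
# Why the tombstone length is checked above: the packed tombstone
# (metadata framing included) must fit within the rawtext it replaces.
# This is a simplified stand-in for storageutil.packmeta, not its exact
# output format.
def packmeta_demo(meta):
    body = b''.join(b'%s: %s\n' % (k, v) for k, v in sorted(meta.items()))
    return b'\x01\n' + body + b'\x01\n'

tombstone = packmeta_demo({b'censored': b'removed per policy'})
rawsize = 64  # invented size of the censored revision's rawtext
assert len(tombstone) <= rawsize
# -------------------------------------------------------------------------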

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  ------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)
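
# --- illustrative aside -------------------------------------------------
# The first table above, restated as code: which of the four cases a file
# revision falls into, given its flags, rename status, and rawtext prefix.
# `classify` is invented for the example.
def classify(flags, renamed, rawtext):
    if flags:
        return 'ext'
    if renamed:
        return 'rename'
    if rawtext[0:2] == b'\x01\n':
        return 'meta'
    return 'common'

assert classify(0, False, b'plain data') == 'common'
assert classify(0, True, b'\x01\ncopy: a\n\x01\ndata') == 'rename'
assert classify(0, False, b'\x01\nsomekey: v\n\x01\ndata') == 'meta'
assert classify(1 << 15, False, b'tombstone') == 'ext'
# -------------------------------------------------------------------------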

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.hassidedata:
            return
-        # inline are not yet supported because they suffer from an issue when
-        # rewriting them (since it's not an append-only operation).
-        # See issue6485.
+        # revlog formats with sidedata support do not support inline
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        # The changelog implements a "delayed" writing mechanism that assumes
        # all index data is written in append mode and is therefore
        # incompatible with the seeked writes done in this method. The use of
        # such "delayed" writing will soon be removed for revlog versions that
        # support sidedata, so for now we only keep this simple assert to
        # highlight the situation.
        delayed = getattr(self, '_delayed', False)
        diverted = getattr(self, '_divert', False)
        if delayed and not diverted:
            msg = "cannot rewrite_sidedata of a delayed revlog"
            raise error.ProgrammingError(msg)

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh = self._writinghandles
            dfh.seek(0, os.SEEK_END)
            current_offset = dfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry = (new_offset_flags,) + entry[1:8]
                entry += (current_offset, len(serialized_sidedata))

                # the sidedata computation might have moved the file cursor
                # around
                dfh.seek(current_offset, os.SEEK_SET)
                dfh.write(serialized_sidedata)
                new_entries.append(entry)
                current_offset += len(serialized_sidedata)

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
                packed = self.index.entry_binary(rev)
                if rev == 0:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
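
# --- illustrative aside -------------------------------------------------
# The index entry tuple positions used in rewrite_sidedata() above, with
# placeholder values (a simplified view of the revlog v2 entry):
#   [0] offset+flags  [1] compressed len  [2] raw len  [3] delta base
#   [4] linkrev       [5] p1 rev          [6] p2 rev   [7] node
#   [8] sidedata offset                   [9] sidedata length
entry = (0, 10, 12, 0, 0, -1, -1, b'\x00' * 20, 0, 0)
has_sidedata = entry[8] != 0 or entry[9] != 0  # the guard used above
assert not has_sidedata
# -------------------------------------------------------------------------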
@@ -1,153 +1,155 @@
# revlogdeltas.py - constants used for revlog logic
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2018 Octobus <contact@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Helper class to compute deltas stored inside revlogs"""

from __future__ import absolute_import

import struct

from ..interfaces import repository

### Internal utility constants

KIND_CHANGELOG = 1001  # over 256 to not be comparable with a bytes
KIND_MANIFESTLOG = 1002
KIND_FILELOG = 1003
KIND_OTHER = 1004

ALL_KINDS = {
    KIND_CHANGELOG,
    KIND_MANIFESTLOG,
    KIND_FILELOG,
    KIND_OTHER,
}

### main revlog header

INDEX_HEADER = struct.Struct(b">I")

## revlog version
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
REVLOGV2 = 0xDEAD

## global revlog header flags
# Shared across v1 and v2.
FLAG_INLINE_DATA = 1 << 16
# Only used by v1, implied by v2.
FLAG_GENERALDELTA = 1 << 17
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGV0_FLAGS = 0
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = FLAG_INLINE_DATA
51
51
52 ### individual entry
52 ### individual entry
53
53
54 ## index v0:
54 ## index v0:
55 # 4 bytes: offset
55 # 4 bytes: offset
56 # 4 bytes: compressed length
56 # 4 bytes: compressed length
57 # 4 bytes: base rev
57 # 4 bytes: base rev
58 # 4 bytes: link rev
58 # 4 bytes: link rev
59 # 20 bytes: parent 1 nodeid
59 # 20 bytes: parent 1 nodeid
60 # 20 bytes: parent 2 nodeid
60 # 20 bytes: parent 2 nodeid
61 # 20 bytes: nodeid
61 # 20 bytes: nodeid
62 INDEX_ENTRY_V0 = struct.Struct(b">4l20s20s20s")
62 INDEX_ENTRY_V0 = struct.Struct(b">4l20s20s20s")
63
63
64 ## index v1
64 ## index v1
65 # 6 bytes: offset
65 # 6 bytes: offset
66 # 2 bytes: flags
66 # 2 bytes: flags
67 # 4 bytes: compressed length
67 # 4 bytes: compressed length
68 # 4 bytes: uncompressed length
68 # 4 bytes: uncompressed length
69 # 4 bytes: base rev
69 # 4 bytes: base rev
70 # 4 bytes: link rev
70 # 4 bytes: link rev
71 # 4 bytes: parent 1 rev
71 # 4 bytes: parent 1 rev
72 # 4 bytes: parent 2 rev
72 # 4 bytes: parent 2 rev
73 # 32 bytes: nodeid
73 # 32 bytes: nodeid
74 INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
74 INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
75 assert INDEX_ENTRY_V1.size == 32 * 2
75 assert INDEX_ENTRY_V1.size == 32 * 2
76
76
77 # 6 bytes: offset
77 # 6 bytes: offset
78 # 2 bytes: flags
78 # 2 bytes: flags
79 # 4 bytes: compressed length
79 # 4 bytes: compressed length
80 # 4 bytes: uncompressed length
80 # 4 bytes: uncompressed length
81 # 4 bytes: base rev
81 # 4 bytes: base rev
82 # 4 bytes: link rev
82 # 4 bytes: link rev
83 # 4 bytes: parent 1 rev
83 # 4 bytes: parent 1 rev
84 # 4 bytes: parent 2 rev
84 # 4 bytes: parent 2 rev
85 # 32 bytes: nodeid
85 # 32 bytes: nodeid
86 # 8 bytes: sidedata offset
86 # 8 bytes: sidedata offset
87 # 4 bytes: sidedata compressed length
87 # 4 bytes: sidedata compressed length
88 # 20 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
88 # 20 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
89 INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQi20x")
89 INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQi20x")
90 assert INDEX_ENTRY_V2.size == 32 * 3
90 assert INDEX_ENTRY_V2.size == 32 * 3
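
# Illustrative sketch (hypothetical values, not from this changeset): how an
# entry round-trips through INDEX_ENTRY_V1. The leading u64 packs the 6-byte
# offset and the 2-byte flags together as (offset << 16) | flags.
_example = INDEX_ENTRY_V1.pack(
    (1024 << 16) | 0,  # offset 1024, no flags
    11,  # compressed length
    23,  # uncompressed length
    0,  # base rev
    0,  # link rev
    -1,  # parent 1 rev (-1 is the null revision)
    -1,  # parent 2 rev
    b'\x00' * 20,  # nodeid (20 bytes used, the trailing 12x pads to 32 bytes)
)
assert INDEX_ENTRY_V1.unpack(_example)[0] >> 16 == 1024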

# revlog index flags

# For historical reasons, revlog's internal flags were exposed via the
# wire protocol and are even exposed in parts of the storage APIs.

# revision has censor metadata, must be verified
REVIDX_ISCENSORED = repository.REVISION_FLAG_CENSORED
# revision hash does not match data (narrowhg)
REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
# revision data is stored externally
REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
# revision changes files in a way that could affect copy tracing.
REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
REVIDX_DEFAULT_FLAGS = 0
# stable order in which flags need to be processed and their processors applied
REVIDX_FLAGS_ORDER = [
    REVIDX_ISCENSORED,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_HASCOPIESINFO,
]

# bitmask for flags that could cause rawdata content change
REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED

SUPPORTED_FLAGS = {
    REVLOGV0: REVLOGV0_FLAGS,
    REVLOGV1: REVLOGV1_FLAGS,
    REVLOGV2: REVLOGV2_FLAGS,
}

_no = lambda flags: False
_yes = lambda flags: True


def _from_flag(flag):
    return lambda flags: bool(flags & flag)


FEATURES_BY_VERSION = {
    REVLOGV0: {
        b'inline': _no,
        b'generaldelta': _no,
        b'sidedata': False,
+       b'docket': False,
    },
    REVLOGV1: {
        b'inline': _from_flag(FLAG_INLINE_DATA),
        b'generaldelta': _from_flag(FLAG_GENERALDELTA),
        b'sidedata': False,
+       b'docket': False,
    },
    REVLOGV2: {
-       # There is a bug in the transaction handling when going from an
-       # inline revlog to a separate index and data file. Turn it off until
-       # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
-       # See issue6485
+       # The point of inline revlogs is to reduce the number of files used
+       # in the store. Using a docket defeats this purpose, so we need other
+       # means to reduce the number of files for revlogv2.
        b'inline': _no,
        b'generaldelta': _yes,
        b'sidedata': True,
+       b'docket': True,
    },
}
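
# Illustrative sketch (hypothetical header value, helper not part of this
# changeset): how the table above can be queried. The low 16 bits of an index
# header select the version; the remaining bits are feature flags, tested
# through the callables stored in the table.
def _has_feature(header, name):
    version = header & 0xFFFF
    entry = FEATURES_BY_VERSION[version][name]
    return entry(header & ~0xFFFF) if callable(entry) else entry


assert _has_feature(REVLOGV1 | FLAG_INLINE_DATA, b'inline')
assert not _has_feature(REVLOGV1 | FLAG_INLINE_DATA, b'docket')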

SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
@@ -1,814 +1,814 @@
# store.py - repository store handling for Mercurial
#
# Copyright 2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import re
import stat

from .i18n import _
from .pycompat import getattr
from .node import hex
from . import (
    changelog,
    error,
    manifest,
    policy,
    pycompat,
    util,
    vfs as vfsmod,
)
from .utils import hashutil

parsers = policy.importmod('parsers')
# how many bytes should be read from fncache in one read
# It is done to prevent loading large fncache files into memory
fncache_chunksize = 10 ** 6


def _matchtrackedpath(path, matcher):
    """parses a fncache entry and returns whether the entry is tracking a path
    matched by matcher or not.

    If matcher is None, returns True"""

    if matcher is None:
        return True
    path = decodedir(path)
    if path.startswith(b'data/'):
        return matcher(path[len(b'data/') : -len(b'.i')])
    elif path.startswith(b'meta/'):
        return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])

    raise error.ProgrammingError(b"cannot decode path %s" % path)


# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def _encodedir(path):
    """
    >>> _encodedir(b'data/foo.i')
    'data/foo.i'
    >>> _encodedir(b'data/foo.i/bla.i')
    'data/foo.i.hg/bla.i'
    >>> _encodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i.hg.hg/bla.i'
    >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
    'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
    """
    return (
        path.replace(b".hg/", b".hg.hg/")
        .replace(b".i/", b".i.hg/")
        .replace(b".d/", b".d.hg/")
    )


encodedir = getattr(parsers, 'encodedir', _encodedir)


def decodedir(path):
    """
    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    """
    if b".hg/" not in path:
        return path
    return (
        path.replace(b".d.hg/", b".d/")
        .replace(b".i.hg/", b".i/")
        .replace(b".hg.hg/", b".hg/")
    )


def _reserved():
    """characters that are problematic for filesystems

    * ascii escapes (0..31)
    * ascii hi (126..255)
    * windows specials

    these characters will be escaped by encodefunctions
    """
    winreserved = [ord(x) for x in u'\\:*?"<>|']
    for x in range(32):
        yield x
    for x in range(126, 256):
        yield x
    for x in winreserved:
        yield x


def _buildencodefun():
    """
    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
    """
    e = b'_'
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord(b"A"), ord(b"Z") + 1))

    cmap = {x: x for x in asciistr}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in capitals + [ord(e)]:
        cmap[xchr(x)] = e + xchr(x).lower()

    dmap = {}
    for k, v in pycompat.iteritems(cmap):
        dmap[v] = k

    def decode(s):
        i = 0
        while i < len(s):
            for l in pycompat.xrange(1, 4):
                try:
                    yield dmap[s[i : i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError

    return (
        lambda s: b''.join(
            [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
        ),
        lambda s: b''.join(list(decode(s))),
    )


_encodefname, _decodefname = _buildencodefun()


def encodefilename(s):
    """
    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    """
    return _encodefname(encodedir(s))


def decodefilename(s):
    """
    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    """
    return decodedir(_decodefname(s))


def _buildlowerencodefun():
    """
    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    """
    xchr = pycompat.bytechr
    cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in range(ord(b"A"), ord(b"Z") + 1):
        cmap[xchr(x)] = xchr(x).lower()

    def lowerencode(s):
        return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])

    return lowerencode


lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()

# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = (b'aux', b'con', b'prn', b'nul')  # length 3
_winres4 = (b'com', b'lpt')  # length 4 (with trailing 1..9)


def _auxencode(path, dotencode):
    """
    Encodes filenames containing names reserved by Windows or which end in
    period or space. Does not touch other single reserved characters c.
    Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
    Additionally encodes space or period at the beginning, if dotencode is
    True. Parameter path is assumed to be all lowercase.
    A segment only needs encoding if a reserved name appears as a
    basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
    doesn't need encoding.

    >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
    >>> _auxencode(s.split(b'/'), True)
    ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
    >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
    >>> _auxencode(s.split(b'/'), False)
    ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
    >>> _auxencode([b'foo. '], True)
    ['foo.~20']
    >>> _auxencode([b' .foo'], True)
    ['~20.foo']
    """
    for i, n in enumerate(path):
        if not n:
            continue
        if dotencode and n[0] in b'. ':
            n = b"~%02x" % ord(n[0:1]) + n[1:]
            path[i] = n
        else:
            l = n.find(b'.')
            if l == -1:
                l = len(n)
            if (l == 3 and n[:3] in _winres3) or (
                l == 4
                and n[3:4] <= b'9'
                and n[3:4] >= b'1'
                and n[:3] in _winres4
            ):
                # encode third letter ('aux' -> 'au~78')
                ec = b"~%02x" % ord(n[2:3])
                n = n[0:2] + ec + n[3:]
                path[i] = n
        if n[-1] in b'. ':
            # encode last period or space ('foo...' -> 'foo..~2e')
            path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
    return path


_maxstorepathlen = 120
_dirprefixlen = 8
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4


def _hashencode(path, dotencode):
    digest = hex(hashutil.sha1(path).digest())
    le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        d = p[:_dirprefixlen]
        if d[-1] in b'. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + b'_'
        if sdirslen == 0:
            t = len(d)
        else:
            t = sdirslen + 1 + len(d)
            if t > _maxshortdirslen:
                break
        sdirs.append(d)
        sdirslen = t
    dirs = b'/'.join(sdirs)
    if len(dirs) > 0:
        dirs += b'/'
    res = b'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        filler = basename[:spaceleft]
        res = b'dh/' + dirs + filler + digest + ext
    return res


def _hybridencode(path, dotencode):
    """encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    """
    path = encodedir(path)
    ef = _encodefname(path).split(b'/')
    res = b'/'.join(_auxencode(ef, dotencode))
    if len(res) > _maxstorepathlen:
        res = _hashencode(path, dotencode)
    return res
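
# Illustrative sketch (hypothetical helper, not part of this changeset):
# predict whether a path would fall back to the hashed encoding, mirroring
# the length check in _hybridencode above.
def _would_hashencode(path, dotencode=True):
    ef = _encodefname(encodedir(path)).split(b'/')
    return len(b'/'.join(_auxencode(ef, dotencode))) > _maxstorepathlen


# e.g. a data path of roughly 200 characters exceeds _maxstorepathlen (120)
# and would be stored under 'dh/' with a sha1 digest in its name:
assert _would_hashencode(b'data/' + b'a' * 200 + b'.i')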


def _pathencode(path):
    de = encodedir(path)
    if len(path) > _maxstorepathlen:
        return _hashencode(de, True)
    ef = _encodefname(de).split(b'/')
    res = b'/'.join(_auxencode(ef, True))
    if len(res) > _maxstorepathlen:
        return _hashencode(de, True)
    return res


_pathencode = getattr(parsers, 'pathencode', _pathencode)


def _plainhybridencode(f):
    return _hybridencode(f, False)


def _calcmode(vfs):
    try:
        # files in .hg/ will be created using this mode
        mode = vfs.stat().st_mode
        # avoid some useless chmods
        if (0o777 & ~util.umask) == (0o777 & mode):
            mode = None
    except OSError:
        mode = None
    return mode


_data = [
    b'bookmarks',
    b'narrowspec',
    b'data',
    b'meta',
    b'00manifest.d',
    b'00manifest.i',
    b'00changelog.d',
    b'00changelog.i',
    b'phaseroots',
    b'obsstore',
    b'requires',
]

REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
-REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored')
+REVLOG_FILES_OTHER_EXT = (b'.idx', b'.d', b'.n', b'.nd', b'd.tmpcensored')
# files that are "volatile" and might change between listing and streaming
#
# note: the ".nd" files are nodemap data and won't "change" but they might be
# deleted.
REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')

# some exceptions to the above matching
-EXCLUDED = re.compile(b'.*undo\.[^/]+\.nd?$')
+EXCLUDED = re.compile(b'.*undo\.[^/]+\.(nd?|i)$')


def is_revlog(f, kind, st):
    if kind != stat.S_IFREG:
        return None
    return revlog_type(f)


def revlog_type(f):
-   if f.endswith(REVLOG_FILES_MAIN_EXT):
+   if f.endswith(REVLOG_FILES_MAIN_EXT) and EXCLUDED.match(f) is None:
        return FILEFLAGS_REVLOG_MAIN
    elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
        t = FILETYPE_FILELOG_OTHER
        if f.endswith(REVLOG_FILES_VOLATILE_EXT):
            t |= FILEFLAGS_VOLATILE
        return t
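
# Illustrative classification of some hypothetical store paths under the
# rules above (the FILEFLAGS_*/FILETYPE_* constants are defined just below):
#
#   data/foo.i         -> FILEFLAGS_REVLOG_MAIN (a revlog's main file)
#   data/foo.d         -> FILETYPE_FILELOG_OTHER (a revlog's data file)
#   data/foo.i.nd      -> FILETYPE_FILELOG_OTHER | FILEFLAGS_VOLATILE (nodemap)
#   undo.00changelog.i -> None; EXCLUDED now also filters undo '.i' files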


# the file is part of changelog data
FILEFLAGS_CHANGELOG = 1 << 13
# the file is part of manifest data
FILEFLAGS_MANIFESTLOG = 1 << 12
# the file is part of filelog data
FILEFLAGS_FILELOG = 1 << 11
# files that are not directly part of a revlog
FILEFLAGS_OTHER = 1 << 10

# the main entry point for a revlog
FILEFLAGS_REVLOG_MAIN = 1 << 1
# a secondary file for a revlog
FILEFLAGS_REVLOG_OTHER = 1 << 0

# files that are "volatile" and might change between listing and streaming
FILEFLAGS_VOLATILE = 1 << 20

FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_OTHER = FILEFLAGS_OTHER
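
# Illustrative sketch (not part of this changeset): the composite FILETYPE_*
# values decompose back into their flag parts, e.g. a volatile filelog
# companion file:
_t = FILETYPE_FILELOG_OTHER | FILEFLAGS_VOLATILE
assert _t & FILEFLAGS_FILELOG
assert _t & FILEFLAGS_REVLOG_OTHER
assert not (_t & FILEFLAGS_REVLOG_MAIN)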


class basicstore(object):
    '''base class for local repository stores'''

    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        return self.path + b'/' + encodedir(f)

    def _walk(self, relpath, recurse):
        '''yields (unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += b'/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            visit = [path]
            readdir = self.rawvfs.readdir
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + b'/' + f
                    rl_type = is_revlog(f, kind, st)
                    if rl_type is not None:
                        n = util.pconvert(fp[striplen:])
                        l.append((rl_type, decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

    def changelog(self, trypending, concurrencychecker=None):
        return changelog.changelog(
            self.vfs,
            trypending=trypending,
            concurrencychecker=concurrencychecker,
        )

    def manifestlog(self, repo, storenarrowmatch):
        rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
        return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)

    def datafiles(self, matcher=None):
        files = self._walk(b'data', True) + self._walk(b'meta', True)
        for (t, u, e, s) in files:
            yield (FILEFLAGS_FILELOG | t, u, e, s)

    def topfiles(self):
        # yield manifest before changelog
        files = reversed(self._walk(b'', False))
        for (t, u, e, s) in files:
            if u.startswith(b'00changelog'):
                yield (FILEFLAGS_CHANGELOG | t, u, e, s)
            elif u.startswith(b'00manifest'):
                yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
            else:
                yield (FILETYPE_OTHER | t, u, e, s)

    def walk(self, matcher=None):
        """return files related to data storage (i.e. revlogs)

        yields (file_type, unencoded, encoded, size)

        if a matcher is passed, only the storage files of tracked paths it
        matches are yielded
        """
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        return _data

    def write(self, tr):
        pass

    def invalidatecaches(self):
        pass

    def markremoved(self, fn):
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # file?
        if self.vfs.exists(path + b".i"):
            return True
        # dir?
        if not path.endswith(b"/"):
            path = path + b"/"
        return self.vfs.exists(path)


class encodedstore(basicstore):
    def __init__(self, path, vfstype):
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self, matcher=None):
        for t, a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                a = None
            if a is not None and not _matchtrackedpath(a, matcher):
                continue
            yield t, a, b, size

    def join(self, f):
        return self.path + b'/' + encodefilename(f)

    def copylist(self):
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]


class fncache(object):
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        self.entries = None
        self._dirty = False
        # set of new additions to fncache
        self.addls = set()

    def ensureloaded(self, warn=None):
        """read the fncache file if not already read.

        If the file on disk is corrupted, raise. If warn is provided,
        warn and keep going instead."""
        if self.entries is None:
            self._load(warn)

    def _load(self, warn=None):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs(b'fncache', mode=b'rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return

        self.entries = set()
        chunk = b''
        for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
            chunk += c
            try:
                p = chunk.rindex(b'\n')
                self.entries.update(decodedir(chunk[: p + 1]).splitlines())
                chunk = chunk[p + 1 :]
            except ValueError:
                # substring '\n' not found, maybe the entry is bigger than the
                # chunksize, so let's keep iterating
                pass

        if chunk:
            msg = _(b"fncache does not end with a newline")
            if warn:
                warn(msg + b'\n')
            else:
                raise error.Abort(
                    msg,
                    hint=_(
                        b"use 'hg debugrebuildfncache' to "
                        b"rebuild the fncache"
                    ),
                )
        self._checkentries(fp, warn)
        fp.close()

    def _checkentries(self, fp, warn):
        """make sure there is no empty string in entries"""
        if b'' in self.entries:
            fp.seek(0)
            for n, line in enumerate(util.iterfile(fp)):
                if not line.rstrip(b'\n'):
                    t = _(b'invalid entry in fncache, line %d') % (n + 1)
                    if warn:
                        warn(t + b'\n')
                    else:
                        raise error.Abort(t)

    def write(self, tr):
        if self._dirty:
            assert self.entries is not None
            self.entries = self.entries | self.addls
            self.addls = set()
            tr.addbackup(b'fncache')
            fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
            if self.entries:
                fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
            fp.close()
            self._dirty = False
        if self.addls:
            # if we have just new entries, let's append them to the fncache
            tr.addbackup(b'fncache')
            fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
            if self.addls:
                fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
            fp.close()
            self.entries = None
            self.addls = set()

    def add(self, fn):
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self.addls.add(fn)

    def remove(self, fn):
        if self.entries is None:
            self._load()
        if fn in self.addls:
            self.addls.remove(fn)
            return
        try:
            self.entries.remove(fn)
            self._dirty = True
        except KeyError:
            pass

    def __contains__(self, fn):
        if fn in self.addls:
            return True
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries | self.addls)


class _fncachevfs(vfsmod.proxyvfs):
    def __init__(self, vfs, fnc, encode):
        vfsmod.proxyvfs.__init__(self, vfs)
        self.fncache = fnc
        self.encode = encode

    def __call__(self, path, mode=b'r', *args, **kw):
        encoded = self.encode(path)
        if mode not in (b'r', b'rb') and (
            path.startswith(b'data/') or path.startswith(b'meta/')
        ):
            # do not trigger a fncache load when adding a file that already is
            # known to exist.
            notload = self.fncache.entries is None and self.vfs.exists(encoded)
            if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
                # when appending to an existing file, if the file has size zero,
                # it should be considered as missing. Such zero-size files are
                # the result of truncation when a transaction is aborted.
                notload = False
            if not notload:
                self.fncache.add(path)
        return self.vfs(encoded, mode, *args, **kw)

    def join(self, path):
        if path:
            return self.vfs.join(self.encode(path))
        else:
            return self.vfs.join(path)


class fncachestore(basicstore):
    def __init__(self, path, vfstype, dotencode):
        if dotencode:
            encode = _pathencode
        else:
            encode = _plainhybridencode
        self.encode = encode
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.pathsep = self.path + b'/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, encode)
        self.opener = self.vfs

    def join(self, f):
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        return self.rawvfs.stat(path).st_size

    def datafiles(self, matcher=None):
        for f in sorted(self.fncache):
            if not _matchtrackedpath(f, matcher):
                continue
            ef = self.encode(f)
            try:
                t = revlog_type(f)
                t |= FILEFLAGS_FILELOG
                yield t, f, ef, self.getsize(ef)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        d = (
            b'bookmarks',
            b'narrowspec',
            b'data',
            b'meta',
            b'dh',
            b'fncache',
            b'phaseroots',
            b'obsstore',
            b'00manifest.d',
            b'00manifest.i',
            b'00changelog.d',
            b'00changelog.i',
            b'requires',
        )
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]

    def write(self, tr):
        self.fncache.write(tr)

    def invalidatecaches(self):
        self.fncache.entries = None
        self.fncache.addls = set()

    def markremoved(self, fn):
        self.fncache.remove(fn)

    def _exists(self, f):
        ef = self.encode(f)
        try:
            self.getsize(ef)
            return True
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # check for files (exact match)
        e = path + b'.i'
        if e in self.fncache and self._exists(e):
            return True
        # now check for directories (prefix match)
        if not path.endswith(b'/'):
            path += b'/'
        for e in self.fncache:
            if e.startswith(path) and self._exists(e):
                return True
        return False