nodemap: write nodemap data on disk...

author:    marmoute
changeset: r44789:5962fd0d (default)
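The change itself is small: the changelog's revlog constructor in mercurial/changelog.py (new lines 388-390 in the hunk below) gains a `persistentnodemap` keyword, switched on when the opener's options carry `exp-persistent-nodemap`. A nodemap is the reverse index a revlog needs to translate a node hash back into a revision number; persisting it on disk saves rebuilding it by scanning the whole index every time the repository is opened. A toy illustration of that mapping (standard library only; this is not Mercurial's actual on-disk format):

```python
import hashlib

# Stand-in revlog entries; in Mercurial each revision's node is a hash
# that must be resolvable back to its revision number.
index = [b'first commit', b'second commit', b'third commit']

# Building the nodemap costs a full index scan, which is exactly what
# writing it to disk lets large repositories skip on every invocation.
nodemap = {hashlib.sha1(data).digest(): rev for rev, data in enumerate(index)}

def rev_for_node(node):
    return nodemap[node]  # O(1) lookup once the map exists
```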
--- a/mercurial/changelog.py
+++ b/mercurial/changelog.py
@@ -1,626 +1,629 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
 )
 from .thirdparty import attr

 from . import (
     copies,
     encoding,
     error,
     pycompat,
     revlog,
 )
 from .utils import (
     dateutil,
     stringutil,
 )

 from .revlogutils import sidedata as sidedatamod

 _defaultextra = {b'branch': b'default'}


 def _string_escape(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
     >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
     >>> s
     'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
     >>> s == _string_unescape(res)
     True
     """
     # subset of the string_escape codec
     text = (
         text.replace(b'\\', b'\\\\')
         .replace(b'\n', b'\\n')
         .replace(b'\r', b'\\r')
     )
     return text.replace(b'\0', b'\\0')


 def _string_unescape(text):
     if b'\\0' in text:
         # fix up \0 without getting into trouble with \\0
         text = text.replace(b'\\\\', b'\\\\\n')
         text = text.replace(b'\\0', b'\0')
         text = text.replace(b'\n', b'')
     return stringutil.unescapestr(text)


 def decodeextra(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
     ...                                 b'baz': chr(92) + chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
     for l in text.split(b'\0'):
         if l:
             k, v = _string_unescape(l).split(b':', 1)
             extra[k] = v
     return extra


 def encodeextra(d):
     # keys must be sorted to produce a deterministic changelog entry
     items = [
         _string_escape(b'%s:%s' % (k, pycompat.bytestr(d[k])))
         for k in sorted(d)
     ]
     return b"\0".join(items)


 def stripdesc(desc):
     """strip trailing whitespace and leading and trailing empty lines"""
     return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


 class appender(object):
     '''the changelog index must be updated last on disk, so we use this class
     to delay writes to it'''

     def __init__(self, vfs, name, mode, buf):
         self.data = buf
         fp = vfs(name, mode)
         self.fp = fp
         self.offset = fp.tell()
         self.size = vfs.fstat(fp).st_size
         self._end = self.size

     def end(self):
         return self._end

     def tell(self):
         return self.offset

     def flush(self):
         pass

     @property
     def closed(self):
         return self.fp.closed

     def close(self):
         self.fp.close()

     def seek(self, offset, whence=0):
         '''virtual file offset spans real file and data'''
         if whence == 0:
             self.offset = offset
         elif whence == 1:
             self.offset += offset
         elif whence == 2:
             self.offset = self.end() + offset
         if self.offset < self.size:
             self.fp.seek(self.offset)

     def read(self, count=-1):
         '''only trick here is reads that span real file and data'''
         ret = b""
         if self.offset < self.size:
             s = self.fp.read(count)
             ret = s
             self.offset += len(s)
             if count > 0:
                 count -= len(s)
         if count != 0:
             doff = self.offset - self.size
             self.data.insert(0, b"".join(self.data))
             del self.data[1:]
             s = self.data[0][doff : doff + count]
             self.offset += len(s)
             ret += s
         return ret

     def write(self, s):
         self.data.append(bytes(s))
         self.offset += len(s)
         self._end += len(s)

     def __enter__(self):
         self.fp.__enter__()
         return self

     def __exit__(self, *args):
         return self.fp.__exit__(*args)


 def _divertopener(opener, target):
     """build an opener that writes in 'target.a' instead of 'target'"""

     def _divert(name, mode=b'r', checkambig=False, **kwargs):
         if name != target:
             return opener(name, mode, **kwargs)
         return opener(name + b".a", mode, **kwargs)

     return _divert


 def _delayopener(opener, target, buf):
     """build an opener that stores chunks in 'buf' instead of 'target'"""

     def _delay(name, mode=b'r', checkambig=False, **kwargs):
         if name != target:
             return opener(name, mode, **kwargs)
         assert not kwargs
         return appender(opener, name, mode, buf)

     return _delay


 @attr.s
 class _changelogrevision(object):
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
     manifest = attr.ib(default=nullid)
     user = attr.ib(default=b'')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
     filesadded = attr.ib(default=None)
     filesremoved = attr.ib(default=None)
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
     description = attr.ib(default=b'')


 class changelogrevision(object):
     """Holds results of a parsed changelog revision.

     Changelog revisions consist of multiple pieces of data, including
     the manifest node, user, and date. This object exposes a view into
     the parsed object.
     """

     __slots__ = (
         '_offsets',
         '_text',
         '_sidedata',
         '_cpsd',
     )

     def __new__(cls, text, sidedata, cpsd):
         if not text:
             return _changelogrevision(extra=_defaultextra)

         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
         # But doing it here is equivalent and saves an extra function call.

         # format used:
         # nodeid\n        : manifest node in ascii
         # user\n          : user, no \n or \r allowed
         # time tz extra\n : date (time is int or float, timezone is int)
         #                 : extra is metadata, encoded and separated by '\0'
         #                 : older versions ignore it
         # files\n\n       : files modified by the cset, no \n or \r allowed
         # (.*)            : comment (free text, ideally utf-8)
         #
         # changelog v0 doesn't use extra

         nl1 = text.index(b'\n')
         nl2 = text.index(b'\n', nl1 + 1)
         nl3 = text.index(b'\n', nl2 + 1)

         # The list of files may be empty. Which means nl3 is the first of the
         # double newline that precedes the description.
         if text[nl3 + 1 : nl3 + 2] == b'\n':
             doublenl = nl3
         else:
             doublenl = text.index(b'\n\n', nl3 + 1)

         self._offsets = (nl1, nl2, nl3, doublenl)
         self._text = text
         self._sidedata = sidedata
         self._cpsd = cpsd

         return self

     @property
     def manifest(self):
         return bin(self._text[0 : self._offsets[0]])

     @property
     def user(self):
         off = self._offsets
         return encoding.tolocal(self._text[off[0] + 1 : off[1]])

     @property
     def _rawdate(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1 : off[2]]
         return dateextra.split(b' ', 2)[0:2]

     @property
     def _rawextra(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1 : off[2]]
         fields = dateextra.split(b' ', 2)
         if len(fields) != 3:
             return None

         return fields[2]

     @property
     def date(self):
         raw = self._rawdate
         time = float(raw[0])
         # Various tools did silly things with the timezone.
         try:
             timezone = int(raw[1])
         except ValueError:
             timezone = 0

         return time, timezone

     @property
     def extra(self):
         raw = self._rawextra
         if raw is None:
             return _defaultextra

         return decodeextra(raw)

     @property
     def files(self):
         off = self._offsets
         if off[2] == off[3]:
             return []

         return self._text[off[2] + 1 : off[3]].split(b'\n')

     @property
     def filesadded(self):
         if self._cpsd:
             rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
             if not rawindices:
                 return []
         else:
             rawindices = self.extra.get(b'filesadded')
         if rawindices is None:
             return None
         return copies.decodefileindices(self.files, rawindices)

     @property
     def filesremoved(self):
         if self._cpsd:
             rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
             if not rawindices:
                 return []
         else:
             rawindices = self.extra.get(b'filesremoved')
         if rawindices is None:
             return None
         return copies.decodefileindices(self.files, rawindices)

     @property
     def p1copies(self):
         if self._cpsd:
             rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
             if not rawcopies:
                 return {}
         else:
             rawcopies = self.extra.get(b'p1copies')
         if rawcopies is None:
             return None
         return copies.decodecopies(self.files, rawcopies)

     @property
     def p2copies(self):
         if self._cpsd:
             rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
             if not rawcopies:
                 return {}
         else:
             rawcopies = self.extra.get(b'p2copies')
         if rawcopies is None:
             return None
         return copies.decodecopies(self.files, rawcopies)

     @property
     def description(self):
         return encoding.tolocal(self._text[self._offsets[3] + 2 :])


 class changelog(revlog.revlog):
     def __init__(self, opener, trypending=False):
         """Load a changelog revlog using an opener.

         If ``trypending`` is true, we attempt to load the index from a
         ``00changelog.i.a`` file instead of the default ``00changelog.i``.
         The ``00changelog.i.a`` file contains index (and possibly inline
         revision) data for a transaction that hasn't been finalized yet.
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.
         """
         if trypending and opener.exists(b'00changelog.i.a'):
             indexfile = b'00changelog.i.a'
         else:
             indexfile = b'00changelog.i'

         datafile = b'00changelog.d'
         revlog.revlog.__init__(
             self,
             opener,
             indexfile,
             datafile=datafile,
             checkambig=True,
             mmaplargeindex=True,
+            persistentnodemap=opener.options.get(
+                b'exp-persistent-nodemap', False
+            ),
         )

         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.

             self.version &= ~revlog.FLAG_GENERALDELTA
             self._generaldelta = False

         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each. So disable delta
         # chains.
         self._storedeltachains = False

         self._realopener = opener
         self._delayed = False
         self._delaybuf = None
         self._divert = False
         self.filteredrevs = frozenset()
         self._copiesstorage = opener.options.get(b'copies-storage')

     def delayupdate(self, tr):
         """delay visibility of index updates to other readers"""

         if not self._delayed:
             if len(self) == 0:
                 self._divert = True
                 if self._realopener.exists(self.indexfile + b'.a'):
                     self._realopener.unlink(self.indexfile + b'.a')
                 self.opener = _divertopener(self._realopener, self.indexfile)
             else:
                 self._delaybuf = []
                 self.opener = _delayopener(
                     self._realopener, self.indexfile, self._delaybuf
                 )
         self._delayed = True
         tr.addpending(b'cl-%i' % id(self), self._writepending)
         tr.addfinalize(b'cl-%i' % id(self), self._finalize)

     def _finalize(self, tr):
         """finalize index updates"""
         self._delayed = False
         self.opener = self._realopener
         # move redirected index data back into place
         if self._divert:
             assert not self._delaybuf
             tmpname = self.indexfile + b".a"
             nfile = self.opener.open(tmpname)
             nfile.close()
             self.opener.rename(tmpname, self.indexfile, checkambig=True)
         elif self._delaybuf:
             fp = self.opener(self.indexfile, b'a', checkambig=True)
             fp.write(b"".join(self._delaybuf))
             fp.close()
             self._delaybuf = None
         self._divert = False
         # split when we're done
         self._enforceinlinesize(tr)

     def _writepending(self, tr):
         """create a file containing the unfinalized state for
         pretxnchangegroup"""
         if self._delaybuf:
             # make a temporary copy of the index
             fp1 = self._realopener(self.indexfile)
             pendingfilename = self.indexfile + b".a"
             # register as a temp file to ensure cleanup on failure
             tr.registertmp(pendingfilename)
             # write existing data
             fp2 = self._realopener(pendingfilename, b"w")
             fp2.write(fp1.read())
             # add pending data
             fp2.write(b"".join(self._delaybuf))
             fp2.close()
             # switch modes so finalize can simply rename
             self._delaybuf = None
             self._divert = True
             self.opener = _divertopener(self._realopener, self.indexfile)

         if self._divert:
             return True

         return False

     def _enforceinlinesize(self, tr, fp=None):
         if not self._delayed:
             revlog.revlog._enforceinlinesize(self, tr, fp)

     def read(self, node):
         """Obtain data from a parsed changelog revision.

         Returns a 6-tuple of:

            - manifest node in binary
            - author/user as a localstr
            - date as a 2-tuple of (time, timezone)
            - list of files
            - commit message as a localstr
            - dict of extra metadata

         Unless you need to access all fields, consider calling
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
         d, s = self._revisiondata(node)
         c = changelogrevision(
             d, s, self._copiesstorage == b'changeset-sidedata'
         )
         return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

     def changelogrevision(self, nodeorrev):
         """Obtain a ``changelogrevision`` for a node or revision."""
         text, sidedata = self._revisiondata(nodeorrev)
         return changelogrevision(
             text, sidedata, self._copiesstorage == b'changeset-sidedata'
         )

     def readfiles(self, node):
         """
         short version of read that only returns the files modified by the cset
         """
         text = self.revision(node)
         if not text:
             return []
         last = text.index(b"\n\n")
         l = text[:last].split(b'\n')
         return l[3:]

     def add(
         self,
         manifest,
         files,
         desc,
         transaction,
         p1,
         p2,
         user,
         date=None,
         extra=None,
         p1copies=None,
         p2copies=None,
         filesadded=None,
         filesremoved=None,
     ):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
         # into a str object and the cached UTF-8 string is thus lost.
         user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

         user = user.strip()
         # An empty username or a username with a "\n" will make the
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
             raise error.StorageError(_(b"empty username"))
         if b"\n" in user:
             raise error.StorageError(
                 _(b"username %r contains a newline") % pycompat.bytestr(user)
             )

         desc = stripdesc(desc)

         if date:
             parseddate = b"%d %d" % dateutil.parsedate(date)
         else:
             parseddate = b"%d %d" % dateutil.makedate()
         if extra:
             branch = extra.get(b"branch")
             if branch in (b"default", b""):
                 del extra[b"branch"]
             elif branch in (b".", b"null", b"tip"):
                 raise error.StorageError(
                     _(b'the name \'%s\' is reserved') % branch
                 )
         sortedfiles = sorted(files)
         sidedata = None
         if extra is not None:
             for name in (
                 b'p1copies',
                 b'p2copies',
                 b'filesadded',
                 b'filesremoved',
             ):
                 extra.pop(name, None)
         if p1copies is not None:
             p1copies = copies.encodecopies(sortedfiles, p1copies)
         if p2copies is not None:
             p2copies = copies.encodecopies(sortedfiles, p2copies)
         if filesadded is not None:
             filesadded = copies.encodefileindices(sortedfiles, filesadded)
         if filesremoved is not None:
             filesremoved = copies.encodefileindices(sortedfiles, filesremoved)
         if self._copiesstorage == b'extra':
             extrasentries = p1copies, p2copies, filesadded, filesremoved
             if extra is None and any(x is not None for x in extrasentries):
                 extra = {}
             if p1copies is not None:
                 extra[b'p1copies'] = p1copies
             if p2copies is not None:
                 extra[b'p2copies'] = p2copies
             if filesadded is not None:
                 extra[b'filesadded'] = filesadded
             if filesremoved is not None:
                 extra[b'filesremoved'] = filesremoved
         elif self._copiesstorage == b'changeset-sidedata':
             sidedata = {}
             if p1copies:
                 sidedata[sidedatamod.SD_P1COPIES] = p1copies
             if p2copies:
                 sidedata[sidedatamod.SD_P2COPIES] = p2copies
             if filesadded:
                 sidedata[sidedatamod.SD_FILESADDED] = filesadded
             if filesremoved:
                 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
             if not sidedata:
                 sidedata = None

         if extra:
             extra = encodeextra(extra)
             parseddate = b"%s %s" % (parseddate, extra)
         l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
         text = b"\n".join(l)
         return self.addrevision(
             text, transaction, len(self), p1, p2, sidedata=sidedata
         )

     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision

         This function exists because creating a changectx object
         just to access this is costly."""
         extra = self.read(rev)[5]
         return encoding.tolocal(extra.get(b"branch")), b'close' in extra

     def _nodeduplicatecallback(self, transaction, node):
         # keep track of revisions that got "re-added", eg: unbunde of know rev.
         #
         # We track them in a list to preserve their order from the source bundle
         duplicates = transaction.changes.setdefault(b'revduplicates', [])
         duplicates.append(self.rev(node))
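Everything else in the changelog.py hunk is unchanged context, but the delayed-write machinery it shows (`appender`, `_divertopener`, `_delayopener`) is what any new on-disk data has to coexist with: while a transaction is open, index writes are parked in a buffer or diverted to a `.a` file, and readers see a virtual file spanning disk plus buffer. A minimal sketch of that idea, standard library only (`ToyAppender` is illustrative, not hg's class):

```python
import io

class ToyAppender:
    """Virtual file: reads cover the real file plus an in-memory buffer."""

    def __init__(self, fp, buf):
        self.fp, self.buf = fp, buf
        self.size = fp.seek(0, io.SEEK_END)  # bytes already on disk

    def write(self, data):
        self.buf.append(bytes(data))  # writes only land in the buffer

    def read_all(self):
        self.fp.seek(0)
        return self.fp.read() + b"".join(self.buf)

# Nothing touches the file until the transaction flushes the buffer.
fp = io.BytesIO(b'on-disk')
app = ToyAppender(fp, [])
app.write(b'+pending')
assert app.read_all() == b'on-disk+pending'
```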
@@ -1,1555 +1,1558 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
19 def loadconfigtable(ui, extname, configtable):
19 def loadconfigtable(ui, extname, configtable):
20 """update config item known to the ui with the extension ones"""
20 """update config item known to the ui with the extension ones"""
21 for section, items in sorted(configtable.items()):
21 for section, items in sorted(configtable.items()):
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 knownkeys = set(knownitems)
23 knownkeys = set(knownitems)
24 newkeys = set(items)
24 newkeys = set(items)
25 for key in sorted(knownkeys & newkeys):
25 for key in sorted(knownkeys & newkeys):
26 msg = b"extension '%s' overwrite config item '%s.%s'"
26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 msg %= (extname, section, key)
27 msg %= (extname, section, key)
28 ui.develwarn(msg, config=b'warn-config')
28 ui.develwarn(msg, config=b'warn-config')
29
29
30 knownitems.update(items)
30 knownitems.update(items)
31
31
32
32
33 class configitem(object):
33 class configitem(object):
34 """represent a known config item
34 """represent a known config item
35
35
36 :section: the official config section where to find this item,
36 :section: the official config section where to find this item,
37 :name: the official name within the section,
37 :name: the official name within the section,
38 :default: default value for this item,
38 :default: default value for this item,
39 :alias: optional list of tuples as alternatives,
39 :alias: optional list of tuples as alternatives,
40 :generic: this is a generic definition, match name using regular expression.
40 :generic: this is a generic definition, match name using regular expression.
41 """
41 """
42
42
43 def __init__(
43 def __init__(
44 self,
44 self,
45 section,
45 section,
46 name,
46 name,
47 default=None,
47 default=None,
48 alias=(),
48 alias=(),
49 generic=False,
49 generic=False,
50 priority=0,
50 priority=0,
51 experimental=False,
51 experimental=False,
52 ):
52 ):
53 self.section = section
53 self.section = section
54 self.name = name
54 self.name = name
55 self.default = default
55 self.default = default
56 self.alias = list(alias)
56 self.alias = list(alias)
57 self.generic = generic
57 self.generic = generic
58 self.priority = priority
58 self.priority = priority
59 self.experimental = experimental
59 self.experimental = experimental
60 self._re = None
60 self._re = None
61 if generic:
61 if generic:
62 self._re = re.compile(self.name)
62 self._re = re.compile(self.name)
63
63
64
64
65 class itemregister(dict):
65 class itemregister(dict):
66 """A specialized dictionary that can handle wild-card selection"""
66 """A specialized dictionary that can handle wild-card selection"""
67
67
68 def __init__(self):
68 def __init__(self):
69 super(itemregister, self).__init__()
69 super(itemregister, self).__init__()
70 self._generics = set()
70 self._generics = set()
71
71
72 def update(self, other):
72 def update(self, other):
73 super(itemregister, self).update(other)
73 super(itemregister, self).update(other)
74 self._generics.update(other._generics)
74 self._generics.update(other._generics)
75
75
76 def __setitem__(self, key, item):
76 def __setitem__(self, key, item):
77 super(itemregister, self).__setitem__(key, item)
77 super(itemregister, self).__setitem__(key, item)
78 if item.generic:
78 if item.generic:
79 self._generics.add(item)
79 self._generics.add(item)
80
80
81 def get(self, key):
81 def get(self, key):
82 baseitem = super(itemregister, self).get(key)
82 baseitem = super(itemregister, self).get(key)
83 if baseitem is not None and not baseitem.generic:
83 if baseitem is not None and not baseitem.generic:
84 return baseitem
84 return baseitem
85
85
86 # search for a matching generic item
86 # search for a matching generic item
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 for item in generics:
88 for item in generics:
89 # we use 'match' instead of 'search' to make the matching simpler
89 # we use 'match' instead of 'search' to make the matching simpler
90 # for people unfamiliar with regular expression. Having the match
90 # for people unfamiliar with regular expression. Having the match
91 # rooted to the start of the string will produce less surprising
91 # rooted to the start of the string will produce less surprising
92 # result for user writing simple regex for sub-attribute.
92 # result for user writing simple regex for sub-attribute.
93 #
93 #
94 # For example using "color\..*" match produces an unsurprising
94 # For example using "color\..*" match produces an unsurprising
95 # result, while using search could suddenly match apparently
95 # result, while using search could suddenly match apparently
96 # unrelated configuration that happens to contains "color."
96 # unrelated configuration that happens to contains "color."
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 # some match to avoid the need to prefix most pattern with "^".
98 # some match to avoid the need to prefix most pattern with "^".
99 # The "^" seems more error prone.
99 # The "^" seems more error prone.
100 if item._re.match(key):
100 if item._re.match(key):
101 return item
101 return item
102
102
103 return None
103 return None
104
104
105
105
106 coreitems = {}
106 coreitems = {}
107
107
108
108
109 def _register(configtable, *args, **kwargs):
109 def _register(configtable, *args, **kwargs):
110 item = configitem(*args, **kwargs)
110 item = configitem(*args, **kwargs)
111 section = configtable.setdefault(item.section, itemregister())
111 section = configtable.setdefault(item.section, itemregister())
112 if item.name in section:
112 if item.name in section:
113 msg = b"duplicated config item registration for '%s.%s'"
113 msg = b"duplicated config item registration for '%s.%s'"
114 raise error.ProgrammingError(msg % (item.section, item.name))
114 raise error.ProgrammingError(msg % (item.section, item.name))
115 section[item.name] = item
115 section[item.name] = item
116
116
117
117
118 # special value for case where the default is derived from other values
118 # special value for case where the default is derived from other values
119 dynamicdefault = object()
119 dynamicdefault = object()
120
120
121 # Registering actual config items
121 # Registering actual config items
122
122
123
123
124 def getitemregister(configtable):
124 def getitemregister(configtable):
125 f = functools.partial(_register, configtable)
125 f = functools.partial(_register, configtable)
126 # export pseudo enum as configitem.*
126 # export pseudo enum as configitem.*
127 f.dynamicdefault = dynamicdefault
127 f.dynamicdefault = dynamicdefault
128 return f
128 return f
129
129
130
130
131 coreconfigitem = getitemregister(coreitems)
131 coreconfigitem = getitemregister(coreitems)
132
132
133
133
134 def _registerdiffopts(section, configprefix=b''):
134 def _registerdiffopts(section, configprefix=b''):
135 coreconfigitem(
135 coreconfigitem(
136 section, configprefix + b'nodates', default=False,
136 section, configprefix + b'nodates', default=False,
137 )
137 )
138 coreconfigitem(
138 coreconfigitem(
139 section, configprefix + b'showfunc', default=False,
139 section, configprefix + b'showfunc', default=False,
140 )
140 )
141 coreconfigitem(
141 coreconfigitem(
142 section, configprefix + b'unified', default=None,
142 section, configprefix + b'unified', default=None,
143 )
143 )
144 coreconfigitem(
144 coreconfigitem(
145 section, configprefix + b'git', default=False,
145 section, configprefix + b'git', default=False,
146 )
146 )
147 coreconfigitem(
147 coreconfigitem(
148 section, configprefix + b'ignorews', default=False,
148 section, configprefix + b'ignorews', default=False,
149 )
149 )
150 coreconfigitem(
150 coreconfigitem(
151 section, configprefix + b'ignorewsamount', default=False,
151 section, configprefix + b'ignorewsamount', default=False,
152 )
152 )
153 coreconfigitem(
153 coreconfigitem(
154 section, configprefix + b'ignoreblanklines', default=False,
154 section, configprefix + b'ignoreblanklines', default=False,
155 )
155 )
156 coreconfigitem(
156 coreconfigitem(
157 section, configprefix + b'ignorewseol', default=False,
157 section, configprefix + b'ignorewseol', default=False,
158 )
158 )
159 coreconfigitem(
159 coreconfigitem(
160 section, configprefix + b'nobinary', default=False,
160 section, configprefix + b'nobinary', default=False,
161 )
161 )
162 coreconfigitem(
162 coreconfigitem(
163 section, configprefix + b'noprefix', default=False,
163 section, configprefix + b'noprefix', default=False,
164 )
164 )
165 coreconfigitem(
165 coreconfigitem(
166 section, configprefix + b'word-diff', default=False,
166 section, configprefix + b'word-diff', default=False,
167 )
167 )
168
168
169
169
170 coreconfigitem(
170 coreconfigitem(
171 b'alias', b'.*', default=dynamicdefault, generic=True,
171 b'alias', b'.*', default=dynamicdefault, generic=True,
172 )
172 )
173 coreconfigitem(
173 coreconfigitem(
174 b'auth', b'cookiefile', default=None,
174 b'auth', b'cookiefile', default=None,
175 )
175 )
176 _registerdiffopts(section=b'annotate')
176 _registerdiffopts(section=b'annotate')
177 # bookmarks.pushing: internal hack for discovery
177 # bookmarks.pushing: internal hack for discovery
178 coreconfigitem(
178 coreconfigitem(
179 b'bookmarks', b'pushing', default=list,
179 b'bookmarks', b'pushing', default=list,
180 )
180 )
181 # bundle.mainreporoot: internal hack for bundlerepo
181 # bundle.mainreporoot: internal hack for bundlerepo
182 coreconfigitem(
182 coreconfigitem(
183 b'bundle', b'mainreporoot', default=b'',
183 b'bundle', b'mainreporoot', default=b'',
184 )
184 )
185 coreconfigitem(
185 coreconfigitem(
186 b'censor', b'policy', default=b'abort', experimental=True,
186 b'censor', b'policy', default=b'abort', experimental=True,
187 )
187 )
188 coreconfigitem(
188 coreconfigitem(
189 b'chgserver', b'idletimeout', default=3600,
189 b'chgserver', b'idletimeout', default=3600,
190 )
190 )
191 coreconfigitem(
191 coreconfigitem(
192 b'chgserver', b'skiphash', default=False,
192 b'chgserver', b'skiphash', default=False,
193 )
193 )
194 coreconfigitem(
194 coreconfigitem(
195 b'cmdserver', b'log', default=None,
195 b'cmdserver', b'log', default=None,
196 )
196 )
197 coreconfigitem(
197 coreconfigitem(
198 b'cmdserver', b'max-log-files', default=7,
198 b'cmdserver', b'max-log-files', default=7,
199 )
199 )
200 coreconfigitem(
200 coreconfigitem(
201 b'cmdserver', b'max-log-size', default=b'1 MB',
201 b'cmdserver', b'max-log-size', default=b'1 MB',
202 )
202 )
203 coreconfigitem(
203 coreconfigitem(
204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
205 )
205 )
206 coreconfigitem(
206 coreconfigitem(
207 b'cmdserver', b'message-encodings', default=list, experimental=True,
207 b'cmdserver', b'message-encodings', default=list, experimental=True,
208 )
208 )
209 coreconfigitem(
209 coreconfigitem(
210 b'cmdserver',
210 b'cmdserver',
211 b'track-log',
211 b'track-log',
212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
213 )
213 )
214 coreconfigitem(
214 coreconfigitem(
215 b'color', b'.*', default=None, generic=True,
215 b'color', b'.*', default=None, generic=True,
216 )
216 )
217 coreconfigitem(
217 coreconfigitem(
218 b'color', b'mode', default=b'auto',
218 b'color', b'mode', default=b'auto',
219 )
219 )
220 coreconfigitem(
220 coreconfigitem(
221 b'color', b'pagermode', default=dynamicdefault,
221 b'color', b'pagermode', default=dynamicdefault,
222 )
222 )
223 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
223 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
224 coreconfigitem(
224 coreconfigitem(
225 b'commands', b'commit.post-status', default=False,
225 b'commands', b'commit.post-status', default=False,
226 )
226 )
227 coreconfigitem(
227 coreconfigitem(
228 b'commands', b'grep.all-files', default=False, experimental=True,
228 b'commands', b'grep.all-files', default=False, experimental=True,
229 )
229 )
230 coreconfigitem(
230 coreconfigitem(
231 b'commands', b'merge.require-rev', default=False,
231 b'commands', b'merge.require-rev', default=False,
232 )
232 )
233 coreconfigitem(
233 coreconfigitem(
234 b'commands', b'push.require-revs', default=False,
234 b'commands', b'push.require-revs', default=False,
235 )
235 )
236 coreconfigitem(
236 coreconfigitem(
237 b'commands', b'resolve.confirm', default=False,
237 b'commands', b'resolve.confirm', default=False,
238 )
238 )
239 coreconfigitem(
239 coreconfigitem(
240 b'commands', b'resolve.explicit-re-merge', default=False,
240 b'commands', b'resolve.explicit-re-merge', default=False,
241 )
241 )
242 coreconfigitem(
242 coreconfigitem(
243 b'commands', b'resolve.mark-check', default=b'none',
243 b'commands', b'resolve.mark-check', default=b'none',
244 )
244 )
245 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
245 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
246 coreconfigitem(
246 coreconfigitem(
247 b'commands', b'show.aliasprefix', default=list,
247 b'commands', b'show.aliasprefix', default=list,
248 )
248 )
249 coreconfigitem(
249 coreconfigitem(
250 b'commands', b'status.relative', default=False,
250 b'commands', b'status.relative', default=False,
251 )
251 )
252 coreconfigitem(
252 coreconfigitem(
253 b'commands', b'status.skipstates', default=[], experimental=True,
253 b'commands', b'status.skipstates', default=[], experimental=True,
254 )
254 )
255 coreconfigitem(
255 coreconfigitem(
256 b'commands', b'status.terse', default=b'',
256 b'commands', b'status.terse', default=b'',
257 )
257 )
258 coreconfigitem(
258 coreconfigitem(
259 b'commands', b'status.verbose', default=False,
259 b'commands', b'status.verbose', default=False,
260 )
260 )
261 coreconfigitem(
261 coreconfigitem(
262 b'commands', b'update.check', default=None,
262 b'commands', b'update.check', default=None,
263 )
263 )
264 coreconfigitem(
264 coreconfigitem(
265 b'commands', b'update.requiredest', default=False,
265 b'commands', b'update.requiredest', default=False,
266 )
266 )
267 coreconfigitem(
267 coreconfigitem(
268 b'committemplate', b'.*', default=None, generic=True,
268 b'committemplate', b'.*', default=None, generic=True,
269 )
269 )
270 coreconfigitem(
270 coreconfigitem(
271 b'convert', b'bzr.saverev', default=True,
271 b'convert', b'bzr.saverev', default=True,
272 )
272 )
273 coreconfigitem(
273 coreconfigitem(
274 b'convert', b'cvsps.cache', default=True,
274 b'convert', b'cvsps.cache', default=True,
275 )
275 )
276 coreconfigitem(
276 coreconfigitem(
277 b'convert', b'cvsps.fuzz', default=60,
277 b'convert', b'cvsps.fuzz', default=60,
278 )
278 )
279 coreconfigitem(
279 coreconfigitem(
280 b'convert', b'cvsps.logencoding', default=None,
280 b'convert', b'cvsps.logencoding', default=None,
281 )
281 )
282 coreconfigitem(
282 coreconfigitem(
283 b'convert', b'cvsps.mergefrom', default=None,
283 b'convert', b'cvsps.mergefrom', default=None,
284 )
284 )
285 coreconfigitem(
285 coreconfigitem(
286 b'convert', b'cvsps.mergeto', default=None,
286 b'convert', b'cvsps.mergeto', default=None,
287 )
287 )
288 coreconfigitem(
288 coreconfigitem(
289 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
289 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
290 )
290 )
291 coreconfigitem(
291 coreconfigitem(
292 b'convert', b'git.extrakeys', default=list,
292 b'convert', b'git.extrakeys', default=list,
293 )
293 )
294 coreconfigitem(
294 coreconfigitem(
295 b'convert', b'git.findcopiesharder', default=False,
295 b'convert', b'git.findcopiesharder', default=False,
296 )
296 )
297 coreconfigitem(
297 coreconfigitem(
298 b'convert', b'git.remoteprefix', default=b'remote',
298 b'convert', b'git.remoteprefix', default=b'remote',
299 )
299 )
300 coreconfigitem(
300 coreconfigitem(
301 b'convert', b'git.renamelimit', default=400,
301 b'convert', b'git.renamelimit', default=400,
302 )
302 )
303 coreconfigitem(
303 coreconfigitem(
304 b'convert', b'git.saverev', default=True,
304 b'convert', b'git.saverev', default=True,
305 )
305 )
306 coreconfigitem(
306 coreconfigitem(
307 b'convert', b'git.similarity', default=50,
307 b'convert', b'git.similarity', default=50,
308 )
308 )
309 coreconfigitem(
309 coreconfigitem(
310 b'convert', b'git.skipsubmodules', default=False,
310 b'convert', b'git.skipsubmodules', default=False,
311 )
311 )
312 coreconfigitem(
312 coreconfigitem(
313 b'convert', b'hg.clonebranches', default=False,
313 b'convert', b'hg.clonebranches', default=False,
314 )
314 )
315 coreconfigitem(
315 coreconfigitem(
316 b'convert', b'hg.ignoreerrors', default=False,
316 b'convert', b'hg.ignoreerrors', default=False,
317 )
317 )
318 coreconfigitem(
318 coreconfigitem(
319 b'convert', b'hg.preserve-hash', default=False,
319 b'convert', b'hg.preserve-hash', default=False,
320 )
320 )
321 coreconfigitem(
321 coreconfigitem(
322 b'convert', b'hg.revs', default=None,
322 b'convert', b'hg.revs', default=None,
323 )
323 )
324 coreconfigitem(
324 coreconfigitem(
325 b'convert', b'hg.saverev', default=False,
325 b'convert', b'hg.saverev', default=False,
326 )
326 )
327 coreconfigitem(
327 coreconfigitem(
328 b'convert', b'hg.sourcename', default=None,
328 b'convert', b'hg.sourcename', default=None,
329 )
329 )
330 coreconfigitem(
330 coreconfigitem(
331 b'convert', b'hg.startrev', default=None,
331 b'convert', b'hg.startrev', default=None,
332 )
332 )
333 coreconfigitem(
333 coreconfigitem(
334 b'convert', b'hg.tagsbranch', default=b'default',
334 b'convert', b'hg.tagsbranch', default=b'default',
335 )
335 )
336 coreconfigitem(
336 coreconfigitem(
337 b'convert', b'hg.usebranchnames', default=True,
337 b'convert', b'hg.usebranchnames', default=True,
338 )
338 )
339 coreconfigitem(
339 coreconfigitem(
340 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
340 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
341 )
341 )
342 coreconfigitem(
342 coreconfigitem(
343 b'convert', b'localtimezone', default=False,
343 b'convert', b'localtimezone', default=False,
344 )
344 )
345 coreconfigitem(
345 coreconfigitem(
346 b'convert', b'p4.encoding', default=dynamicdefault,
346 b'convert', b'p4.encoding', default=dynamicdefault,
347 )
347 )
348 coreconfigitem(
348 coreconfigitem(
349 b'convert', b'p4.startrev', default=0,
349 b'convert', b'p4.startrev', default=0,
350 )
350 )
351 coreconfigitem(
351 coreconfigitem(
352 b'convert', b'skiptags', default=False,
352 b'convert', b'skiptags', default=False,
353 )
353 )
354 coreconfigitem(
354 coreconfigitem(
355 b'convert', b'svn.debugsvnlog', default=True,
355 b'convert', b'svn.debugsvnlog', default=True,
356 )
356 )
357 coreconfigitem(
357 coreconfigitem(
358 b'convert', b'svn.trunk', default=None,
358 b'convert', b'svn.trunk', default=None,
359 )
359 )
360 coreconfigitem(
360 coreconfigitem(
361 b'convert', b'svn.tags', default=None,
361 b'convert', b'svn.tags', default=None,
362 )
362 )
363 coreconfigitem(
363 coreconfigitem(
364 b'convert', b'svn.branches', default=None,
364 b'convert', b'svn.branches', default=None,
365 )
365 )
366 coreconfigitem(
366 coreconfigitem(
367 b'convert', b'svn.startrev', default=0,
367 b'convert', b'svn.startrev', default=0,
368 )
368 )
369 coreconfigitem(
369 coreconfigitem(
370 b'debug', b'dirstate.delaywrite', default=0,
370 b'debug', b'dirstate.delaywrite', default=0,
371 )
371 )
372 coreconfigitem(
372 coreconfigitem(
373 b'defaults', b'.*', default=None, generic=True,
373 b'defaults', b'.*', default=None, generic=True,
374 )
374 )
coreconfigitem(
    b'devel', b'all-warnings', default=False,
)
coreconfigitem(
    b'devel', b'bundle2.debug', default=False,
)
coreconfigitem(
    b'devel', b'bundle.delta', default=b'',
)
coreconfigitem(
    b'devel', b'cache-vfs', default=None,
)
coreconfigitem(
    b'devel', b'check-locks', default=False,
)
coreconfigitem(
    b'devel', b'check-relroot', default=False,
)
coreconfigitem(
    b'devel', b'default-date', default=None,
)
coreconfigitem(
    b'devel', b'deprec-warn', default=False,
)
coreconfigitem(
    b'devel', b'disableloaddefaultcerts', default=False,
)
coreconfigitem(
    b'devel', b'warn-empty-changegroup', default=False,
)
coreconfigitem(
    b'devel', b'legacy.exchange', default=list,
)
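# A callable default such as `list` above is invoked at lookup time, so
# every consumer gets a fresh mutable instance rather than one shared list
# object that could be accidentally mutated across callers.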
coreconfigitem(
    b'devel', b'servercafile', default=b'',
)
coreconfigitem(
    b'devel', b'serverexactprotocol', default=b'',
)
coreconfigitem(
    b'devel', b'serverrequirecert', default=False,
)
coreconfigitem(
    b'devel', b'strip-obsmarkers', default=True,
)
coreconfigitem(
    b'devel', b'warn-config', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-default', default=None,
)
coreconfigitem(
    b'devel', b'user.obsmarker', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-unknown', default=None,
)
coreconfigitem(
    b'devel', b'debug.copies', default=False,
)
coreconfigitem(
    b'devel', b'debug.extensions', default=False,
)
coreconfigitem(
    b'devel', b'debug.repo-filters', default=False,
)
coreconfigitem(
    b'devel', b'debug.peer-request', default=False,
)
coreconfigitem(
    b'devel', b'discovery.randomize', default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email', b'bcc', default=None,
)
coreconfigitem(
    b'email', b'cc', default=None,
)
coreconfigitem(
    b'email', b'charsets', default=list,
)
coreconfigitem(
    b'email', b'from', default=None,
)
coreconfigitem(
    b'email', b'method', default=b'smtp',
)
coreconfigitem(
    b'email', b'reply-to', default=None,
)
coreconfigitem(
    b'email', b'to', default=None,
)
coreconfigitem(
    b'experimental', b'archivemetatemplate', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'auto-publish', default=b'publish',
)
coreconfigitem(
    b'experimental', b'bundle-phases', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2-advertise', default=True,
)
coreconfigitem(
    b'experimental', b'bundle2-output-capture', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2.pushback', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2lazylocking', default=False,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.bzip2', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.gzip', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.none', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.zstd', default=None,
)
coreconfigitem(
    b'experimental', b'changegroup3', default=False,
)
coreconfigitem(
    b'experimental', b'cleanup-as-archived', default=False,
)
coreconfigitem(
    b'experimental', b'clientcompressionengines', default=list,
)
coreconfigitem(
    b'experimental', b'copytrace', default=b'on',
)
coreconfigitem(
    b'experimental', b'copytrace.movecandidateslimit', default=100,
)
coreconfigitem(
    b'experimental', b'copytrace.sourcecommitlimit', default=100,
)
coreconfigitem(
    b'experimental', b'copies.read-from', default=b"filelog-only",
)
coreconfigitem(
    b'experimental', b'copies.write-to', default=b'filelog-only',
)
coreconfigitem(
    b'experimental', b'crecordtest', default=None,
)
coreconfigitem(
    b'experimental', b'directaccess', default=False,
)
coreconfigitem(
    b'experimental', b'directaccess.revnums', default=False,
)
coreconfigitem(
    b'experimental', b'editortmpinhg', default=False,
)
coreconfigitem(
    b'experimental', b'evolution', default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
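# The `alias` list above names an older config spelling for the same item:
# a lookup of experimental.evolution.allowdivergence also honours a value
# set under the legacy experimental.allowdivergence key.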
coreconfigitem(
    b'experimental', b'evolution.allowunstable', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.createmarkers', default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental', b'evolution.exchange', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.bundle-obsmarker', default=False,
)
coreconfigitem(
    b'experimental', b'log.topo', default=False,
)
coreconfigitem(
    b'experimental', b'evolution.report-instabilities', default=True,
)
coreconfigitem(
    b'experimental', b'evolution.track-operation', default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental', b'extra-filter-revs', default=None,
)
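# For example, a share meant to hide everything non-public could carry
# something like the following hgrc stanza (a hypothetical illustration;
# any revset expression works here):
#
#   [experimental]
#   extra-filter-revs = not public()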
coreconfigitem(
    b'experimental', b'maxdeltachainspan', default=-1,
)
coreconfigitem(
    b'experimental', b'mergetempdirprefix', default=None,
)
coreconfigitem(
    b'experimental', b'mmapindexthreshold', default=None,
)
coreconfigitem(
    b'experimental', b'narrow', default=False,
)
coreconfigitem(
    b'experimental', b'nonnormalparanoidcheck', default=False,
)
coreconfigitem(
    b'experimental', b'exportableenviron', default=list,
)
coreconfigitem(
    b'experimental', b'extendedheader.index', default=None,
)
coreconfigitem(
    b'experimental', b'extendedheader.similarity', default=False,
)
coreconfigitem(
    b'experimental', b'graphshorten', default=False,
)
coreconfigitem(
    b'experimental', b'graphstyle.parent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.missing', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'hook-track-tags', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.v2-encoder-order', default=None,
)
coreconfigitem(
    b'experimental', b'httppostargs', default=False,
)
coreconfigitem(
    b'experimental', b'mergedriver', default=None,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental', b'obsmarkers-exchange-debug', default=False,
)
coreconfigitem(
    b'experimental', b'remotenames', default=False,
)
coreconfigitem(
    b'experimental', b'removeemptydirs', default=True,
)
coreconfigitem(
    b'experimental', b'revert.interactive.select-to-keep', default=False,
)
coreconfigitem(
    b'experimental', b'revisions.prefixhexnode', default=False,
)
coreconfigitem(
    b'experimental', b'revlogv2', default=None,
)
coreconfigitem(
    b'experimental', b'revisions.disambiguatewithin', default=None,
)
coreconfigitem(
    b'experimental', b'rust.index', default=False,
)
coreconfigitem(
    b'experimental', b'exp-persistent-nodemap', default=False,
)
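# `exp-persistent-nodemap` is the knob introduced by this changeset
# ("nodemap: write nodemap data on disk"): when enabled, nodemap data is
# persisted on disk, presumably so the node-to-rev mapping does not have to
# be recomputed from the revlog index on each use.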
coreconfigitem(
    b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental', b'server.stream-narrow-clones', default=False,
)
coreconfigitem(
    b'experimental', b'single-head-per-branch', default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental', b'sshserver.support-v2', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read.density-threshold', default=0.50,
)
coreconfigitem(
    b'experimental', b'sparse-read.min-gap-size', default=b'65K',
)
coreconfigitem(
    b'experimental', b'treemanifest', default=False,
)
coreconfigitem(
    b'experimental', b'update.atomic-file', default=False,
)
coreconfigitem(
    b'experimental', b'sshpeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.apiserver', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.http-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.debugreflect', default=False,
)
coreconfigitem(
    b'experimental', b'worker.wdir-get-thread-safe', default=False,
)
coreconfigitem(
    b'experimental', b'worker.repository-upgrade', default=False,
)
coreconfigitem(
    b'experimental', b'xdiff', default=False,
)
coreconfigitem(
    b'extensions', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'extdata', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'format', b'bookmarks-in-store', default=False,
)
coreconfigitem(
    b'format', b'chunkcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'dotencode', default=True,
)
coreconfigitem(
    b'format', b'generaldelta', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'manifestcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
)
coreconfigitem(
    b'format', b'obsstore-version', default=None,
)
coreconfigitem(
    b'format', b'sparse-revlog', default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=b'zlib',
    alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
    b'format', b'usefncache', default=True,
)
coreconfigitem(
    b'format', b'usegeneraldelta', default=True,
)
coreconfigitem(
    b'format', b'usestore', default=True,
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format', b'exp-use-side-data', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'internal-phase', default=False, experimental=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_when_unused', default=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_update_file_count', default=50000,
)
coreconfigitem(
    b'help', br'hidden-command\..*', default=False, generic=True,
)
coreconfigitem(
    b'help', br'hidden-topic\..*', default=False, generic=True,
)
coreconfigitem(
    b'hooks', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hgweb-paths', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostfingerprints', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'ciphers', default=None,
)
coreconfigitem(
    b'hostsecurity', b'disabletls10warning', default=False,
)
coreconfigitem(
    b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
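# The `.*:minimumprotocol$` pattern covers per-host overrides, written in
# an hgrc as, e.g. (hypothetical host name):
#
#   [hostsecurity]
#   example.com:minimumprotocol = tls1.2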
coreconfigitem(
    b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
)

coreconfigitem(
    b'http_proxy', b'always', default=False,
)
coreconfigitem(
    b'http_proxy', b'host', default=None,
)
coreconfigitem(
    b'http_proxy', b'no', default=list,
)
coreconfigitem(
    b'http_proxy', b'passwd', default=None,
)
coreconfigitem(
    b'http_proxy', b'user', default=None,
)

coreconfigitem(
    b'http', b'timeout', default=None,
)

coreconfigitem(
    b'logtoprocess', b'commandexception', default=None,
)
coreconfigitem(
    b'logtoprocess', b'commandfinish', default=None,
)
coreconfigitem(
    b'logtoprocess', b'command', default=None,
)
coreconfigitem(
    b'logtoprocess', b'develwarn', default=None,
)
coreconfigitem(
    b'logtoprocess', b'uiblocked', default=None,
)
coreconfigitem(
    b'merge', b'checkunknown', default=b'abort',
)
coreconfigitem(
    b'merge', b'checkignored', default=b'abort',
)
coreconfigitem(
    b'experimental', b'merge.checkpathconflicts', default=False,
)
coreconfigitem(
    b'merge', b'followcopies', default=True,
)
coreconfigitem(
    b'merge', b'on-failure', default=b'continue',
)
coreconfigitem(
    b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
)
coreconfigitem(
    b'merge', b'strict-capability-check', default=False,
)
coreconfigitem(
    b'merge-tools', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
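# `priority=-1` is presumably how these suffix patterns are ordered against
# the catch-all `merge-tools.*` entry above when several generic regexes
# match the same key. In an hgrc they back entries such as (hypothetical
# tool name):
#
#   [merge-tools]
#   mymerge.executable = /usr/bin/mymerge
#   mymerge.args = $local $other -o $output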
coreconfigitem(
    b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'pager', b'attend-.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'pager', b'ignore', default=list,
)
coreconfigitem(
    b'pager', b'pager', default=dynamicdefault,
)
coreconfigitem(
    b'patch', b'eol', default=b'strict',
)
coreconfigitem(
    b'patch', b'fuzz', default=2,
)
coreconfigitem(
    b'paths', b'default', default=None,
)
coreconfigitem(
    b'paths', b'default-push', default=None,
)
coreconfigitem(
    b'paths', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'phases', b'checksubrepos', default=b'follow',
)
coreconfigitem(
    b'phases', b'new-commit', default=b'draft',
)
coreconfigitem(
    b'phases', b'publish', default=True,
)
coreconfigitem(
    b'profiling', b'enabled', default=False,
)
coreconfigitem(
    b'profiling', b'format', default=b'text',
)
coreconfigitem(
    b'profiling', b'freq', default=1000,
)
coreconfigitem(
    b'profiling', b'limit', default=30,
)
coreconfigitem(
    b'profiling', b'nested', default=0,
)
coreconfigitem(
    b'profiling', b'output', default=None,
)
coreconfigitem(
    b'profiling', b'showmax', default=0.999,
)
coreconfigitem(
    b'profiling', b'showmin', default=dynamicdefault,
)
coreconfigitem(
    b'profiling', b'showtime', default=True,
)
coreconfigitem(
    b'profiling', b'sort', default=b'inlinetime',
)
coreconfigitem(
    b'profiling', b'statformat', default=b'hotpath',
)
coreconfigitem(
    b'profiling', b'time-track', default=dynamicdefault,
)
coreconfigitem(
    b'profiling', b'type', default=b'stat',
)
coreconfigitem(
    b'progress', b'assume-tty', default=False,
)
coreconfigitem(
    b'progress', b'changedelay', default=1,
)
coreconfigitem(
    b'progress', b'clear-complete', default=True,
)
coreconfigitem(
    b'progress', b'debug', default=False,
)
coreconfigitem(
    b'progress', b'delay', default=3,
)
coreconfigitem(
    b'progress', b'disable', default=False,
)
coreconfigitem(
    b'progress', b'estimateinterval', default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress', b'refresh', default=0.1,
)
coreconfigitem(
    b'progress', b'width', default=dynamicdefault,
)
coreconfigitem(
    b'push', b'pushvars.server', default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite', b'update-timestamp', default=False,
)
coreconfigitem(
    b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta', default=True,
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta-parent', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zlib.level', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zstd.level', default=None,
)
coreconfigitem(
    b'server', b'bookmarks-pushkey-compat', default=True,
)
coreconfigitem(
    b'server', b'bundle1', default=True,
)
coreconfigitem(
    b'server', b'bundle1gd', default=None,
)
coreconfigitem(
    b'server', b'bundle1.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1.push', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.push', default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server', b'compressionengines', default=list,
)
coreconfigitem(
    b'server', b'concurrent-push-mode', default=b'strict',
)
coreconfigitem(
    b'server', b'disablefullbundle', default=False,
)
coreconfigitem(
    b'server', b'maxhttpheaderlen', default=1024,
)
coreconfigitem(
    b'server', b'pullbundle', default=False,
)
coreconfigitem(
    b'server', b'preferuncompressed', default=False,
)
coreconfigitem(
    b'server', b'streamunbundle', default=False,
)
coreconfigitem(
    b'server', b'uncompressed', default=True,
)
coreconfigitem(
    b'server', b'uncompressedallowsecret', default=False,
)
coreconfigitem(
    b'server', b'view', default=b'served',
)
coreconfigitem(
    b'server', b'validate', default=False,
)
coreconfigitem(
    b'server', b'zliblevel', default=-1,
)
coreconfigitem(
    b'server', b'zstdlevel', default=3,
)
coreconfigitem(
    b'share', b'pool', default=None,
)
coreconfigitem(
    b'share', b'poolnaming', default=b'identity',
)
coreconfigitem(
    b'shelve', b'maxbackups', default=10,
)
coreconfigitem(
    b'smtp', b'host', default=None,
)
coreconfigitem(
    b'smtp', b'local_hostname', default=None,
)
coreconfigitem(
    b'smtp', b'password', default=None,
)
coreconfigitem(
    b'smtp', b'port', default=dynamicdefault,
)
coreconfigitem(
    b'smtp', b'tls', default=b'none',
)
coreconfigitem(
    b'smtp', b'username', default=None,
)
coreconfigitem(
    b'sparse', b'missingwarning', default=True, experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos', b'hg:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'git:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'svn:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'templates', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'templateconfig', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'trusted', b'groups', default=list,
)
coreconfigitem(
    b'trusted', b'users', default=list,
)
coreconfigitem(
    b'ui', b'_usedassubrepo', default=False,
)
coreconfigitem(
    b'ui', b'allowemptycommit', default=False,
)
coreconfigitem(
    b'ui', b'archivemeta', default=True,
)
coreconfigitem(
    b'ui', b'askusername', default=False,
)
coreconfigitem(
    b'ui', b'clonebundlefallback', default=False,
)
coreconfigitem(
    b'ui', b'clonebundleprefers', default=list,
)
coreconfigitem(
    b'ui', b'clonebundles', default=True,
)
coreconfigitem(
    b'ui', b'color', default=b'auto',
)
coreconfigitem(
    b'ui', b'commitsubrepos', default=False,
)
coreconfigitem(
    b'ui', b'debug', default=False,
)
coreconfigitem(
    b'ui', b'debugger', default=None,
)
coreconfigitem(
    b'ui', b'editor', default=dynamicdefault,
)
coreconfigitem(
    b'ui', b'fallbackencoding', default=None,
)
coreconfigitem(
    b'ui', b'forcecwd', default=None,
)
coreconfigitem(
    b'ui', b'forcemerge', default=None,
)
coreconfigitem(
    b'ui', b'formatdebug', default=False,
)
coreconfigitem(
    b'ui', b'formatjson', default=False,
)
coreconfigitem(
    b'ui', b'formatted', default=None,
)
coreconfigitem(
    b'ui', b'graphnodetemplate', default=None,
)
coreconfigitem(
    b'ui', b'interactive', default=None,
)
coreconfigitem(
    b'ui', b'interface', default=None,
)
coreconfigitem(
    b'ui', b'interface.chunkselector', default=None,
)
coreconfigitem(
    b'ui', b'large-file-limit', default=10000000,
)
coreconfigitem(
    b'ui', b'logblockedtimes', default=False,
)
coreconfigitem(
    b'ui', b'logtemplate', default=None,
)
coreconfigitem(
    b'ui', b'merge', default=None,
)
coreconfigitem(
    b'ui', b'mergemarkers', default=b'basic',
)
coreconfigitem(
    b'ui',
    b'mergemarkertemplate',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
)
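# With hypothetical values, that template renders a conflict-marker label
# along the lines of:
#
#   3a0cf4fe167b stable - alice: fix the frobnicator
#
# (short node, then tags/bookmarks/non-default branch when present, then
# "user: first line of the description").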
coreconfigitem(
    b'ui', b'message-output', default=b'stdio',
)
coreconfigitem(
    b'ui', b'nontty', default=False,
)
coreconfigitem(
    b'ui', b'origbackuppath', default=None,
)
coreconfigitem(
    b'ui', b'paginate', default=True,
)
coreconfigitem(
    b'ui', b'patch', default=None,
)
coreconfigitem(
    b'ui', b'pre-merge-tool-output-template', default=None,
)
coreconfigitem(
    b'ui', b'portablefilenames', default=b'warn',
)
coreconfigitem(
    b'ui', b'promptecho', default=False,
)
coreconfigitem(
    b'ui', b'quiet', default=False,
)
coreconfigitem(
    b'ui', b'quietbookmarkmove', default=False,
)
coreconfigitem(
    b'ui', b'relative-paths', default=b'legacy',
)
coreconfigitem(
    b'ui', b'remotecmd', default=b'hg',
)
coreconfigitem(
    b'ui', b'report_untrusted', default=True,
)
coreconfigitem(
    b'ui', b'rollback', default=True,
)
coreconfigitem(
    b'ui', b'signal-safe-lock', default=True,
)
coreconfigitem(
    b'ui', b'slash', default=False,
)
coreconfigitem(
    b'ui', b'ssh', default=b'ssh',
)
coreconfigitem(
    b'ui', b'ssherrorhint', default=None,
)
coreconfigitem(
    b'ui', b'statuscopies', default=False,
)
coreconfigitem(
    b'ui', b'strict', default=False,
)
coreconfigitem(
    b'ui', b'style', default=b'',
)
coreconfigitem(
    b'ui', b'supportcontact', default=None,
)
coreconfigitem(
    b'ui', b'textwidth', default=78,
)
coreconfigitem(
    b'ui', b'timeout', default=b'600',
)
coreconfigitem(
    b'ui', b'timeout.warn', default=0,
)
coreconfigitem(
    b'ui', b'traceback', default=False,
)
coreconfigitem(
    b'ui', b'tweakdefaults', default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui', b'verbose', default=False,
)
coreconfigitem(
    b'verify', b'skipflags', default=None,
)
coreconfigitem(
    b'web', b'allowbz2', default=False,
)
coreconfigitem(
    b'web', b'allowgz', default=False,
)
coreconfigitem(
    b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
)
coreconfigitem(
    b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
)
coreconfigitem(
    b'web', b'allowzip', default=False,
)
coreconfigitem(
    b'web', b'archivesubrepos', default=False,
)
coreconfigitem(
    b'web', b'cache', default=True,
)
coreconfigitem(
    b'web', b'comparisoncontext', default=5,
)
coreconfigitem(
    b'web', b'contact', default=None,
)
coreconfigitem(
    b'web', b'deny_push', default=list,
)
coreconfigitem(
    b'web', b'guessmime', default=False,
)
coreconfigitem(
    b'web', b'hidden', default=False,
)
coreconfigitem(
    b'web', b'labels', default=list,
)
coreconfigitem(
    b'web', b'logoimg', default=b'hglogo.png',
)
coreconfigitem(
    b'web', b'logourl', default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web', b'accesslog', default=b'-',
)
coreconfigitem(
    b'web', b'address', default=b'',
)
coreconfigitem(
    b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
)
coreconfigitem(
    b'web', b'allow_read', default=list,
)
coreconfigitem(
    b'web', b'baseurl', default=None,
)
coreconfigitem(
    b'web', b'cacerts', default=None,
)
coreconfigitem(
    b'web', b'certificate', default=None,
)
coreconfigitem(
    b'web', b'collapse', default=False,
)
coreconfigitem(
    b'web', b'csp', default=None,
)
coreconfigitem(
    b'web', b'deny_read', default=list,
)
coreconfigitem(
    b'web', b'descend', default=True,
)
coreconfigitem(
    b'web', b'description', default=b"",
)
coreconfigitem(
    b'web', b'encoding', default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web', b'errorlog', default=b'-',
)
coreconfigitem(
    b'web', b'ipv6', default=False,
)
coreconfigitem(
    b'web', b'maxchanges', default=10,
)
coreconfigitem(
    b'web', b'maxfiles', default=10,
)
coreconfigitem(
    b'web', b'maxshortchanges', default=60,
)
coreconfigitem(
    b'web', b'motd', default=b'',
)
coreconfigitem(
    b'web', b'name', default=dynamicdefault,
)
coreconfigitem(
    b'web', b'port', default=8000,
)
coreconfigitem(
    b'web', b'prefix', default=b'',
)
coreconfigitem(
    b'web', b'push_ssl', default=True,
)
coreconfigitem(
    b'web', b'refreshinterval', default=20,
)
coreconfigitem(
    b'web', b'server-header', default=None,
)
coreconfigitem(
    b'web', b'static', default=None,
)
coreconfigitem(
    b'web', b'staticurl', default=None,
)
coreconfigitem(
    b'web', b'stripes', default=1,
)
coreconfigitem(
    b'web', b'style', default=b'paper',
)
coreconfigitem(
    b'web', b'templates', default=None,
)
coreconfigitem(
    b'web', b'view', default=b'served', experimental=True,
)
coreconfigitem(
    b'worker', b'backgroundclose', default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
1525 coreconfigitem(
1528 coreconfigitem(
1526 b'worker', b'backgroundclosemaxqueue', default=384,
1529 b'worker', b'backgroundclosemaxqueue', default=384,
1527 )
1530 )
1528 coreconfigitem(
1531 coreconfigitem(
1529 b'worker', b'backgroundcloseminfilecount', default=2048,
1532 b'worker', b'backgroundcloseminfilecount', default=2048,
1530 )
1533 )
1531 coreconfigitem(
1534 coreconfigitem(
1532 b'worker', b'backgroundclosethreadcount', default=4,
1535 b'worker', b'backgroundclosethreadcount', default=4,
1533 )
1536 )
1534 coreconfigitem(
1537 coreconfigitem(
1535 b'worker', b'enabled', default=True,
1538 b'worker', b'enabled', default=True,
1536 )
1539 )
1537 coreconfigitem(
1540 coreconfigitem(
1538 b'worker', b'numcpus', default=None,
1541 b'worker', b'numcpus', default=None,
1539 )
1542 )
1540
1543
1541 # Rebase related configuration moved to core because other extension are doing
1544 # Rebase related configuration moved to core because other extension are doing
1542 # strange things. For example, shelve import the extensions to reuse some bit
1545 # strange things. For example, shelve import the extensions to reuse some bit
1543 # without formally loading it.
1546 # without formally loading it.
1544 coreconfigitem(
1547 coreconfigitem(
1545 b'commands', b'rebase.requiredest', default=False,
1548 b'commands', b'rebase.requiredest', default=False,
1546 )
1549 )
1547 coreconfigitem(
1550 coreconfigitem(
1548 b'experimental', b'rebaseskipobsolete', default=True,
1551 b'experimental', b'rebaseskipobsolete', default=True,
1549 )
1552 )
1550 coreconfigitem(
1553 coreconfigitem(
1551 b'rebase', b'singletransaction', default=False,
1554 b'rebase', b'singletransaction', default=False,
1552 )
1555 )
1553 coreconfigitem(
1556 coreconfigitem(
1554 b'rebase', b'experimental.inmemory', default=False,
1557 b'rebase', b'experimental.inmemory', default=False,
1555 )
1558 )
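
# A minimal sketch of how the registrations above are consumed (assuming a
# ``ui`` instance; ``ui.config``/``ui.configint``/``ui.configbool`` are the
# standard accessors and fall back to the defaults declared here when no
# hgrc file overrides them):
#
#     style = ui.config(b'web', b'style')                 # b'paper' unless set
#     port = ui.configint(b'web', b'port')                # 8000 unless set
#     use_workers = ui.configbool(b'worker', b'enabled')  # True unless set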
@@ -1,3787 +1,3789 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)

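# Illustrative sketch (not part of the original module): the filecache
# classes above are used as property decorators; the decorated method is
# recomputed only when the named file changes on disk. ``b'bookmarks'`` is
# one plausible .hg/-relative file, shown here on a hypothetical repo class:
#
#     class examplerepo(object):
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)
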
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

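# Illustrative sketch (not part of the original module): driving a peer's
# command executor. ``peer`` is assumed to be any object exposing
# ``commandexecutor()``, such as ``localpeer`` below; for a local peer the
# returned future is already resolved when callcommand() returns, so
# result() does not block.
#
#     with peer.commandexecutor() as executor:
#         heads = executor.callcommand(b'heads', {}).result()
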
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

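# Illustrative sketch (hypothetical extension code): registering a feature
# setup function so that a custom requirement becomes recognized whenever the
# extension is loaded. The requirement name below is made up; only functions
# whose defining module is a loaded extension are consulted, per the note
# above.
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
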
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

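# Illustrative sketch (hypothetical extension code): wrapping one of the
# factory functions consulted via ``REPO_INTERFACES`` to mix an extra base
# class into the derived repository type, following the advice in the
# docstring above. The factory name ``makemain`` is assumed here (it is
# defined later in this module in this version); the mixin and extension
# module name are made up.
#
#     from mercurial import extensions, localrepo
#
#     class _mymixin(object):
#         def myhelper(self):
#             return b'extra repo behavior'
#
#     def _wrapmakemain(orig, **kwargs):
#         # No-op unless this extension is loaded for the repo at hand.
#         if __name__ not in kwargs['extensionmodulenames']:
#             return orig(**kwargs)
#         cls = orig(**kwargs)
#         return type(cls.__name__, (_mymixin, cls), {})
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makemain', _wrapmakemain)
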
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

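# Illustrative sketch (hypothetical extension code): monkeypatching
# ``loadhgrc`` as the docstring above suggests, to pull configuration from
# an extra file alongside the regular hgrc. The file name ``hgrc-extra`` is
# made up for illustration.
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
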
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

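# For illustration: a repository created by a modern Mercurial typically has
# b'store', b'fncache' and b'dotencode' among its requirements, so the call
# in makelocalrepository() above resolves to a fncachestore:
#
#     store = makestore(
#         {b'store', b'fncache', b'dotencode'},
#         storebasepath,
#         lambda base: vfsmod.vfs(base, cacheaudited=True),
#     )
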
806 def resolvestorevfsoptions(ui, requirements, features):
806 def resolvestorevfsoptions(ui, requirements, features):
807 """Resolve the options to pass to the store vfs opener.
807 """Resolve the options to pass to the store vfs opener.
808
808
809 The returned dict is used to influence behavior of the storage layer.
809 The returned dict is used to influence behavior of the storage layer.
810 """
810 """
811 options = {}
811 options = {}
812
812
813 if b'treemanifest' in requirements:
813 if b'treemanifest' in requirements:
814 options[b'treemanifest'] = True
814 options[b'treemanifest'] = True
815
815
816 # experimental config: format.manifestcachesize
816 # experimental config: format.manifestcachesize
817 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
817 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
818 if manifestcachesize is not None:
818 if manifestcachesize is not None:
819 options[b'manifestcachesize'] = manifestcachesize
819 options[b'manifestcachesize'] = manifestcachesize
820
820
821 # In the absence of another requirement superseding a revlog-related
821 # In the absence of another requirement superseding a revlog-related
822 # requirement, we have to assume the repo is using revlog version 0.
822 # requirement, we have to assume the repo is using revlog version 0.
823 # This revlog format is super old and we don't bother trying to parse
823 # This revlog format is super old and we don't bother trying to parse
824 # opener options for it because those options wouldn't do anything
824 # opener options for it because those options wouldn't do anything
825 # meaningful on such old repos.
825 # meaningful on such old repos.
826 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
826 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
827 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
827 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
828 else: # explicitly mark repo as using revlogv0
828 else: # explicitly mark repo as using revlogv0
829 options[b'revlogv0'] = True
829 options[b'revlogv0'] = True
830
830
831 if COPIESSDC_REQUIREMENT in requirements:
831 if COPIESSDC_REQUIREMENT in requirements:
832 options[b'copies-storage'] = b'changeset-sidedata'
832 options[b'copies-storage'] = b'changeset-sidedata'
833 else:
833 else:
834 writecopiesto = ui.config(b'experimental', b'copies.write-to')
834 writecopiesto = ui.config(b'experimental', b'copies.write-to')
835 copiesextramode = (b'changeset-only', b'compatibility')
835 copiesextramode = (b'changeset-only', b'compatibility')
836 if writecopiesto in copiesextramode:
836 if writecopiesto in copiesextramode:
837 options[b'copies-storage'] = b'extra'
837 options[b'copies-storage'] = b'extra'
838
838
839 return options
839 return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
        options[b'exp-persistent-nodemap'] = True

    return options
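
# Note on the compression requirement parsing above (illustrative):
# ``split(b'-', 2)`` splits on at most two dashes, so the engine name can
# itself contain dashes:
#
#   b'revlog-compression-zstd'.split(b'-', 2)[2]      # -> b'zstd'
#   b'exp-compression-some-engine'.split(b'-', 2)[2]  # -> b'some-engine'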


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
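
# Sketch (not upstream code): the factory returns a *class*, not an instance;
# ``makelocalrepository()`` later mixes it into the final repository type.
#
#   features = set()
#   cls = makefilestorage(requirements={b'store'}, features=features)
#   # cls is revlogfilestorage here; with repository.NARROW_REQUIREMENT in
#   # the requirements it would be revlognarrowfilestorage instead.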


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
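
# Minimal sketch of the derivation loop (assumed shape; the authoritative
# logic lives in ``makelocalrepository()`` elsewhere in this module):
#
#   bases = []
#   for iface, factoryfn in REPO_INTERFACES:
#       # calling the lambda late allows extensions to wrap the factories
#       typ = factoryfn()(requirements=requirements, features=features)
#       bases.append(typ)
#   # roughly: type('derivedrepo', tuple(bases), {}) conforms to all ifaces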


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # List of prefixes for files that can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line.
        b'bisect.state',
    }
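
    # Sketch of how a third-party extension might extend the list above
    # (hypothetical extension code, shown only as illustration):
    #
    #   from mercurial import localrepo
    #   localrepo.localrepository._wlockfreeprefix.add(b'myext.state')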

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
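
    # The ward pattern above, reduced to its skeleton (illustrative only,
    # not upstream API): hold the repo through a weakref so the wrapper does
    # not keep the repository alive, and become a no-op once it is collected.
    #
    #   def _makeward(repo, origfunc):
    #       rref = weakref.ref(repo)
    #       def ward(path, mode=None):
    #           ret = origfunc(path, mode=mode)
    #           if rref() is not None and mode not in (None, b'r', b'rb'):
    #               pass  # perform write-time checks here
    #           return ret
    #       return ward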

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle: self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
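
    # Illustrative usage (b'visible' and b'served' are standard repoview
    # filter names):
    #
    #   served = repo.filtered(b'served')
    #   served2 = repo.filtered(b'served')  # a fresh repoview each call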

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks referring to
        # the "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )
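
    # As the docstring above says, this is an extension point. A subclass
    # produced during repo setup could override it, e.g. (hypothetical):
    #
    #   class myrepo(localrepository):
    #       def _makedirstate(self):
    #           ds = super(myrepo, self)._makedirstate()
    #           # decorate or wrap ds here before handing it back
    #           return ds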

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
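
    # Hedged example of the intersection behavior (``somematcher`` is a
    # hypothetical matcher, shown only as illustration):
    #
    #   m = repo.narrowmatch(match=somematcher)
    #   # m accepts only paths matched by *both* somematcher and the
    #   # narrowspec; with includeexact=True, files listed exactly in
    #   # somematcher survive even when outside the narrowspec.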

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping
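
    # Shape of the resulting mapping (illustrative): both revisions and
    # nodes key the same (rev, node) pairs, e.g.
    #
    #   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #    b'.': (42, p1node), 42: (42, p1node), p1node: (42, p1node)}
    #
    # where 42 and p1node stand in for the working copy parent.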

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
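
    # A few lookups this method supports (illustrative; ``repo`` is any
    # localrepository instance):
    #
    #   repo[None]        # workingctx
    #   repo[0]           # changectx for revision 0
    #   repo[b'tip']      # tip of the (possibly filtered) changelog
    #   repo[b'.']        # first parent of the working directory
    #   repo[20 * b'\0']  # a 20-byte binary node (nullid here)
    #   repo[1:3]         # list of changectx, skipping filtered revisions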

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        prefix is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__
1675
1677
1676 def __len__(self):
1678 def __len__(self):
1677 # no need to pay the cost of repoview.changelog
1679 # no need to pay the cost of repoview.changelog
1678 unfi = self.unfiltered()
1680 unfi = self.unfiltered()
1679 return len(unfi.changelog)
1681 return len(unfi.changelog)
1680
1682
1681 def __iter__(self):
1683 def __iter__(self):
1682 return iter(self.changelog)
1684 return iter(self.changelog)
1683
1685
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

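    # Usage sketch (illustrative only): the %-escapes are those of
    # ``revsetlang.formatspec``, e.g. ``%s`` for a string and ``%ld`` for a
    # list of ints, so a caller might write:
    #
    #   for r in repo.revs(b'heads(branch(%s))', b'default'):
    #       ...  # r is an integer revision number
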
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

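    # Usage sketch (illustrative only): ``set()`` yields contexts lazily,
    # so callers can stream over large results:
    #
    #   for ctx in repo.set(b'draft()'):
    #       repo.ui.write(b'%d\n' % ctx.rev())
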
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

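    # Usage sketch (illustrative only; alias names hypothetical): a local
    # alias can shadow a user-defined one for the duration of the call:
    #
    #   revs = repo.anyrevs(
    #       [b'mine()'],
    #       user=True,
    #       localalias={b'mine': b'draft() and user("alice")'},
    #   )
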
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

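    # Usage sketch (hook name and ``newnode`` hypothetical): an extension
    # firing its own hook passes extra data as keyword arguments, which
    # become HG_* environment variables for shell hooks:
    #
    #   repo.hook(b'myext-update', throw=False, node=hex(newnode))
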
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, b'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = dict(
            [
                (encoding.tolocal(name), value)
                for (name, value) in pycompat.iteritems(tagtypes)
            ]
        )
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

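    # Usage sketch tying the tag APIs together (tag name illustrative):
    #
    #   node = repo.tags().get(b'1.0')   # tag name -> node, or None
    #   kind = repo.tagtype(b'1.0')      # b'global', b'local', or None
    #   names = repo.nodetags(node) if node else []  # all tags on that node
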
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

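    # Usage sketch (branch name illustrative): resolving a branch head
    # defensively, without catching RepoLookupError at the call site:
    #
    #   tip = repo.branchtip(b'stable', ignoremissing=True)
    #   if tip is None:
    #       ...  # no such branch
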
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

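    # Usage sketch (nodes hypothetical): ``known()`` is a workhorse of set
    # discovery with a peer; the result is parallel to the input:
    #
    #   flags = repo.known([node1, node2])
    #   # e.g. [True, False] if only node1 is present and unfiltered
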
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

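    # Illustrative configuration sketch (commands hypothetical): the
    # patterns consumed by _loadfilter()/_filter() come from hgrc sections
    # such as:
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos
    #
    # wread() below runs matching files through the encode filters and
    # wwrite() through the decode ones.
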
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

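    # Usage sketch (filenames illustrative): ``flags`` mirrors manifest
    # flags, so callers pass:
    #
    #   repo.wwrite(b'script.sh', data, b'x')  # executable file
    #   repo.wwrite(b'link', b'target', b'l')  # symlink pointing at target
    #   repo.wwrite(b'plain.txt', data, b'')   # regular file
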
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

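    # Canonical usage sketch (simplified; transaction name illustrative):
    # transaction() below requires the store lock and is paired with
    # close()/release():
    #
    #   with repo.lock():
    #       tr = repo.transaction(b'my-operation')
    #       try:
    #           ...  # write store data
    #           tr.close()
    #       finally:
    #           tr.release()
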
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes[b'phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook(
                        b'pretxnclose-phase',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = {}
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes[b'phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'txnclose-phase',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

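    # Illustrative consumer sketch for the experimental tag tracking above
    # (parsing hypothetical; file path per the docs in transaction()): a
    # txnclose hook should check HG_TAG_MOVED first, then read one change
    # per line from .hg/changes/tags.changes:
    #
    #   action, hexnode, tagname = line.rstrip(b'\n').split(b' ', 2)
    #   # action is one of b'-R', b'+A', b'-M', b'+M' as documented above
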
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

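    # Note (added for exposition): given the b"%d\n%s\n" format above,
    # journal.desc holds the pre-transaction repository length and the
    # transaction name, e.g. b"42\ncommit\n"; _rollback() below parses the
    # renamed undo.desc copy back with splitlines().
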
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

2477 @unfilteredmethod
2479 @unfilteredmethod
2478 def updatecaches(self, tr=None, full=False):
2480 def updatecaches(self, tr=None, full=False):
2479 """warm appropriate caches
2481 """warm appropriate caches
2480
2482
2481 If this function is called after a transaction closed. The transaction
2483 If this function is called after a transaction closed. The transaction
2482 will be available in the 'tr' argument. This can be used to selectively
2484 will be available in the 'tr' argument. This can be used to selectively
2483 update caches relevant to the changes in that transaction.
2485 update caches relevant to the changes in that transaction.
2484
2486
2485 If 'full' is set, make sure all caches the function knows about have
2487 If 'full' is set, make sure all caches the function knows about have
2486 up-to-date data. Even the ones usually loaded more lazily.
2488 up-to-date data. Even the ones usually loaded more lazily.
2487 """
2489 """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid, but a later call to
            # `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing the fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

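    # For example, the `hg debugupdatecaches` command warms all of these by
    # calling this method with full=True. An in-process sketch (assuming
    # `repo` is an open localrepository):
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
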
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

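    # Illustrative sketch (not part of this module): per the comment above, an
    # extension can wrap invalidateall() to drop its own in-memory state when
    # the repository is fully invalidated. `_myextcache` is hypothetical.
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _wrapinvalidateall(orig, repo):
    #       repo.__dict__.pop('_myextcache', None)  # hypothetical cached attr
    #       return orig(repo)
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, 'invalidateall', _wrapinvalidateall
    #   )
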
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

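    # A minimal usage sketch (not part of this module): register a callback
    # that fires only once the outermost lock is released, e.g. to report
    # after all repository changes are visible on disk.
    #
    #   def notify(success):
    #       # `success` is True when the locked section completed normally
    #       if success:
    #           repo.ui.status(b'all locks released\n')
    #
    #   repo._afterlock(notify)
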
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

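    # The ordering rule above in practice (a sketch): callers that need both
    # locks take 'wlock' before 'lock', typically via nested context managers.
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # safely mutate the store and working-copy metadata
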
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

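    # The copy metadata written above can be read back from a file context;
    # a small sketch (assuming `repo` is an open localrepository and `bar`
    # exists at tip):
    #
    #   fctx = repo[b'tip'][b'bar']
    #   source = fctx.copysource()  # e.g. b'foo' if bar was copied from foo
    #   if source is not None:
    #       repo.ui.write(b'%s was copied from %s\n' % (fctx.path(), source))
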
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

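    # A minimal usage sketch (not part of this module): committing all
    # working-directory changes programmatically. commit() takes its own
    # wlock/lock, so no extra locking is needed around the call.
    #
    #   node = repo.commit(
    #       text=b'example: update documentation',
    #       user=b'Example User <user@example.com>',
    #   )
    #   if node is None:
    #       repo.ui.status(b'nothing changed\n')
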
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                            raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge, this
                        # function determines whether the absence is due to a
                        # deletion from a parent, or whether the merge commit
                        # itself deletes the file. We decide this by doing a
                        # simplified three way merge of the manifest entry for
                        # the file. There are two ways we decide the merge
                        # itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In other
                        #   words, that parent left the file unchanged while the
                        #   other one deleted it.
                        # One way to think about this is that deleting a file is
                        # similar to emptying it, so the list of changed files
                        # should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True

                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug(
                        b'not reusing manifest (no file change in '
                        b'changelog, but manifest differs)\n'
                    )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

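    # commitctx() is also the entry point for synthetic commits built with
    # context.memctx; a sketch (file names and content are illustrative, and
    # commitctx() takes its own lock and transaction):
    #
    #   from mercurial import context
    #
    #   def filectxfn(repo, memctx, path):
    #       return context.memfilectx(repo, memctx, path, b'new content\n')
    #
    #   mctx = context.memctx(
    #       repo,
    #       parents=(repo[b'.'].node(), None),
    #       text=b'example: synthetic commit',
    #       files=[b'example.txt'],
    #       filectxfn=filectxfn,
    #       user=b'Example User <user@example.com>',
    #   )
    #   node = repo.commitctx(mctx)
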
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

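    # A usage sketch (not part of this module), in the spirit of the fsmonitor
    # extension: report how many files status touched, after fixups have run.
    #
    #   def poststatus(wctx, status):
    #       ndirty = len(status.modified) + len(status.added)
    #       wctx.repo().ui.debug(b'poststatus: %d dirty files\n' % ndirty)
    #
    #   repo.addpostdsstatus(poststatus)
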
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

3379 def branches(self, nodes):
3381 def branches(self, nodes):
3380 if not nodes:
3382 if not nodes:
3381 nodes = [self.changelog.tip()]
3383 nodes = [self.changelog.tip()]
3382 b = []
3384 b = []
3383 for n in nodes:
3385 for n in nodes:
3384 t = n
3386 t = n
3385 while True:
3387 while True:
3386 p = self.changelog.parents(n)
3388 p = self.changelog.parents(n)
3387 if p[1] != nullid or p[0] == nullid:
3389 if p[1] != nullid or p[0] == nullid:
3388 b.append((t, n, p[0], p[1]))
3390 b.append((t, n, p[0], p[1]))
3389 break
3391 break
3390 n = p[0]
3392 n = p[0]
3391 return b
3393 return b
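# Each 4-tuple in the result is (starting node, first merge-or-root
# reached, its p1, its p2): the walk follows first parents from every
# requested node until it hits a merge changeset or the root.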
3392
3394
3393 def between(self, pairs):
3395 def between(self, pairs):
3394 r = []
3396 r = []
3395
3397
3396 for top, bottom in pairs:
3398 for top, bottom in pairs:
3397 n, l, i = top, [], 0
3399 n, l, i = top, [], 0
3398 f = 1
3400 f = 1
3399
3401
3400 while n != bottom and n != nullid:
3402 while n != bottom and n != nullid:
3401 p = self.changelog.parents(n)[0]
3403 p = self.changelog.parents(n)[0]
3402 if i == f:
3404 if i == f:
3403 l.append(n)
3405 l.append(n)
3404 f = f * 2
3406 f = f * 2
3405 n = p
3407 n = p
3406 i += 1
3408 i += 1
3407
3409
3408 r.append(l)
3410 r.append(l)
3409
3411
3410 return r
3412 return r
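# The sampling above records a node whenever the step counter i reaches f
# and then doubles f, so it keeps the nodes at distances 1, 2, 4, 8, ...
# along first parents from top towards bottom (the legacy wire protocol
# uses this to bisect towards a common ancestor). A minimal standalone
# sketch of the same idea, assuming a plain {node: first-parent} dict
# rather than the real changelog API:

def sample_between(parents, top, bottom):
    picked, n, i, f = [], top, 0, 1
    while n != bottom and n is not None:
        if i == f:
            picked.append(n)  # n sits a power-of-two steps from top
            f *= 2
        n = parents.get(n)  # follow the first parent
        i += 1
    return picked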
3411
3413
3412 def checkpush(self, pushop):
3414 def checkpush(self, pushop):
3413 """Extensions can override this function if additional checks have
3415 """Extensions can override this function if additional checks have
3414 to be performed before pushing, or call it if they override push
3416 to be performed before pushing, or call it if they override push
3415 command.
3417 command.
3416 """
3418 """
3417
3419
3418 @unfilteredpropertycache
3420 @unfilteredpropertycache
3419 def prepushoutgoinghooks(self):
3421 def prepushoutgoinghooks(self):
3420 """Return util.hooks consists of a pushop with repo, remote, outgoing
3422 """Return util.hooks consists of a pushop with repo, remote, outgoing
3421 methods, which are called before pushing changesets.
3423 methods, which are called before pushing changesets.
3422 """
3424 """
3423 return util.hooks()
3425 return util.hooks()
3424
3426
3425 def pushkey(self, namespace, key, old, new):
3427 def pushkey(self, namespace, key, old, new):
3426 try:
3428 try:
3427 tr = self.currenttransaction()
3429 tr = self.currenttransaction()
3428 hookargs = {}
3430 hookargs = {}
3429 if tr is not None:
3431 if tr is not None:
3430 hookargs.update(tr.hookargs)
3432 hookargs.update(tr.hookargs)
3431 hookargs = pycompat.strkwargs(hookargs)
3433 hookargs = pycompat.strkwargs(hookargs)
3432 hookargs['namespace'] = namespace
3434 hookargs['namespace'] = namespace
3433 hookargs['key'] = key
3435 hookargs['key'] = key
3434 hookargs['old'] = old
3436 hookargs['old'] = old
3435 hookargs['new'] = new
3437 hookargs['new'] = new
3436 self.hook(b'prepushkey', throw=True, **hookargs)
3438 self.hook(b'prepushkey', throw=True, **hookargs)
3437 except error.HookAbort as exc:
3439 except error.HookAbort as exc:
3438 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3440 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3439 if exc.hint:
3441 if exc.hint:
3440 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3442 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3441 return False
3443 return False
3442 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3444 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3443 ret = pushkey.push(self, namespace, key, old, new)
3445 ret = pushkey.push(self, namespace, key, old, new)
3444
3446
3445 def runhook(unused_success):
3447 def runhook(unused_success):
3446 self.hook(
3448 self.hook(
3447 b'pushkey',
3449 b'pushkey',
3448 namespace=namespace,
3450 namespace=namespace,
3449 key=key,
3451 key=key,
3450 old=old,
3452 old=old,
3451 new=new,
3453 new=new,
3452 ret=ret,
3454 ret=ret,
3453 )
3455 )
3454
3456
3455 self._afterlock(runhook)
3457 self._afterlock(runhook)
3456 return ret
3458 return ret
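# Ordering note: the 'prepushkey' hook (throw=True) can veto the change,
# pushkey.push() then performs it, and the 'pushkey' hook is deferred via
# _afterlock so it only fires once the repository lock is released.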
3457
3459
3458 def listkeys(self, namespace):
3460 def listkeys(self, namespace):
3459 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3461 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3460 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3462 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3461 values = pushkey.list(self, namespace)
3463 values = pushkey.list(self, namespace)
3462 self.hook(b'listkeys', namespace=namespace, values=values)
3464 self.hook(b'listkeys', namespace=namespace, values=values)
3463 return values
3465 return values
3464
3466
3465 def debugwireargs(self, one, two, three=None, four=None, five=None):
3467 def debugwireargs(self, one, two, three=None, four=None, five=None):
3466 '''used to test argument passing over the wire'''
3468 '''used to test argument passing over the wire'''
3467 return b"%s %s %s %s %s" % (
3469 return b"%s %s %s %s %s" % (
3468 one,
3470 one,
3469 two,
3471 two,
3470 pycompat.bytestr(three),
3472 pycompat.bytestr(three),
3471 pycompat.bytestr(four),
3473 pycompat.bytestr(four),
3472 pycompat.bytestr(five),
3474 pycompat.bytestr(five),
3473 )
3475 )
3474
3476
3475 def savecommitmessage(self, text):
3477 def savecommitmessage(self, text):
3476 fp = self.vfs(b'last-message.txt', b'wb')
3478 fp = self.vfs(b'last-message.txt', b'wb')
3477 try:
3479 try:
3478 fp.write(text)
3480 fp.write(text)
3479 finally:
3481 finally:
3480 fp.close()
3482 fp.close()
3481 return self.pathto(fp.name[len(self.root) + 1 :])
3483 return self.pathto(fp.name[len(self.root) + 1 :])
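# The draft is persisted to .hg/last-message.txt; the return value is the
# path relative to the current working directory, suitable for pointing
# the user at the saved message after an aborted commit.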
3482
3484
3483
3485
3484 # used to avoid circular references so destructors work
3486 # used to avoid circular references so destructors work
3485 def aftertrans(files):
3487 def aftertrans(files):
3486 renamefiles = [tuple(t) for t in files]
3488 renamefiles = [tuple(t) for t in files]
3487
3489
3488 def a():
3490 def a():
3489 for vfs, src, dest in renamefiles:
3491 for vfs, src, dest in renamefiles:
3490 # if src and dest refer to the same file, vfs.rename is a no-op,
3492 # if src and dest refer to the same file, vfs.rename is a no-op,
3491 # leaving both src and dest on disk. delete dest to make sure
3493 # leaving both src and dest on disk. delete dest to make sure
3492 # the rename cannot be such a no-op.
3494 # the rename cannot be such a no-op.
3493 vfs.tryunlink(dest)
3495 vfs.tryunlink(dest)
3494 try:
3496 try:
3495 vfs.rename(src, dest)
3497 vfs.rename(src, dest)
3496 except OSError: # journal file does not yet exist
3498 except OSError: # journal file does not yet exist
3497 pass
3499 pass
3498
3500
3499 return a
3501 return a
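# aftertrans(files) is handed to the transaction machinery as a post-close
# callback: once the transaction succeeds, each recorded journal file is
# renamed to its undo name (see undoname below) for later rollback use.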
3500
3502
3501
3503
3502 def undoname(fn):
3504 def undoname(fn):
3503 base, name = os.path.split(fn)
3505 base, name = os.path.split(fn)
3504 assert name.startswith(b'journal')
3506 assert name.startswith(b'journal')
3505 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3507 return os.path.join(base, name.replace(b'journal', b'undo', 1))
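# Illustrative round-trips (hypothetical store paths, POSIX separators
# assumed; bytes paths keep os.path happy on Python 3):
assert undoname(b'.hg/store/journal') == b'.hg/store/undo'
assert undoname(b'.hg/store/journal.phaseroots') == b'.hg/store/undo.phaseroots'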
3506
3508
3507
3509
3508 def instance(ui, path, create, intents=None, createopts=None):
3510 def instance(ui, path, create, intents=None, createopts=None):
3509 localpath = util.urllocalpath(path)
3511 localpath = util.urllocalpath(path)
3510 if create:
3512 if create:
3511 createrepository(ui, localpath, createopts=createopts)
3513 createrepository(ui, localpath, createopts=createopts)
3512
3514
3513 return makelocalrepository(ui, localpath, intents=intents)
3515 return makelocalrepository(ui, localpath, intents=intents)
3514
3516
3515
3517
3516 def islocal(path):
3518 def islocal(path):
3517 return True
3519 return True
3518
3520
3519
3521
3520 def defaultcreateopts(ui, createopts=None):
3522 def defaultcreateopts(ui, createopts=None):
3521 """Populate the default creation options for a repository.
3523 """Populate the default creation options for a repository.
3522
3524
3523 A dictionary of explicitly requested creation options can be passed
3525 A dictionary of explicitly requested creation options can be passed
3524 in. Missing keys will be populated.
3526 in. Missing keys will be populated.
3525 """
3527 """
3526 createopts = dict(createopts or {})
3528 createopts = dict(createopts or {})
3527
3529
3528 if b'backend' not in createopts:
3530 if b'backend' not in createopts:
3529 # experimental config: storage.new-repo-backend
3531 # experimental config: storage.new-repo-backend
3530 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3532 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3531
3533
3532 return createopts
3534 return createopts
3533
3535
3534
3536
3535 def newreporequirements(ui, createopts):
3537 def newreporequirements(ui, createopts):
3536 """Determine the set of requirements for a new local repository.
3538 """Determine the set of requirements for a new local repository.
3537
3539
3538 Extensions can wrap this function to specify custom requirements for
3540 Extensions can wrap this function to specify custom requirements for
3539 new repositories.
3541 new repositories.
3540 """
3542 """
3541 # If the repo is being created from a shared repository, we copy
3543 # If the repo is being created from a shared repository, we copy
3542 # its requirements.
3544 # its requirements.
3543 if b'sharedrepo' in createopts:
3545 if b'sharedrepo' in createopts:
3544 requirements = set(createopts[b'sharedrepo'].requirements)
3546 requirements = set(createopts[b'sharedrepo'].requirements)
3545 if createopts.get(b'sharedrelative'):
3547 if createopts.get(b'sharedrelative'):
3546 requirements.add(b'relshared')
3548 requirements.add(b'relshared')
3547 else:
3549 else:
3548 requirements.add(b'shared')
3550 requirements.add(b'shared')
3549
3551
3550 return requirements
3552 return requirements
3551
3553
3552 if b'backend' not in createopts:
3554 if b'backend' not in createopts:
3553 raise error.ProgrammingError(
3555 raise error.ProgrammingError(
3554 b'backend key not present in createopts; '
3556 b'backend key not present in createopts; '
3555 b'was defaultcreateopts() called?'
3557 b'was defaultcreateopts() called?'
3556 )
3558 )
3557
3559
3558 if createopts[b'backend'] != b'revlogv1':
3560 if createopts[b'backend'] != b'revlogv1':
3559 raise error.Abort(
3561 raise error.Abort(
3560 _(
3562 _(
3561 b'unable to determine repository requirements for '
3563 b'unable to determine repository requirements for '
3562 b'storage backend: %s'
3564 b'storage backend: %s'
3563 )
3565 )
3564 % createopts[b'backend']
3566 % createopts[b'backend']
3565 )
3567 )
3566
3568
3567 requirements = {b'revlogv1'}
3569 requirements = {b'revlogv1'}
3568 if ui.configbool(b'format', b'usestore'):
3570 if ui.configbool(b'format', b'usestore'):
3569 requirements.add(b'store')
3571 requirements.add(b'store')
3570 if ui.configbool(b'format', b'usefncache'):
3572 if ui.configbool(b'format', b'usefncache'):
3571 requirements.add(b'fncache')
3573 requirements.add(b'fncache')
3572 if ui.configbool(b'format', b'dotencode'):
3574 if ui.configbool(b'format', b'dotencode'):
3573 requirements.add(b'dotencode')
3575 requirements.add(b'dotencode')
3574
3576
3575 compengine = ui.config(b'format', b'revlog-compression')
3577 compengine = ui.config(b'format', b'revlog-compression')
3576 if compengine not in util.compengines:
3578 if compengine not in util.compengines:
3577 raise error.Abort(
3579 raise error.Abort(
3578 _(
3580 _(
3579 b'compression engine %s defined by '
3581 b'compression engine %s defined by '
3580 b'format.revlog-compression not available'
3582 b'format.revlog-compression not available'
3581 )
3583 )
3582 % compengine,
3584 % compengine,
3583 hint=_(
3585 hint=_(
3584 b'run "hg debuginstall" to list available '
3586 b'run "hg debuginstall" to list available '
3585 b'compression engines'
3587 b'compression engines'
3586 ),
3588 ),
3587 )
3589 )
3588
3590
3589 # zlib is the historical default and doesn't need an explicit requirement.
3591 # zlib is the historical default and doesn't need an explicit requirement.
3590 elif compengine == b'zstd':
3592 elif compengine == b'zstd':
3591 requirements.add(b'revlog-compression-zstd')
3593 requirements.add(b'revlog-compression-zstd')
3592 elif compengine != b'zlib':
3594 elif compengine != b'zlib':
3593 requirements.add(b'exp-compression-%s' % compengine)
3595 requirements.add(b'exp-compression-%s' % compengine)
3594
3596
3595 if scmutil.gdinitconfig(ui):
3597 if scmutil.gdinitconfig(ui):
3596 requirements.add(b'generaldelta')
3598 requirements.add(b'generaldelta')
3597 if ui.configbool(b'format', b'sparse-revlog'):
3599 if ui.configbool(b'format', b'sparse-revlog'):
3598 requirements.add(SPARSEREVLOG_REQUIREMENT)
3600 requirements.add(SPARSEREVLOG_REQUIREMENT)
3599
3601
3600 # experimental config: format.exp-use-side-data
3602 # experimental config: format.exp-use-side-data
3601 if ui.configbool(b'format', b'exp-use-side-data'):
3603 if ui.configbool(b'format', b'exp-use-side-data'):
3602 requirements.add(SIDEDATA_REQUIREMENT)
3604 requirements.add(SIDEDATA_REQUIREMENT)
3603 # experimental config: format.exp-use-copies-side-data-changeset
3605 # experimental config: format.exp-use-copies-side-data-changeset
3604 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3606 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3605 requirements.add(SIDEDATA_REQUIREMENT)
3607 requirements.add(SIDEDATA_REQUIREMENT)
3606 requirements.add(COPIESSDC_REQUIREMENT)
3608 requirements.add(COPIESSDC_REQUIREMENT)
3607 if ui.configbool(b'experimental', b'treemanifest'):
3609 if ui.configbool(b'experimental', b'treemanifest'):
3608 requirements.add(b'treemanifest')
3610 requirements.add(b'treemanifest')
3609
3611
3610 revlogv2 = ui.config(b'experimental', b'revlogv2')
3612 revlogv2 = ui.config(b'experimental', b'revlogv2')
3611 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3613 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3612 requirements.remove(b'revlogv1')
3614 requirements.remove(b'revlogv1')
3613 # generaldelta is implied by revlogv2.
3615 # generaldelta is implied by revlogv2.
3614 requirements.discard(b'generaldelta')
3616 requirements.discard(b'generaldelta')
3615 requirements.add(REVLOGV2_REQUIREMENT)
3617 requirements.add(REVLOGV2_REQUIREMENT)
3616 # experimental config: format.internal-phase
3618 # experimental config: format.internal-phase
3617 if ui.configbool(b'format', b'internal-phase'):
3619 if ui.configbool(b'format', b'internal-phase'):
3618 requirements.add(b'internal-phase')
3620 requirements.add(b'internal-phase')
3619
3621
3620 if createopts.get(b'narrowfiles'):
3622 if createopts.get(b'narrowfiles'):
3621 requirements.add(repository.NARROW_REQUIREMENT)
3623 requirements.add(repository.NARROW_REQUIREMENT)
3622
3624
3623 if createopts.get(b'lfs'):
3625 if createopts.get(b'lfs'):
3624 requirements.add(b'lfs')
3626 requirements.add(b'lfs')
3625
3627
3626 if ui.configbool(b'format', b'bookmarks-in-store'):
3628 if ui.configbool(b'format', b'bookmarks-in-store'):
3627 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3629 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3628
3630
3629 return requirements
3631 return requirements
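# For orientation: with stock format.* defaults (store, fncache, dotencode,
# generaldelta and sparse-revlog enabled, zlib compression), the computed
# set comes out roughly as follows -- a sketch, not a compatibility
# guarantee:
#
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      b'sparserevlog'}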
3630
3632
3631
3633
3632 def filterknowncreateopts(ui, createopts):
3634 def filterknowncreateopts(ui, createopts):
3633 """Filters a dict of repo creation options against options that are known.
3635 """Filters a dict of repo creation options against options that are known.
3634
3636
3635 Receives a dict of repo creation options and returns a dict of those
3637 Receives a dict of repo creation options and returns a dict of those
3636 options that we don't know how to handle.
3638 options that we don't know how to handle.
3637
3639
3638 This function is called as part of repository creation. If the
3640 This function is called as part of repository creation. If the
3639 returned dict contains any items, repository creation will not
3641 returned dict contains any items, repository creation will not
3640 be allowed, as it means there was a request to create a repository
3642 be allowed, as it means there was a request to create a repository
3641 with options not recognized by loaded code.
3643 with options not recognized by loaded code.
3642
3644
3643 Extensions can wrap this function to filter out creation options
3645 Extensions can wrap this function to filter out creation options
3644 they know how to handle.
3646 they know how to handle.
3645 """
3647 """
3646 known = {
3648 known = {
3647 b'backend',
3649 b'backend',
3648 b'lfs',
3650 b'lfs',
3649 b'narrowfiles',
3651 b'narrowfiles',
3650 b'sharedrepo',
3652 b'sharedrepo',
3651 b'sharedrelative',
3653 b'sharedrelative',
3652 b'shareditems',
3654 b'shareditems',
3653 b'shallowfilestore',
3655 b'shallowfilestore',
3654 }
3656 }
3655
3657
3656 return {k: v for k, v in createopts.items() if k not in known}
3658 return {k: v for k, v in createopts.items() if k not in known}
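# Contract illustration (b'frobnicate' is a made-up option): keys outside
# `known` survive the filter and make createrepository() abort:
#
#     filterknowncreateopts(ui, {b'lfs': True, b'frobnicate': 1})
#     -> {b'frobnicate': 1}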
3657
3659
3658
3660
3659 def createrepository(ui, path, createopts=None):
3661 def createrepository(ui, path, createopts=None):
3660 """Create a new repository in a vfs.
3662 """Create a new repository in a vfs.
3661
3663
3662 ``path`` path to the new repo's working directory.
3664 ``path`` path to the new repo's working directory.
3663 ``createopts`` options for the new repository.
3665 ``createopts`` options for the new repository.
3664
3666
3665 The following keys for ``createopts`` are recognized:
3667 The following keys for ``createopts`` are recognized:
3666
3668
3667 backend
3669 backend
3668 The storage backend to use.
3670 The storage backend to use.
3669 lfs
3671 lfs
3670 Repository will be created with ``lfs`` requirement. The lfs extension
3672 Repository will be created with ``lfs`` requirement. The lfs extension
3671 will automatically be loaded when the repository is accessed.
3673 will automatically be loaded when the repository is accessed.
3672 narrowfiles
3674 narrowfiles
3673 Set up repository to support narrow file storage.
3675 Set up repository to support narrow file storage.
3674 sharedrepo
3676 sharedrepo
3675 Repository object from which storage should be shared.
3677 Repository object from which storage should be shared.
3676 sharedrelative
3678 sharedrelative
3677 Boolean indicating if the path to the shared repo should be
3679 Boolean indicating if the path to the shared repo should be
3678 stored as relative. By default, the pointer to the "parent" repo
3680 stored as relative. By default, the pointer to the "parent" repo
3679 is stored as an absolute path.
3681 is stored as an absolute path.
3680 shareditems
3682 shareditems
3681 Set of items to share to the new repository (in addition to storage).
3683 Set of items to share to the new repository (in addition to storage).
3682 shallowfilestore
3684 shallowfilestore
3683 Indicates that storage for files should be shallow (not all ancestor
3685 Indicates that storage for files should be shallow (not all ancestor
3684 revisions are known).
3686 revisions are known).
3685 """
3687 """
3686 createopts = defaultcreateopts(ui, createopts=createopts)
3688 createopts = defaultcreateopts(ui, createopts=createopts)
3687
3689
3688 unknownopts = filterknowncreateopts(ui, createopts)
3690 unknownopts = filterknowncreateopts(ui, createopts)
3689
3691
3690 if not isinstance(unknownopts, dict):
3692 if not isinstance(unknownopts, dict):
3691 raise error.ProgrammingError(
3693 raise error.ProgrammingError(
3692 b'filterknowncreateopts() did not return a dict'
3694 b'filterknowncreateopts() did not return a dict'
3693 )
3695 )
3694
3696
3695 if unknownopts:
3697 if unknownopts:
3696 raise error.Abort(
3698 raise error.Abort(
3697 _(
3699 _(
3698 b'unable to create repository because of unknown '
3700 b'unable to create repository because of unknown '
3699 b'creation option: %s'
3701 b'creation option: %s'
3700 )
3702 )
3701 % b', '.join(sorted(unknownopts)),
3703 % b', '.join(sorted(unknownopts)),
3702 hint=_(b'is a required extension not loaded?'),
3704 hint=_(b'is a required extension not loaded?'),
3703 )
3705 )
3704
3706
3705 requirements = newreporequirements(ui, createopts=createopts)
3707 requirements = newreporequirements(ui, createopts=createopts)
3706
3708
3707 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3709 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3708
3710
3709 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3711 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3710 if hgvfs.exists():
3712 if hgvfs.exists():
3711 raise error.RepoError(_(b'repository %s already exists') % path)
3713 raise error.RepoError(_(b'repository %s already exists') % path)
3712
3714
3713 if b'sharedrepo' in createopts:
3715 if b'sharedrepo' in createopts:
3714 sharedpath = createopts[b'sharedrepo'].sharedpath
3716 sharedpath = createopts[b'sharedrepo'].sharedpath
3715
3717
3716 if createopts.get(b'sharedrelative'):
3718 if createopts.get(b'sharedrelative'):
3717 try:
3719 try:
3718 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3720 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3719 except (IOError, ValueError) as e:
3721 except (IOError, ValueError) as e:
3720 # ValueError is raised on Windows if the drive letters differ
3722 # ValueError is raised on Windows if the drive letters differ
3721 # on each path.
3723 # on each path.
3722 raise error.Abort(
3724 raise error.Abort(
3723 _(b'cannot calculate relative path'),
3725 _(b'cannot calculate relative path'),
3724 hint=stringutil.forcebytestr(e),
3726 hint=stringutil.forcebytestr(e),
3725 )
3727 )
3726
3728
3727 if not wdirvfs.exists():
3729 if not wdirvfs.exists():
3728 wdirvfs.makedirs()
3730 wdirvfs.makedirs()
3729
3731
3730 hgvfs.makedir(notindexed=True)
3732 hgvfs.makedir(notindexed=True)
3731 if b'sharedrepo' not in createopts:
3733 if b'sharedrepo' not in createopts:
3732 hgvfs.mkdir(b'cache')
3734 hgvfs.mkdir(b'cache')
3733 hgvfs.mkdir(b'wcache')
3735 hgvfs.mkdir(b'wcache')
3734
3736
3735 if b'store' in requirements and b'sharedrepo' not in createopts:
3737 if b'store' in requirements and b'sharedrepo' not in createopts:
3736 hgvfs.mkdir(b'store')
3738 hgvfs.mkdir(b'store')
3737
3739
3738 # We create an invalid changelog outside the store so very old
3740 # We create an invalid changelog outside the store so very old
3739 # Mercurial versions (which didn't know about the requirements
3741 # Mercurial versions (which didn't know about the requirements
3740 # file) encounter an error on reading the changelog. This
3742 # file) encounter an error on reading the changelog. This
3741 # effectively locks out old clients and prevents them from
3743 # effectively locks out old clients and prevents them from
3742 # mucking with a repo in an unknown format.
3744 # mucking with a repo in an unknown format.
3743 #
3745 #
3744 # The revlog header has version 2, which won't be recognized by
3746 # The revlog header has version 2, which won't be recognized by
3745 # such old clients.
3747 # such old clients.
3746 hgvfs.append(
3748 hgvfs.append(
3747 b'00changelog.i',
3749 b'00changelog.i',
3748 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3750 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3749 b'layout',
3751 b'layout',
3750 )
3752 )
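# The four leading bytes b'\0\0\0\2' unpack (big-endian) as index format
# version 2, which clients predating the requires file refuse to read,
# so they abort cleanly instead of misparsing the modern layout.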
3751
3753
3752 scmutil.writerequires(hgvfs, requirements)
3754 scmutil.writerequires(hgvfs, requirements)
3753
3755
3754 # Write out file telling readers where to find the shared store.
3756 # Write out file telling readers where to find the shared store.
3755 if b'sharedrepo' in createopts:
3757 if b'sharedrepo' in createopts:
3756 hgvfs.write(b'sharedpath', sharedpath)
3758 hgvfs.write(b'sharedpath', sharedpath)
3757
3759
3758 if createopts.get(b'shareditems'):
3760 if createopts.get(b'shareditems'):
3759 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3761 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3760 hgvfs.write(b'shared', shared)
3762 hgvfs.write(b'shared', shared)
3761
3763
3762
3764
3763 def poisonrepository(repo):
3765 def poisonrepository(repo):
3764 """Poison a repository instance so it can no longer be used."""
3766 """Poison a repository instance so it can no longer be used."""
3765 # Perform any cleanup on the instance.
3767 # Perform any cleanup on the instance.
3766 repo.close()
3768 repo.close()
3767
3769
3768 # Our strategy is to replace the type of the object with one that
3770 # Our strategy is to replace the type of the object with one that
3769 # has all attribute lookups result in error.
3771 # has all attribute lookups result in error.
3770 #
3772 #
3771 # But we have to allow the close() method because some constructors
3773 # But we have to allow the close() method because some constructors
3772 # of repos call close() on repo references.
3774 # of repos call close() on repo references.
3773 class poisonedrepository(object):
3775 class poisonedrepository(object):
3774 def __getattribute__(self, item):
3776 def __getattribute__(self, item):
3775 if item == 'close':
3777 if item == 'close':
3776 return object.__getattribute__(self, item)
3778 return object.__getattribute__(self, item)
3777
3779
3778 raise error.ProgrammingError(
3780 raise error.ProgrammingError(
3779 b'repo instances should not be used after unshare'
3781 b'repo instances should not be used after unshare'
3780 )
3782 )
3781
3783
3782 def close(self):
3784 def close(self):
3783 pass
3785 pass
3784
3786
3785 # We may have a repoview, which intercepts __setattr__. So be sure
3787 # We may have a repoview, which intercepts __setattr__. So be sure
3786 # we operate at the lowest level possible.
3788 # we operate at the lowest level possible.
3787 object.__setattr__(repo, '__class__', poisonedrepository)
3789 object.__setattr__(repo, '__class__', poisonedrepository)
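# A standalone illustration of the class-swap technique used above
# (plain Python, independent of Mercurial's types):

class _Live(object):
    def ping(self):
        return b'pong'

class _Poisoned(object):
    def __getattribute__(self, item):
        raise RuntimeError('poisoned instance used')

obj = _Live()
object.__setattr__(obj, '__class__', _Poisoned)
# obj.ping() now raises RuntimeError: every attribute lookup is routed
# through _Poisoned.__getattribute__ because the instance's type changed.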
@@ -1,2989 +1,2995 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullhex,
28 nullhex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 short,
31 short,
32 wdirfilenodeids,
32 wdirfilenodeids,
33 wdirhex,
33 wdirhex,
34 wdirid,
34 wdirid,
35 wdirrev,
35 wdirrev,
36 )
36 )
37 from .i18n import _
37 from .i18n import _
38 from .pycompat import getattr
38 from .pycompat import getattr
39 from .revlogutils.constants import (
39 from .revlogutils.constants import (
40 FLAG_GENERALDELTA,
40 FLAG_GENERALDELTA,
41 FLAG_INLINE_DATA,
41 FLAG_INLINE_DATA,
42 REVLOGV0,
42 REVLOGV0,
43 REVLOGV1,
43 REVLOGV1,
44 REVLOGV1_FLAGS,
44 REVLOGV1_FLAGS,
45 REVLOGV2,
45 REVLOGV2,
46 REVLOGV2_FLAGS,
46 REVLOGV2_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
48 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_FORMAT,
49 REVLOG_DEFAULT_VERSION,
49 REVLOG_DEFAULT_VERSION,
50 )
50 )
51 from .revlogutils.flagutil import (
51 from .revlogutils.flagutil import (
52 REVIDX_DEFAULT_FLAGS,
52 REVIDX_DEFAULT_FLAGS,
53 REVIDX_ELLIPSIS,
53 REVIDX_ELLIPSIS,
54 REVIDX_EXTSTORED,
54 REVIDX_EXTSTORED,
55 REVIDX_FLAGS_ORDER,
55 REVIDX_FLAGS_ORDER,
56 REVIDX_ISCENSORED,
56 REVIDX_ISCENSORED,
57 REVIDX_RAWTEXT_CHANGING_FLAGS,
57 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 REVIDX_SIDEDATA,
58 REVIDX_SIDEDATA,
59 )
59 )
60 from .thirdparty import attr
60 from .thirdparty import attr
61 from . import (
61 from . import (
62 ancestor,
62 ancestor,
63 dagop,
63 dagop,
64 error,
64 error,
65 mdiff,
65 mdiff,
66 policy,
66 policy,
67 pycompat,
67 pycompat,
68 templatefilters,
68 templatefilters,
69 util,
69 util,
70 )
70 )
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75 from .revlogutils import (
75 from .revlogutils import (
76 deltas as deltautil,
76 deltas as deltautil,
77 flagutil,
77 flagutil,
78 nodemap as nodemaputil,
78 nodemap as nodemaputil,
79 sidedata as sidedatautil,
79 sidedata as sidedatautil,
80 )
80 )
81 from .utils import (
81 from .utils import (
82 storageutil,
82 storageutil,
83 stringutil,
83 stringutil,
84 )
84 )
85
85
86 # blanked usage of all the names to prevent pyflakes complaints
86 # blanked usage of all the names to prevent pyflakes complaints
87 # We need these names available in the module for extensions.
87 # We need these names available in the module for extensions.
88 REVLOGV0
88 REVLOGV0
89 REVLOGV1
89 REVLOGV1
90 REVLOGV2
90 REVLOGV2
91 FLAG_INLINE_DATA
91 FLAG_INLINE_DATA
92 FLAG_GENERALDELTA
92 FLAG_GENERALDELTA
93 REVLOG_DEFAULT_FLAGS
93 REVLOG_DEFAULT_FLAGS
94 REVLOG_DEFAULT_FORMAT
94 REVLOG_DEFAULT_FORMAT
95 REVLOG_DEFAULT_VERSION
95 REVLOG_DEFAULT_VERSION
96 REVLOGV1_FLAGS
96 REVLOGV1_FLAGS
97 REVLOGV2_FLAGS
97 REVLOGV2_FLAGS
98 REVIDX_ISCENSORED
98 REVIDX_ISCENSORED
99 REVIDX_ELLIPSIS
99 REVIDX_ELLIPSIS
100 REVIDX_SIDEDATA
100 REVIDX_SIDEDATA
101 REVIDX_EXTSTORED
101 REVIDX_EXTSTORED
102 REVIDX_DEFAULT_FLAGS
102 REVIDX_DEFAULT_FLAGS
103 REVIDX_FLAGS_ORDER
103 REVIDX_FLAGS_ORDER
104 REVIDX_RAWTEXT_CHANGING_FLAGS
104 REVIDX_RAWTEXT_CHANGING_FLAGS
105
105
106 parsers = policy.importmod('parsers')
106 parsers = policy.importmod('parsers')
107 rustancestor = policy.importrust('ancestor')
107 rustancestor = policy.importrust('ancestor')
108 rustdagop = policy.importrust('dagop')
108 rustdagop = policy.importrust('dagop')
109 rustrevlog = policy.importrust('revlog')
109 rustrevlog = policy.importrust('revlog')
110
110
111 # Aliased for performance.
111 # Aliased for performance.
112 _zlibdecompress = zlib.decompress
112 _zlibdecompress = zlib.decompress
113
113
114 # max size of revlog with inline data
114 # max size of revlog with inline data
115 _maxinline = 131072
115 _maxinline = 131072
116 _chunksize = 1048576
116 _chunksize = 1048576
117
117
118 # Flag processors for REVIDX_ELLIPSIS.
118 # Flag processors for REVIDX_ELLIPSIS.
119 def ellipsisreadprocessor(rl, text):
119 def ellipsisreadprocessor(rl, text):
120 return text, False, {}
120 return text, False, {}
121
121
122
122
123 def ellipsiswriteprocessor(rl, text, sidedata):
123 def ellipsiswriteprocessor(rl, text, sidedata):
124 return text, False
124 return text, False
125
125
126
126
127 def ellipsisrawprocessor(rl, text):
127 def ellipsisrawprocessor(rl, text):
128 return False
128 return False
129
129
130
130
131 ellipsisprocessor = (
131 ellipsisprocessor = (
132 ellipsisreadprocessor,
132 ellipsisreadprocessor,
133 ellipsiswriteprocessor,
133 ellipsiswriteprocessor,
134 ellipsisrawprocessor,
134 ellipsisrawprocessor,
135 )
135 )
136
136
137
137
138 def getoffset(q):
138 def getoffset(q):
139 return int(q >> 16)
139 return int(q >> 16)
140
140
141
141
142 def gettype(q):
142 def gettype(q):
143 return int(q & 0xFFFF)
143 return int(q & 0xFFFF)
144
144
145
145
146 def offset_type(offset, type):
146 def offset_type(offset, type):
147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
148 raise ValueError(b'unknown revlog index flags')
148 raise ValueError(b'unknown revlog index flags')
149 return int(int(offset) << 16 | type)
149 return int(int(offset) << 16 | type)
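# Round-trip sanity check with illustrative values: the 64-bit index
# field keeps the data-file offset in its high 48 bits and the flags in
# the low 16 bits.
packed = offset_type(1048576, 0)  # offset 1 MiB, no flags set
assert getoffset(packed) == 1048576
assert gettype(packed) == 0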
150
150
151
151
152 def _verify_revision(rl, skipflags, state, node):
152 def _verify_revision(rl, skipflags, state, node):
153 """Verify the integrity of the given revlog ``node`` while providing a hook
153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 point for extensions to influence the operation."""
154 point for extensions to influence the operation."""
155 if skipflags:
155 if skipflags:
156 state[b'skipread'].add(node)
156 state[b'skipread'].add(node)
157 else:
157 else:
158 # Side-effect: read content and verify hash.
158 # Side-effect: read content and verify hash.
159 rl.revision(node)
159 rl.revision(node)
160
160
161
161
162 @attr.s(slots=True, frozen=True)
162 @attr.s(slots=True, frozen=True)
163 class _revisioninfo(object):
163 class _revisioninfo(object):
164 """Information about a revision that allows building its fulltext
164 """Information about a revision that allows building its fulltext
165 node: expected hash of the revision
165 node: expected hash of the revision
166 p1, p2: parent revs of the revision
166 p1, p2: parent revs of the revision
167 btext: built text cache consisting of a one-element list
167 btext: built text cache consisting of a one-element list
168 cachedelta: (baserev, uncompressed_delta) or None
168 cachedelta: (baserev, uncompressed_delta) or None
169 flags: flags associated with the revision storage
169 flags: flags associated with the revision storage
170
170
171 One of btext[0] or cachedelta must be set.
171 One of btext[0] or cachedelta must be set.
172 """
172 """
173
173
174 node = attr.ib()
174 node = attr.ib()
175 p1 = attr.ib()
175 p1 = attr.ib()
176 p2 = attr.ib()
176 p2 = attr.ib()
177 btext = attr.ib()
177 btext = attr.ib()
178 textlen = attr.ib()
178 textlen = attr.ib()
179 cachedelta = attr.ib()
179 cachedelta = attr.ib()
180 flags = attr.ib()
180 flags = attr.ib()
181
181
182
182
183 @interfaceutil.implementer(repository.irevisiondelta)
183 @interfaceutil.implementer(repository.irevisiondelta)
184 @attr.s(slots=True)
184 @attr.s(slots=True)
185 class revlogrevisiondelta(object):
185 class revlogrevisiondelta(object):
186 node = attr.ib()
186 node = attr.ib()
187 p1node = attr.ib()
187 p1node = attr.ib()
188 p2node = attr.ib()
188 p2node = attr.ib()
189 basenode = attr.ib()
189 basenode = attr.ib()
190 flags = attr.ib()
190 flags = attr.ib()
191 baserevisionsize = attr.ib()
191 baserevisionsize = attr.ib()
192 revision = attr.ib()
192 revision = attr.ib()
193 delta = attr.ib()
193 delta = attr.ib()
194 linknode = attr.ib(default=None)
194 linknode = attr.ib(default=None)
195
195
196
196
197 @interfaceutil.implementer(repository.iverifyproblem)
197 @interfaceutil.implementer(repository.iverifyproblem)
198 @attr.s(frozen=True)
198 @attr.s(frozen=True)
199 class revlogproblem(object):
199 class revlogproblem(object):
200 warning = attr.ib(default=None)
200 warning = attr.ib(default=None)
201 error = attr.ib(default=None)
201 error = attr.ib(default=None)
202 node = attr.ib(default=None)
202 node = attr.ib(default=None)
203
203
204
204
205 # index v0:
205 # index v0:
206 # 4 bytes: offset
206 # 4 bytes: offset
207 # 4 bytes: compressed length
207 # 4 bytes: compressed length
208 # 4 bytes: base rev
208 # 4 bytes: base rev
209 # 4 bytes: link rev
209 # 4 bytes: link rev
210 # 20 bytes: parent 1 nodeid
210 # 20 bytes: parent 1 nodeid
211 # 20 bytes: parent 2 nodeid
211 # 20 bytes: parent 2 nodeid
212 # 20 bytes: nodeid
212 # 20 bytes: nodeid
213 indexformatv0 = struct.Struct(b">4l20s20s20s")
213 indexformatv0 = struct.Struct(b">4l20s20s20s")
214 indexformatv0_pack = indexformatv0.pack
214 indexformatv0_pack = indexformatv0.pack
215 indexformatv0_unpack = indexformatv0.unpack
215 indexformatv0_unpack = indexformatv0.unpack
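# Layout check: four big-endian 32-bit ints plus three 20-byte SHA-1
# nodeids make a fixed 76-byte record.
assert indexformatv0.size == 4 * 4 + 3 * 20  # 76 bytes per v0 entry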
216
216
217
217
218 class revlogoldindex(list):
218 class revlogoldindex(list):
219 @property
219 @property
220 def nodemap(self):
220 def nodemap(self):
221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
223 return self._nodemap
223 return self._nodemap
224
224
225 @util.propertycache
225 @util.propertycache
226 def _nodemap(self):
226 def _nodemap(self):
227 nodemap = nodemaputil.NodeMap({nullid: nullrev})
227 nodemap = nodemaputil.NodeMap({nullid: nullrev})
228 for r in range(0, len(self)):
228 for r in range(0, len(self)):
229 n = self[r][7]
229 n = self[r][7]
230 nodemap[n] = r
230 nodemap[n] = r
231 return nodemap
231 return nodemap
232
232
233 def has_node(self, node):
233 def has_node(self, node):
234 """return True if the node exist in the index"""
234 """return True if the node exist in the index"""
235 return node in self._nodemap
235 return node in self._nodemap
236
236
237 def rev(self, node):
237 def rev(self, node):
238 """return a revision for a node
238 """return a revision for a node
239
239
240 If the node is unknown, raise a RevlogError"""
240 If the node is unknown, raise a RevlogError"""
241 return self._nodemap[node]
241 return self._nodemap[node]
242
242
243 def get_rev(self, node):
243 def get_rev(self, node):
244 """return a revision for a node
244 """return a revision for a node
245
245
246 If the node is unknown, return None"""
246 If the node is unknown, return None"""
247 return self._nodemap.get(node)
247 return self._nodemap.get(node)
248
248
249 def append(self, tup):
249 def append(self, tup):
250 self._nodemap[tup[7]] = len(self)
250 self._nodemap[tup[7]] = len(self)
251 super(revlogoldindex, self).append(tup)
251 super(revlogoldindex, self).append(tup)
252
252
253 def __delitem__(self, i):
253 def __delitem__(self, i):
254 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
254 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
255 raise ValueError(b"deleting slices only supports a:-1 with step 1")
255 raise ValueError(b"deleting slices only supports a:-1 with step 1")
256 for r in pycompat.xrange(i.start, len(self)):
256 for r in pycompat.xrange(i.start, len(self)):
257 del self._nodemap[self[r][7]]
257 del self._nodemap[self[r][7]]
258 super(revlogoldindex, self).__delitem__(i)
258 super(revlogoldindex, self).__delitem__(i)
259
259
260 def clearcaches(self):
260 def clearcaches(self):
261 self.__dict__.pop('_nodemap', None)
261 self.__dict__.pop('_nodemap', None)
262
262
263 def __getitem__(self, i):
263 def __getitem__(self, i):
264 if i == -1:
264 if i == -1:
265 return (0, 0, 0, -1, -1, -1, -1, nullid)
265 return (0, 0, 0, -1, -1, -1, -1, nullid)
266 return list.__getitem__(self, i)
266 return list.__getitem__(self, i)
267
267
268
268
269 class revlogoldio(object):
269 class revlogoldio(object):
270 def __init__(self):
270 def __init__(self):
271 self.size = indexformatv0.size
271 self.size = indexformatv0.size
272
272
273 def parseindex(self, data, inline):
273 def parseindex(self, data, inline):
274 s = self.size
274 s = self.size
275 index = []
275 index = []
276 nodemap = nodemaputil.NodeMap({nullid: nullrev})
276 nodemap = nodemaputil.NodeMap({nullid: nullrev})
277 n = off = 0
277 n = off = 0
278 l = len(data)
278 l = len(data)
279 while off + s <= l:
279 while off + s <= l:
280 cur = data[off : off + s]
280 cur = data[off : off + s]
281 off += s
281 off += s
282 e = indexformatv0_unpack(cur)
282 e = indexformatv0_unpack(cur)
283 # transform to revlogv1 format
283 # transform to revlogv1 format
284 e2 = (
284 e2 = (
285 offset_type(e[0], 0),
285 offset_type(e[0], 0),
286 e[1],
286 e[1],
287 -1,
287 -1,
288 e[2],
288 e[2],
289 e[3],
289 e[3],
290 nodemap.get(e[4], nullrev),
290 nodemap.get(e[4], nullrev),
291 nodemap.get(e[5], nullrev),
291 nodemap.get(e[5], nullrev),
292 e[6],
292 e[6],
293 )
293 )
294 index.append(e2)
294 index.append(e2)
295 nodemap[e[6]] = n
295 nodemap[e[6]] = n
296 n += 1
296 n += 1
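# The v1-shaped tuple assembled above is (offset_and_flags,
# compressed_length, uncompressed_length (-1: unknown in v0), base_rev,
# link_rev, p1_rev, p2_rev, nodeid); parent nodeids are resolved to revs
# through the nodemap built so far, which works because parents always
# precede their children in a revlog.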
297
297
298 index = revlogoldindex(index)
298 index = revlogoldindex(index)
299 return index, None
299 return index, None
300
300
301 def packentry(self, entry, node, version, rev):
301 def packentry(self, entry, node, version, rev):
302 if gettype(entry[0]):
302 if gettype(entry[0]):
303 raise error.RevlogError(
303 raise error.RevlogError(
304 _(b'index entry flags need revlog version 1')
304 _(b'index entry flags need revlog version 1')
305 )
305 )
306 e2 = (
306 e2 = (
307 getoffset(entry[0]),
307 getoffset(entry[0]),
308 entry[1],
308 entry[1],
309 entry[3],
309 entry[3],
310 entry[4],
310 entry[4],
311 node(entry[5]),
311 node(entry[5]),
312 node(entry[6]),
312 node(entry[6]),
313 entry[7],
313 entry[7],
314 )
314 )
315 return indexformatv0_pack(*e2)
315 return indexformatv0_pack(*e2)
316
316
317
317
318 # index ng:
318 # index ng:
319 # 6 bytes: offset
319 # 6 bytes: offset
320 # 2 bytes: flags
320 # 2 bytes: flags
321 # 4 bytes: compressed length
321 # 4 bytes: compressed length
322 # 4 bytes: uncompressed length
322 # 4 bytes: uncompressed length
323 # 4 bytes: base rev
323 # 4 bytes: base rev
324 # 4 bytes: link rev
324 # 4 bytes: link rev
325 # 4 bytes: parent 1 rev
325 # 4 bytes: parent 1 rev
326 # 4 bytes: parent 2 rev
326 # 4 bytes: parent 2 rev
327 # 32 bytes: nodeid
327 # 32 bytes: nodeid
328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
329 indexformatng_pack = indexformatng.pack
329 indexformatng_pack = indexformatng.pack
330 versionformat = struct.Struct(b">I")
330 versionformat = struct.Struct(b">I")
331 versionformat_pack = versionformat.pack
331 versionformat_pack = versionformat.pack
332 versionformat_unpack = versionformat.unpack
332 versionformat_unpack = versionformat.unpack
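# Layout check: the packed offset+flags quadword, six 32-bit ints, a
# 20-byte nodeid and 12 bytes of padding (reserving room for 32-byte
# hashes) make each entry exactly 64 bytes.
assert indexformatng.size == 8 + 6 * 4 + 20 + 12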
333
333
334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
335 # signed integer)
335 # signed integer)
336 _maxentrysize = 0x7FFFFFFF
336 _maxentrysize = 0x7FFFFFFF
337
337
338
338
339 class revlogio(object):
339 class revlogio(object):
340 def __init__(self):
340 def __init__(self):
341 self.size = indexformatng.size
341 self.size = indexformatng.size
342
342
343 def parseindex(self, data, inline):
343 def parseindex(self, data, inline):
344 # call the C implementation to parse the index data
344 # call the C implementation to parse the index data
345 index, cache = parsers.parse_index2(data, inline)
345 index, cache = parsers.parse_index2(data, inline)
346 return index, cache
346 return index, cache
347
347
348 def packentry(self, entry, node, version, rev):
348 def packentry(self, entry, node, version, rev):
349 p = indexformatng_pack(*entry)
349 p = indexformatng_pack(*entry)
350 if rev == 0:
350 if rev == 0:
351 p = versionformat_pack(version) + p[4:]
351 p = versionformat_pack(version) + p[4:]
352 return p
352 return p
353
353
354
354
355 class rustrevlogio(revlogio):
355 class rustrevlogio(revlogio):
356 def parseindex(self, data, inline):
356 def parseindex(self, data, inline):
357 index, cache = super(rustrevlogio, self).parseindex(data, inline)
357 index, cache = super(rustrevlogio, self).parseindex(data, inline)
358 return rustrevlog.MixedIndex(index), cache
358 return rustrevlog.MixedIndex(index), cache
359
359
360
360
361 class revlog(object):
361 class revlog(object):
362 """
362 """
363 the underlying revision storage object
363 the underlying revision storage object
364
364
365 A revlog consists of two parts, an index and the revision data.
365 A revlog consists of two parts, an index and the revision data.
366
366
367 The index is a file with a fixed record size containing
367 The index is a file with a fixed record size containing
368 information on each revision, including its nodeid (hash), the
368 information on each revision, including its nodeid (hash), the
369 nodeids of its parents, the position and offset of its data within
369 nodeids of its parents, the position and offset of its data within
370 the data file, and the revision it's based on. Finally, each entry
370 the data file, and the revision it's based on. Finally, each entry
371 contains a linkrev field that can serve as a pointer to external
371 contains a linkrev field that can serve as a pointer to external
372 data.
372 data.
373
373
374 The revision data itself is a linear collection of data chunks.
374 The revision data itself is a linear collection of data chunks.
375 Each chunk represents a revision and is usually represented as a
375 Each chunk represents a revision and is usually represented as a
376 delta against the previous chunk. To bound lookup time, runs of
376 delta against the previous chunk. To bound lookup time, runs of
377 deltas are limited to about 2 times the length of the original
377 deltas are limited to about 2 times the length of the original
378 version data. This makes retrieval of a version proportional to
378 version data. This makes retrieval of a version proportional to
379 its size, or O(1) relative to the number of revisions.
379 its size, or O(1) relative to the number of revisions.
380
380
381 Both pieces of the revlog are written to in an append-only
381 Both pieces of the revlog are written to in an append-only
382 fashion, which means we never need to rewrite a file to insert or
382 fashion, which means we never need to rewrite a file to insert or
383 remove data, and can use some simple techniques to avoid the need
383 remove data, and can use some simple techniques to avoid the need
384 for locking while reading.
384 for locking while reading.
385
385
386 If checkambig, indexfile is opened with checkambig=True at
386 If checkambig, indexfile is opened with checkambig=True at
387 writing, to avoid file stat ambiguity.
387 writing, to avoid file stat ambiguity.
388
388
389 If mmaplargeindex is True, and an mmapindexthreshold is set, the
389 If mmaplargeindex is True, and an mmapindexthreshold is set, the
390 index will be mmapped rather than read if it is larger than the
390 index will be mmapped rather than read if it is larger than the
391 configured threshold.
391 configured threshold.
392
392
393 If censorable is True, the revlog can have censored revisions.
393 If censorable is True, the revlog can have censored revisions.
394
394
395 If `upperboundcomp` is not None, this is the expected maximal gain from
395 If `upperboundcomp` is not None, this is the expected maximal gain from
396 compression for the data content.
396 compression for the data content.
397 """
397 """
398
398
399 _flagserrorclass = error.RevlogError
399 _flagserrorclass = error.RevlogError
400
400
401 def __init__(
401 def __init__(
402 self,
402 self,
403 opener,
403 opener,
404 indexfile,
404 indexfile,
405 datafile=None,
405 datafile=None,
406 checkambig=False,
406 checkambig=False,
407 mmaplargeindex=False,
407 mmaplargeindex=False,
408 censorable=False,
408 censorable=False,
409 upperboundcomp=None,
409 upperboundcomp=None,
410 persistentnodemap=False,
410 ):
411 ):
411 """
412 """
412 create a revlog object
413 create a revlog object
413
414
414 opener is a function that abstracts the file opening operation
415 opener is a function that abstracts the file opening operation
415 and can be used to implement COW semantics or the like.
416 and can be used to implement COW semantics or the like.
416
417
417 """
418 """
418 self.upperboundcomp = upperboundcomp
419 self.upperboundcomp = upperboundcomp
419 self.indexfile = indexfile
420 self.indexfile = indexfile
420 self.datafile = datafile or (indexfile[:-2] + b".d")
421 self.datafile = datafile or (indexfile[:-2] + b".d")
422 self.nodemap_file = None
423 if persistentnodemap:
424 self.nodemap_file = indexfile[:-2] + b".n"
425
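# The persistent nodemap lives next to the index it accelerates:
# indexfile[:-2] drops the b".i" suffix, so b"00changelog.i" gets its
# lookup cache in b"00changelog.n".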
421 self.opener = opener
426 self.opener = opener
422 # When True, indexfile is opened with checkambig=True at writing, to
427 # When True, indexfile is opened with checkambig=True at writing, to
423 # avoid file stat ambiguity.
428 # avoid file stat ambiguity.
424 self._checkambig = checkambig
429 self._checkambig = checkambig
425 self._mmaplargeindex = mmaplargeindex
430 self._mmaplargeindex = mmaplargeindex
426 self._censorable = censorable
431 self._censorable = censorable
427 # 3-tuple of (node, rev, text) for a raw revision.
432 # 3-tuple of (node, rev, text) for a raw revision.
428 self._revisioncache = None
433 self._revisioncache = None
429 # Maps rev to chain base rev.
434 # Maps rev to chain base rev.
430 self._chainbasecache = util.lrucachedict(100)
435 self._chainbasecache = util.lrucachedict(100)
431 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
436 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
432 self._chunkcache = (0, b'')
437 self._chunkcache = (0, b'')
433 # How much data to read and cache into the raw revlog data cache.
438 # How much data to read and cache into the raw revlog data cache.
434 self._chunkcachesize = 65536
439 self._chunkcachesize = 65536
435 self._maxchainlen = None
440 self._maxchainlen = None
436 self._deltabothparents = True
441 self._deltabothparents = True
437 self.index = None
442 self.index = None
438 # Mapping of partial identifiers to full nodes.
443 # Mapping of partial identifiers to full nodes.
439 self._pcache = {}
444 self._pcache = {}
440 # Mapping of revision integer to full node.
445 # Mapping of revision integer to full node.
441 self._compengine = b'zlib'
446 self._compengine = b'zlib'
442 self._compengineopts = {}
447 self._compengineopts = {}
443 self._maxdeltachainspan = -1
448 self._maxdeltachainspan = -1
444 self._withsparseread = False
449 self._withsparseread = False
445 self._sparserevlog = False
450 self._sparserevlog = False
446 self._srdensitythreshold = 0.50
451 self._srdensitythreshold = 0.50
447 self._srmingapsize = 262144
452 self._srmingapsize = 262144
448
453
449 # Make copy of flag processors so each revlog instance can support
454 # Make copy of flag processors so each revlog instance can support
450 # custom flags.
455 # custom flags.
451 self._flagprocessors = dict(flagutil.flagprocessors)
456 self._flagprocessors = dict(flagutil.flagprocessors)
452
457
453 # 2-tuple of file handles being used for active writing.
458 # 2-tuple of file handles being used for active writing.
454 self._writinghandles = None
459 self._writinghandles = None
455
460
456 self._loadindex()
461 self._loadindex()
457
462
458 def _loadindex(self):
463 def _loadindex(self):
459 mmapindexthreshold = None
464 mmapindexthreshold = None
460 opts = self.opener.options
465 opts = self.opener.options
461
466
462 if b'revlogv2' in opts:
467 if b'revlogv2' in opts:
463 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
468 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
464 elif b'revlogv1' in opts:
469 elif b'revlogv1' in opts:
465 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
470 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
466 if b'generaldelta' in opts:
471 if b'generaldelta' in opts:
467 newversionflags |= FLAG_GENERALDELTA
472 newversionflags |= FLAG_GENERALDELTA
468 elif b'revlogv0' in self.opener.options:
473 elif b'revlogv0' in self.opener.options:
469 newversionflags = REVLOGV0
474 newversionflags = REVLOGV0
470 else:
475 else:
471 newversionflags = REVLOG_DEFAULT_VERSION
476 newversionflags = REVLOG_DEFAULT_VERSION
472
477
473 if b'chunkcachesize' in opts:
478 if b'chunkcachesize' in opts:
474 self._chunkcachesize = opts[b'chunkcachesize']
479 self._chunkcachesize = opts[b'chunkcachesize']
475 if b'maxchainlen' in opts:
480 if b'maxchainlen' in opts:
476 self._maxchainlen = opts[b'maxchainlen']
481 self._maxchainlen = opts[b'maxchainlen']
477 if b'deltabothparents' in opts:
482 if b'deltabothparents' in opts:
478 self._deltabothparents = opts[b'deltabothparents']
483 self._deltabothparents = opts[b'deltabothparents']
479 self._lazydelta = bool(opts.get(b'lazydelta', True))
484 self._lazydelta = bool(opts.get(b'lazydelta', True))
480 self._lazydeltabase = False
485 self._lazydeltabase = False
481 if self._lazydelta:
486 if self._lazydelta:
482 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
487 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
483 if b'compengine' in opts:
488 if b'compengine' in opts:
484 self._compengine = opts[b'compengine']
489 self._compengine = opts[b'compengine']
485 if b'zlib.level' in opts:
490 if b'zlib.level' in opts:
486 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
491 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
487 if b'zstd.level' in opts:
492 if b'zstd.level' in opts:
488 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
493 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
489 if b'maxdeltachainspan' in opts:
494 if b'maxdeltachainspan' in opts:
490 self._maxdeltachainspan = opts[b'maxdeltachainspan']
495 self._maxdeltachainspan = opts[b'maxdeltachainspan']
491 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
496 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
492 mmapindexthreshold = opts[b'mmapindexthreshold']
497 mmapindexthreshold = opts[b'mmapindexthreshold']
493 self.hassidedata = bool(opts.get(b'side-data', False))
498 self.hassidedata = bool(opts.get(b'side-data', False))
494 if self.hassidedata:
499 if self.hassidedata:
495 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
500 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
496 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
501 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
497 withsparseread = bool(opts.get(b'with-sparse-read', False))
502 withsparseread = bool(opts.get(b'with-sparse-read', False))
498 # sparse-revlog forces sparse-read
503 # sparse-revlog forces sparse-read
499 self._withsparseread = self._sparserevlog or withsparseread
504 self._withsparseread = self._sparserevlog or withsparseread
500 if b'sparse-read-density-threshold' in opts:
505 if b'sparse-read-density-threshold' in opts:
501 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
506 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
502 if b'sparse-read-min-gap-size' in opts:
507 if b'sparse-read-min-gap-size' in opts:
503 self._srmingapsize = opts[b'sparse-read-min-gap-size']
508 self._srmingapsize = opts[b'sparse-read-min-gap-size']
504 if opts.get(b'enableellipsis'):
509 if opts.get(b'enableellipsis'):
505 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
510 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
506
511
507 # revlog v0 doesn't have flag processors
512 # revlog v0 doesn't have flag processors
508 for flag, processor in pycompat.iteritems(
513 for flag, processor in pycompat.iteritems(
509 opts.get(b'flagprocessors', {})
514 opts.get(b'flagprocessors', {})
510 ):
515 ):
511 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
516 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
512
517
513 if self._chunkcachesize <= 0:
518 if self._chunkcachesize <= 0:
514 raise error.RevlogError(
519 raise error.RevlogError(
515 _(b'revlog chunk cache size %r is not greater than 0')
520 _(b'revlog chunk cache size %r is not greater than 0')
516 % self._chunkcachesize
521 % self._chunkcachesize
517 )
522 )
518 elif self._chunkcachesize & (self._chunkcachesize - 1):
523 elif self._chunkcachesize & (self._chunkcachesize - 1):
519 raise error.RevlogError(
524 raise error.RevlogError(
520 _(b'revlog chunk cache size %r is not a power of 2')
525 _(b'revlog chunk cache size %r is not a power of 2')
521 % self._chunkcachesize
526 % self._chunkcachesize
522 )
527 )
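# n & (n - 1) clears the lowest set bit, so the expression is zero
# exactly when n is a power of two (65536 & 65535 == 0, but
# 65537 & 65536 != 0); any other value trips this error.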
523
528
524 indexdata = b''
529 indexdata = b''
525 self._initempty = True
530 self._initempty = True
526 try:
531 try:
527 with self._indexfp() as f:
532 with self._indexfp() as f:
528 if (
533 if (
529 mmapindexthreshold is not None
534 mmapindexthreshold is not None
530 and self.opener.fstat(f).st_size >= mmapindexthreshold
535 and self.opener.fstat(f).st_size >= mmapindexthreshold
531 ):
536 ):
532 # TODO: should .close() the mmap to release resources without
537 # TODO: should .close() the mmap to release resources without
533 # relying on Python GC
538 # relying on Python GC
534 indexdata = util.buffer(util.mmapread(f))
539 indexdata = util.buffer(util.mmapread(f))
535 else:
540 else:
536 indexdata = f.read()
541 indexdata = f.read()
537 if len(indexdata) > 0:
542 if len(indexdata) > 0:
538 versionflags = versionformat_unpack(indexdata[:4])[0]
543 versionflags = versionformat_unpack(indexdata[:4])[0]
539 self._initempty = False
544 self._initempty = False
540 else:
545 else:
541 versionflags = newversionflags
546 versionflags = newversionflags
542 except IOError as inst:
547 except IOError as inst:
543 if inst.errno != errno.ENOENT:
548 if inst.errno != errno.ENOENT:
544 raise
549 raise
545
550
546 versionflags = newversionflags
551 versionflags = newversionflags
547
552
548 self.version = versionflags
553 self.version = versionflags
549
554
550 flags = versionflags & ~0xFFFF
555 flags = versionflags & ~0xFFFF
551 fmt = versionflags & 0xFFFF
556 fmt = versionflags & 0xFFFF
552
557
553 if fmt == REVLOGV0:
558 if fmt == REVLOGV0:
554 if flags:
559 if flags:
555 raise error.RevlogError(
560 raise error.RevlogError(
556 _(b'unknown flags (%#04x) in version %d revlog %s')
561 _(b'unknown flags (%#04x) in version %d revlog %s')
557 % (flags >> 16, fmt, self.indexfile)
562 % (flags >> 16, fmt, self.indexfile)
558 )
563 )
559
564
560 self._inline = False
565 self._inline = False
561 self._generaldelta = False
566 self._generaldelta = False
562
567
563 elif fmt == REVLOGV1:
568 elif fmt == REVLOGV1:
564 if flags & ~REVLOGV1_FLAGS:
569 if flags & ~REVLOGV1_FLAGS:
565 raise error.RevlogError(
570 raise error.RevlogError(
566 _(b'unknown flags (%#04x) in version %d revlog %s')
571 _(b'unknown flags (%#04x) in version %d revlog %s')
567 % (flags >> 16, fmt, self.indexfile)
572 % (flags >> 16, fmt, self.indexfile)
568 )
573 )
569
574
570 self._inline = versionflags & FLAG_INLINE_DATA
575 self._inline = versionflags & FLAG_INLINE_DATA
571 self._generaldelta = versionflags & FLAG_GENERALDELTA
576 self._generaldelta = versionflags & FLAG_GENERALDELTA
572
577
573 elif fmt == REVLOGV2:
578 elif fmt == REVLOGV2:
574 if flags & ~REVLOGV2_FLAGS:
579 if flags & ~REVLOGV2_FLAGS:
575 raise error.RevlogError(
580 raise error.RevlogError(
576 _(b'unknown flags (%#04x) in version %d revlog %s')
581 _(b'unknown flags (%#04x) in version %d revlog %s')
577 % (flags >> 16, fmt, self.indexfile)
582 % (flags >> 16, fmt, self.indexfile)
578 )
583 )
579
584
580 self._inline = versionflags & FLAG_INLINE_DATA
585 self._inline = versionflags & FLAG_INLINE_DATA
581 # generaldelta implied by version 2 revlogs.
586 # generaldelta implied by version 2 revlogs.
582 self._generaldelta = True
587 self._generaldelta = True
583
588
584 else:
589 else:
585 raise error.RevlogError(
590 raise error.RevlogError(
586 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
591 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
587 )
592 )
588 # sparse-revlog can't be on without general-delta (issue6056)
593 # sparse-revlog can't be on without general-delta (issue6056)
589 if not self._generaldelta:
594 if not self._generaldelta:
590 self._sparserevlog = False
595 self._sparserevlog = False
591
596
592 self._storedeltachains = True
597 self._storedeltachains = True
593
598
594 self._io = revlogio()
599 self._io = revlogio()
595 if self.version == REVLOGV0:
600 if self.version == REVLOGV0:
596 self._io = revlogoldio()
601 self._io = revlogoldio()
597 elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
602 elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
598 self._io = rustrevlogio()
603 self._io = rustrevlogio()
599 try:
604 try:
600 d = self._io.parseindex(indexdata, self._inline)
605 d = self._io.parseindex(indexdata, self._inline)
601 except (ValueError, IndexError):
606 except (ValueError, IndexError):
602 raise error.RevlogError(
607 raise error.RevlogError(
603 _(b"index %s is corrupted") % self.indexfile
608 _(b"index %s is corrupted") % self.indexfile
604 )
609 )
605 self.index, self._chunkcache = d
610 self.index, self._chunkcache = d
606 if not self._chunkcache:
611 if not self._chunkcache:
607 self._chunkclear()
612 self._chunkclear()
608 # revnum -> (chain-length, sum-delta-length)
613 # revnum -> (chain-length, sum-delta-length)
609 self._chaininfocache = {}
614 self._chaininfocache = {}
610 # revlog header -> revlog compressor
615 # revlog header -> revlog compressor
611 self._decompressors = {}
616 self._decompressors = {}
612
617
613 @util.propertycache
618 @util.propertycache
614 def _compressor(self):
619 def _compressor(self):
615 engine = util.compengines[self._compengine]
620 engine = util.compengines[self._compengine]
616 return engine.revlogcompressor(self._compengineopts)
621 return engine.revlogcompressor(self._compengineopts)
617
622
618 def _indexfp(self, mode=b'r'):
623 def _indexfp(self, mode=b'r'):
619 """file object for the revlog's index file"""
624 """file object for the revlog's index file"""
620 args = {'mode': mode}
625 args = {'mode': mode}
621 if mode != b'r':
626 if mode != b'r':
622 args['checkambig'] = self._checkambig
627 args['checkambig'] = self._checkambig
623 if mode == b'w':
628 if mode == b'w':
624 args['atomictemp'] = True
629 args['atomictemp'] = True
625 return self.opener(self.indexfile, **args)
630 return self.opener(self.indexfile, **args)
626
631
627 def _datafp(self, mode=b'r'):
632 def _datafp(self, mode=b'r'):
628 """file object for the revlog's data file"""
633 """file object for the revlog's data file"""
629 return self.opener(self.datafile, mode=mode)
634 return self.opener(self.datafile, mode=mode)
630
635
631 @contextlib.contextmanager
636 @contextlib.contextmanager
632 def _datareadfp(self, existingfp=None):
637 def _datareadfp(self, existingfp=None):
633 """file object suitable to read data"""
638 """file object suitable to read data"""
634 # Use explicit file handle, if given.
639 # Use explicit file handle, if given.
635 if existingfp is not None:
640 if existingfp is not None:
636 yield existingfp
641 yield existingfp
637
642
638 # Use a file handle being actively used for writes, if available.
643 # Use a file handle being actively used for writes, if available.
639 # There is some danger to doing this because reads will seek the
644 # There is some danger to doing this because reads will seek the
640 # file. However, _writeentry() performs a SEEK_END before all writes,
645 # file. However, _writeentry() performs a SEEK_END before all writes,
641 # so we should be safe.
646 # so we should be safe.
642 elif self._writinghandles:
647 elif self._writinghandles:
643 if self._inline:
648 if self._inline:
644 yield self._writinghandles[0]
649 yield self._writinghandles[0]
645 else:
650 else:
646 yield self._writinghandles[1]
651 yield self._writinghandles[1]
647
652
648 # Otherwise open a new file handle.
653 # Otherwise open a new file handle.
649 else:
654 else:
650 if self._inline:
655 if self._inline:
651 func = self._indexfp
656 func = self._indexfp
652 else:
657 else:
653 func = self._datafp
658 func = self._datafp
654 with func() as fp:
659 with func() as fp:
655 yield fp
660 yield fp
656
661
657 def tiprev(self):
662 def tiprev(self):
658 return len(self.index) - 1
663 return len(self.index) - 1
659
664
660 def tip(self):
665 def tip(self):
661 return self.node(self.tiprev())
666 return self.node(self.tiprev())
662
667
663 def __contains__(self, rev):
668 def __contains__(self, rev):
664 return 0 <= rev < len(self)
669 return 0 <= rev < len(self)
665
670
666 def __len__(self):
671 def __len__(self):
667 return len(self.index)
672 return len(self.index)
668
673
669 def __iter__(self):
674 def __iter__(self):
670 return iter(pycompat.xrange(len(self)))
675 return iter(pycompat.xrange(len(self)))
671
676
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @property
    def nodemap(self):
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    @property
    def _nodecache(self):
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True
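
    # Hedged usage sketch for ``candelta`` (``rl`` and the helper name are
    # assumptions for illustration, not part of this module): a delta
    # computer would consult it before picking a delta parent.
    #
    #   def pickdeltabase(rl, rev, candidates):
    #       for base in candidates:
    #           if rl.candelta(base, rev):
    #               return base
    #       return nullrev  # no usable base; store a full snapshot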

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self.index.clearcaches()

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
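
    # The packing that ``start`` and ``flags`` decode, illustrated with a
    # made-up value (hedged example): the first index tuple entry stores
    # ``(offset << 16) | flags`` in a single integer.
    #
    #   >>> packed = (1234 << 16) | 0x0001
    #   >>> (packed >> 16, packed & 0xFFFF)
    #   (1234, 1)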

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
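
    # A toy rendition of the pure-Python walk above (hedged example): with
    # generaldelta, entry[3] points at the delta base, and a revision whose
    # base is itself is a full snapshot terminating the chain.
    #
    #   >>> index = [(0,), (0,), (1,)]  # rev -> (deltabase,)
    #   >>> rev, chain = 2, []
    #   >>> while index[rev][0] != rev:
    #   ...     chain.append(rev)
    #   ...     rev = index[rev][0]
    #   >>> chain.append(rev)
    #   >>> chain[::-1]  # ascending, like _deltachain returns
    #   [0, 1, 2]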

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, b'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
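
    # Hedged usage sketch (``rl`` and ``somerev`` are assumptions): the
    # returned lazyancestors object supports iteration and membership tests
    # without materializing the full ancestor set up front.
    #
    #   ancs = rl.ancestors([rl.tiprev()], inclusive=True)
    #   if somerev in ancs:
    #       pass  # somerev is reachable from the tip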

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)
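
    # Hedged usage sketch (``rl``, ``commonrevs`` and ``heads`` are
    # assumptions for illustration): callers hold on to the returned object
    # and query it as discovery proceeds, rather than recomputing
    # ``(::heads) - (::common)`` from scratch.
    #
    #   inc = rl.incrementalmissingrevs(common=commonrevs)
    #   missing = inc.missingancestors(heads)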

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant.  (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
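
    # Hedged illustration of the contract above: on a linear revlog
    # 0 -> 1 -> 2, the path from node 0 to node 2 contains all three nodes,
    # with node 0 the only surviving root and node 2 the only reached head
    # (``rl`` is an assumed open revlog).
    #
    #   nodes, outroots, outheads = rl.nodesbetween(
    #       roots=[rl.node(0)], heads=[rl.node(2)]
    #   )
    #   # nodes == [rl.node(0), rl.node(1), rl.node(2)]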

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]
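
    # The marking scheme above, on a toy parent table (hedged example).
    # The extra trailing slot mirrors the ``count + 1`` allocation: it
    # quietly absorbs writes through nullrev (-1) without index errors.
    #
    #   >>> parents = [(-1, -1), (0, -1), (0, -1)]  # revs 1 and 2 fork off 0
    #   >>> ishead = [1] * len(parents) + [0]
    #   >>> for p1, p2 in parents:
    #   ...     ishead[p1] = ishead[p2] = 0
    #   >>> [r for r, val in enumerate(ishead) if val]
    #   [1, 2]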

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c
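
    # ``children`` rescans every revision after the parent on each call, so
    # when the children of many revisions are needed it is cheaper to build
    # a map in one pass (hedged sketch; ``rl`` is an assumed open revlog):
    #
    #   childmap = {}
    #   for r in rl.revs():
    #       for p in rl.parentrevs(r):
    #           if p != nullrev:
    #               childmap.setdefault(p, []).append(r)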

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))
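
    # Why the early exits above are safe (a hedged restatement): revlogs
    # are append-only and a parent is always stored before its children,
    # so ``a > b`` alone proves ``a`` cannot be an ancestor of ``b``; only
    # the remaining case pays for a reachability walk.
    #
    #   if rl.isancestorrev(baserev, rev):
    #       pass  # rev descends from baserev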

    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _(b'no match found'))
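
    # Hedged examples of the inputs ``lookup`` resolves (``rl`` is an
    # assumed open revlog; the hex prefix shown is illustrative):
    #
    #   rl.lookup(0)                 # revision number
    #   rl.lookup(b'0')              # str(revision number)
    #   rl.lookup(node)              # 20-byte binary node
    #   rl.lookup(b'1e4e1b8f71e0')   # unambiguous hex prefix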

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _(b'no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1475
1480
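    # Editor's illustration (not part of the original source): assuming a
    # revlog ``rl`` whose only other node starts with b'12ab', a node
    # starting with b'12f0' would shorten like this, since b'1' and b'12'
    # are ambiguous prefixes:
    #
    #   rl.shortest(node)     # -> b'12f'
    #   rl.shortest(node, 6)  # -> b'12f0..' (minlength only sets a floor)
    #
    # An all-b'f' prefix is never returned, because it would be ambiguous
    # with the working directory pseudo-node (wdirid).
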
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self.indexfile if self._inline else self.datafile,
                        length,
                        realoffset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d

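    # Editor's illustration (not part of the original source): assuming the
    # default 64KiB chunk cache size, a request for 100 bytes at offset
    # 70000 is widened to the surrounding aligned window before reading:
    #
    #   cachesize  = 65536
    #   realoffset = 70000 & ~(65536 - 1)                        # -> 65536
    #   reallength = ((70000 + 100 + 65536) & ~65535) - 65536    # -> 65536
    #
    # so one aligned 64KiB read at offset 65536 serves this request and is
    # cached for neighbouring requests in either direction.
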
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d  # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

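    # Editor's illustration (not part of the original source): in an inline
    # revlog, index entries and data are interleaved in the same file, so
    # the physical position of revision ``r``'s data is its logical data
    # offset plus the size of the ``r + 1`` index entries preceding it:
    #
    #   physical_start = self.start(r) + (r + 1) * self._io.size
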
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, b'')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

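    # Editor's note (not part of the original source): with generaldelta,
    # entry[3] names the delta parent directly, so it may be any earlier
    # revision; without it, deltas always chain against ``rev - 1`` and
    # entry[3] only marks where the delta chain starts (base == rev means
    # the revision is stored as a full text, hence nullrev).
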
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

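    # Editor's note (not part of the original source): under sparse-revlog a
    # snapshot is either a full text (base == rev or base == nullrev) or a
    # delta whose base is itself a snapshot and is *not* one of the
    # revision's parents; a delta against a parent is an ordinary delta.
    # The recursion above bottoms out at the full-text snapshot.
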
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = (
                b'revlog.revision(..., raw=True) is deprecated, '
                b'use revlog.rawdata(...)'
            )
            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future to make the code more
        efficient/lazy.
        """
        return self._revisiondata(nodeorrev, _df)[1]

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext, {}
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (usually altering its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, {}

        sidedata = {}
        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            try:
                r = flagutil.processflagsread(self, rawtext, flags)
            except error.SidedataHashError as exc:
                msg = _(b"integrity check failed on %s:%s sidedata key %d")
                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
                raise error.RevlogError(msg)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)[0]

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

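    # Editor's illustration (not part of the original source): the default
    # hash is a SHA-1 over the two parent nodeids (lexicographically sorted)
    # followed by the text, roughly equivalent to:
    #
    #   hashlib.sha1(min(p1, p2) + max(p1, p2) + text).digest()
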
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (
            not self._inline
            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
        ):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self.indexfile
            )

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp(b'w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

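    # Editor's note (not part of the original source): after this conversion
    # the data previously interleaved with index entries lives in a separate
    # ``.d`` file, and the ``.i`` file is rewritten to hold only the
    # fixed-size index entries packed above; the revlog never converts back
    # to inline form.
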
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in such a case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
        multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.indexfile
            )

        if sidedata is None:
            sidedata = {}
            flags = flags & ~REVIDX_SIDEDATA
        elif not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that does not support it")
            )
        else:
            flags |= REVIDX_SIDEDATA

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(
            self, text, flags, sidedata=sidedata
        )

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.indexfile, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        if self.index.has_node(node):
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over the wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp(b"a+")
        ifh = self._indexfp(b"a+")
        try:
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                ifh,
                dfh,
                deltacomputer=deltacomputer,
            )
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

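    # Editor's illustration (not part of the original source): the empty
    # header means the payload identifies itself, while b'u' explicitly
    # marks uncompressed storage. Assuming the engine declines to compress
    # short incompressible input:
    #
    #   rl.compress(b'')             # -> (b'', b'')
    #   rl.compress(b'\0binary...')  # -> (b'', b'\0binary...')  leading NUL
    #   rl.compress(b'short text')   # -> (b'u', b'short text')
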
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_(b'unknown compression type %r') % t)

        return compressor.decompress(data)

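    # Editor's illustration (not part of the original source): dispatch is
    # driven by the first byte of the chunk:
    #
    #   b'x'  -> zlib stream (b'x' is the first byte of a standard zlib
    #            header)
    #   b'\0' -> raw data stored as-is; returned unchanged
    #   b'u'  -> uncompressed data behind an explicit marker; marker stripped
    #   other -> resolved through util.compengines.forrevlogheader() (for
    #            example the zstd engine), with the resolved compressor
    #            memoized in self._decompressors
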
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        ifh,
        dfh,
        alwayscache=False,
        deltacomputer=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.indexfile
            )
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.indexfile
            )

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need the rawtext size, before it is changed by flag processors,
            # which is the non-raw size. use revlog explicitly to avoid
            # filelog's extra logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
        )
        self.index.append(e)

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(
            transaction, ifh, dfh, entry, deltainfo.data, link, offset
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)
        nodemaputil.setup_persistent_nodemap(transaction, self)

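    # Editor's note (not part of the original source): the
    # setup_persistent_nodemap() call above is the hook introduced by this
    # changeset ("nodemap: write nodemap data on disk"); it runs after each
    # entry is written so that updated persistent-nodemap data can be
    # written to disk as part of the same transaction.
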
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._writinghandles:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp(b"a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp(b"a+")

        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if self.index.has_node(node):
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if not self.index.has_node(p):
                        raise error.LookupError(
                            p, self.indexfile, _(b'unknown parent')
                        )

                if not self.index.has_node(deltabase):
                    raise error.LookupError(
                        deltabase, self.indexfile, _(b'unknown delta base')
                    )

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(b">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(
                            self.indexfile, self.node(baserev)
                        )

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(
                    node,
                    None,
                    transaction,
                    link,
                    p1,
                    p2,
                    flags,
                    (baserev, delta),
                    ifh,
                    dfh,
                    alwayscache=bool(addrevisioncb),
                    deltacomputer=deltacomputer,
                )

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp(b"a+")
                    ifh = self._indexfp(b"a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

2411 def iscensored(self, rev):
2417 def iscensored(self, rev):
2412 """Check if a file revision is censored."""
2418 """Check if a file revision is censored."""
2413 if not self._censorable:
2419 if not self._censorable:
2414 return False
2420 return False
2415
2421
2416 return self.flags(rev) & REVIDX_ISCENSORED
2422 return self.flags(rev) & REVIDX_ISCENSORED
2417
2423
2418 def _peek_iscensored(self, baserev, delta, flush):
2424 def _peek_iscensored(self, baserev, delta, flush):
2419 """Quickly check if a delta produces a censored revision."""
2425 """Quickly check if a delta produces a censored revision."""
2420 if not self._censorable:
2426 if not self._censorable:
2421 return False
2427 return False
2422
2428
2423 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2429 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2424
2430
2425 def getstrippoint(self, minlink):
2431 def getstrippoint(self, minlink):
2426 """find the minimum rev that must be stripped to strip the linkrev
2432 """find the minimum rev that must be stripped to strip the linkrev
2427
2433
2428 Returns a tuple containing the minimum rev and a set of all revs that
2434 Returns a tuple containing the minimum rev and a set of all revs that
2429 have linkrevs that will be broken by this strip.
2435 have linkrevs that will be broken by this strip.
2430 """
2436 """
2431 return storageutil.resolvestripinfo(
2437 return storageutil.resolvestripinfo(
2432 minlink,
2438 minlink,
2433 len(self) - 1,
2439 len(self) - 1,
2434 self.headrevs(),
2440 self.headrevs(),
2435 self.linkrev,
2441 self.linkrev,
2436 self.parentrevs,
2442 self.parentrevs,
2437 )
2443 )
2438
2444
2439 def strip(self, minlink, transaction):
2445 def strip(self, minlink, transaction):
2440 """truncate the revlog on the first revision with a linkrev >= minlink
2446 """truncate the revlog on the first revision with a linkrev >= minlink
2441
2447
2442 This function is called when we're stripping revision minlink and
2448 This function is called when we're stripping revision minlink and
2443 its descendants from the repository.
2449 its descendants from the repository.
2444
2450
2445 We have to remove all revisions with linkrev >= minlink, because
2451 We have to remove all revisions with linkrev >= minlink, because
2446 the equivalent changelog revisions will be renumbered after the
2452 the equivalent changelog revisions will be renumbered after the
2447 strip.
2453 strip.
2448
2454
2449 So we truncate the revlog on the first of these revisions, and
2455 So we truncate the revlog on the first of these revisions, and
2450 trust that the caller has saved the revisions that shouldn't be
2456 trust that the caller has saved the revisions that shouldn't be
2451 removed and that it'll re-add them after this truncation.
2457 removed and that it'll re-add them after this truncation.
2452 """
2458 """
2453 if len(self) == 0:
2459 if len(self) == 0:
2454 return
2460 return
2455
2461
2456 rev, _ = self.getstrippoint(minlink)
2462 rev, _ = self.getstrippoint(minlink)
2457 if rev == len(self):
2463 if rev == len(self):
2458 return
2464 return
2459
2465
2460 # first truncate the files on disk
2466 # first truncate the files on disk
2461 end = self.start(rev)
2467 end = self.start(rev)
2462 if not self._inline:
2468 if not self._inline:
2463 transaction.add(self.datafile, end)
2469 transaction.add(self.datafile, end)
2464 end = rev * self._io.size
2470 end = rev * self._io.size
2465 else:
2471 else:
2466 end += rev * self._io.size
2472 end += rev * self._io.size
2467
2473
2468 transaction.add(self.indexfile, end)
2474 transaction.add(self.indexfile, end)
2469
2475
2470 # then reset internal state in memory to forget those revisions
2476 # then reset internal state in memory to forget those revisions
2471 self._revisioncache = None
2477 self._revisioncache = None
2472 self._chaininfocache = {}
2478 self._chaininfocache = {}
2473 self._chunkclear()
2479 self._chunkclear()
2474
2480
2475 del self.index[rev:-1]
2481 del self.index[rev:-1]
2476
2482
2477 def checksize(self):
2483 def checksize(self):
2478 """Check size of index and data files
2484 """Check size of index and data files
2479
2485
2480 return a (dd, di) tuple.
2486 return a (dd, di) tuple.
2481 - dd: extra bytes for the "data" file
2487 - dd: extra bytes for the "data" file
2482 - di: extra bytes for the "index" file
2488 - di: extra bytes for the "index" file
2483
2489
2484 A healthy revlog will return (0, 0).
2490 A healthy revlog will return (0, 0).
2485 """
2491 """
2486 expected = 0
2492 expected = 0
2487 if len(self):
2493 if len(self):
2488 expected = max(0, self.end(len(self) - 1))
2494 expected = max(0, self.end(len(self) - 1))
2489
2495
2490 try:
2496 try:
2491 with self._datafp() as f:
2497 with self._datafp() as f:
2492 f.seek(0, io.SEEK_END)
2498 f.seek(0, io.SEEK_END)
2493 actual = f.tell()
2499 actual = f.tell()
2494 dd = actual - expected
2500 dd = actual - expected
2495 except IOError as inst:
2501 except IOError as inst:
2496 if inst.errno != errno.ENOENT:
2502 if inst.errno != errno.ENOENT:
2497 raise
2503 raise
2498 dd = 0
2504 dd = 0
2499
2505
2500 try:
2506 try:
2501 f = self.opener(self.indexfile)
2507 f = self.opener(self.indexfile)
2502 f.seek(0, io.SEEK_END)
2508 f.seek(0, io.SEEK_END)
2503 actual = f.tell()
2509 actual = f.tell()
2504 f.close()
2510 f.close()
2505 s = self._io.size
2511 s = self._io.size
2506 i = max(0, actual // s)
2512 i = max(0, actual // s)
2507 di = actual - (i * s)
2513 di = actual - (i * s)
2508 if self._inline:
2514 if self._inline:
2509 databytes = 0
2515 databytes = 0
2510 for r in self:
2516 for r in self:
2511 databytes += max(0, self.length(r))
2517 databytes += max(0, self.length(r))
2512 dd = 0
2518 dd = 0
2513 di = actual - len(self) * s - databytes
2519 di = actual - len(self) * s - databytes
2514 except IOError as inst:
2520 except IOError as inst:
2515 if inst.errno != errno.ENOENT:
2521 if inst.errno != errno.ENOENT:
2516 raise
2522 raise
2517 di = 0
2523 di = 0
2518
2524
2519 return (dd, di)
2525 return (dd, di)
2520
2526
2521 def files(self):
2527 def files(self):
2522 res = [self.indexfile]
2528 res = [self.indexfile]
2523 if not self._inline:
2529 if not self._inline:
2524 res.append(self.datafile)
2530 res.append(self.datafile)
2525 return res
2531 return res
2526
2532
2527 def emitrevisions(
2533 def emitrevisions(
2528 self,
2534 self,
2529 nodes,
2535 nodes,
2530 nodesorder=None,
2536 nodesorder=None,
2531 revisiondata=False,
2537 revisiondata=False,
2532 assumehaveparentrevisions=False,
2538 assumehaveparentrevisions=False,
2533 deltamode=repository.CG_DELTAMODE_STD,
2539 deltamode=repository.CG_DELTAMODE_STD,
2534 ):
2540 ):
2535 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2541 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2536 raise error.ProgrammingError(
2542 raise error.ProgrammingError(
2537 b'unhandled value for nodesorder: %s' % nodesorder
2543 b'unhandled value for nodesorder: %s' % nodesorder
2538 )
2544 )
2539
2545
2540 if nodesorder is None and not self._generaldelta:
2546 if nodesorder is None and not self._generaldelta:
2541 nodesorder = b'storage'
2547 nodesorder = b'storage'
2542
2548
2543 if (
2549 if (
2544 not self._storedeltachains
2550 not self._storedeltachains
2545 and deltamode != repository.CG_DELTAMODE_PREV
2551 and deltamode != repository.CG_DELTAMODE_PREV
2546 ):
2552 ):
2547 deltamode = repository.CG_DELTAMODE_FULL
2553 deltamode = repository.CG_DELTAMODE_FULL
2548
2554
2549 return storageutil.emitrevisions(
2555 return storageutil.emitrevisions(
2550 self,
2556 self,
2551 nodes,
2557 nodes,
2552 nodesorder,
2558 nodesorder,
2553 revlogrevisiondelta,
2559 revlogrevisiondelta,
2554 deltaparentfn=self.deltaparent,
2560 deltaparentfn=self.deltaparent,
2555 candeltafn=self.candelta,
2561 candeltafn=self.candelta,
2556 rawsizefn=self.rawsize,
2562 rawsizefn=self.rawsize,
2557 revdifffn=self.revdiff,
2563 revdifffn=self.revdiff,
2558 flagsfn=self.flags,
2564 flagsfn=self.flags,
2559 deltamode=deltamode,
2565 deltamode=deltamode,
2560 revisiondata=revisiondata,
2566 revisiondata=revisiondata,
2561 assumehaveparentrevisions=assumehaveparentrevisions,
2567 assumehaveparentrevisions=assumehaveparentrevisions,
2562 )
2568 )
2563
2569
2564 DELTAREUSEALWAYS = b'always'
2570 DELTAREUSEALWAYS = b'always'
2565 DELTAREUSESAMEREVS = b'samerevs'
2571 DELTAREUSESAMEREVS = b'samerevs'
2566 DELTAREUSENEVER = b'never'
2572 DELTAREUSENEVER = b'never'
2567
2573
2568 DELTAREUSEFULLADD = b'fulladd'
2574 DELTAREUSEFULLADD = b'fulladd'
2569
2575
2570 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2576 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2571
2577
2572 def clone(
2578 def clone(
2573 self,
2579 self,
2574 tr,
2580 tr,
2575 destrevlog,
2581 destrevlog,
2576 addrevisioncb=None,
2582 addrevisioncb=None,
2577 deltareuse=DELTAREUSESAMEREVS,
2583 deltareuse=DELTAREUSESAMEREVS,
2578 forcedeltabothparents=None,
2584 forcedeltabothparents=None,
2579 sidedatacompanion=None,
2585 sidedatacompanion=None,
2580 ):
2586 ):
2581 """Copy this revlog to another, possibly with format changes.
2587 """Copy this revlog to another, possibly with format changes.
2582
2588
2583 The destination revlog will contain the same revisions and nodes.
2589 The destination revlog will contain the same revisions and nodes.
2584 However, it may not be bit-for-bit identical due to e.g. delta encoding
2590 However, it may not be bit-for-bit identical due to e.g. delta encoding
2585 differences.
2591 differences.
2586
2592
2587 The ``deltareuse`` argument control how deltas from the existing revlog
2593 The ``deltareuse`` argument control how deltas from the existing revlog
2588 are preserved in the destination revlog. The argument can have the
2594 are preserved in the destination revlog. The argument can have the
2589 following values:
2595 following values:
2590
2596
2591 DELTAREUSEALWAYS
2597 DELTAREUSEALWAYS
2592 Deltas will always be reused (if possible), even if the destination
2598 Deltas will always be reused (if possible), even if the destination
2593 revlog would not select the same revisions for the delta. This is the
2599 revlog would not select the same revisions for the delta. This is the
2594 fastest mode of operation.
2600 fastest mode of operation.
2595 DELTAREUSESAMEREVS
2601 DELTAREUSESAMEREVS
2596 Deltas will be reused if the destination revlog would pick the same
2602 Deltas will be reused if the destination revlog would pick the same
2597 revisions for the delta. This mode strikes a balance between speed
2603 revisions for the delta. This mode strikes a balance between speed
2598 and optimization.
2604 and optimization.
2599 DELTAREUSENEVER
2605 DELTAREUSENEVER
2600 Deltas will never be reused. This is the slowest mode of execution.
2606 Deltas will never be reused. This is the slowest mode of execution.
2601 This mode can be used to recompute deltas (e.g. if the diff/delta
2607 This mode can be used to recompute deltas (e.g. if the diff/delta
2602 algorithm changes).
2608 algorithm changes).
2603 DELTAREUSEFULLADD
2609 DELTAREUSEFULLADD
2604 Revision will be re-added as if their were new content. This is
2610 Revision will be re-added as if their were new content. This is
2605 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2611 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2606 eg: large file detection and handling.
2612 eg: large file detection and handling.
2607
2613
2608 Delta computation can be slow, so the choice of delta reuse policy can
2614 Delta computation can be slow, so the choice of delta reuse policy can
2609 significantly affect run time.
2615 significantly affect run time.
2610
2616
2611 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2617 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2612 two extremes. Deltas will be reused if they are appropriate. But if the
2618 two extremes. Deltas will be reused if they are appropriate. But if the
2613 delta could choose a better revision, it will do so. This means if you
2619 delta could choose a better revision, it will do so. This means if you
2614 are converting a non-generaldelta revlog to a generaldelta revlog,
2620 are converting a non-generaldelta revlog to a generaldelta revlog,
2615 deltas will be recomputed if the delta's parent isn't a parent of the
2621 deltas will be recomputed if the delta's parent isn't a parent of the
2616 revision.
2622 revision.
2617
2623
2618 In addition to the delta policy, the ``forcedeltabothparents``
2624 In addition to the delta policy, the ``forcedeltabothparents``
2619 argument controls whether to force compute deltas against both parents
2625 argument controls whether to force compute deltas against both parents
2620 for merges. By default, the current default is used.
2626 for merges. By default, the current default is used.
2621
2627
2622 If not None, the `sidedatacompanion` is callable that accept two
2628 If not None, the `sidedatacompanion` is callable that accept two
2623 arguments:
2629 arguments:
2624
2630
2625 (srcrevlog, rev)
2631 (srcrevlog, rev)
2626
2632
2627 and return a triplet that control changes to sidedata content from the
2633 and return a triplet that control changes to sidedata content from the
2628 old revision to the new clone result:
2634 old revision to the new clone result:
2629
2635
2630 (dropall, filterout, update)
2636 (dropall, filterout, update)
2631
2637
2632 * if `dropall` is True, all sidedata should be dropped
2638 * if `dropall` is True, all sidedata should be dropped
2633 * `filterout` is a set of sidedata keys that should be dropped
2639 * `filterout` is a set of sidedata keys that should be dropped
2634 * `update` is a mapping of additionnal/new key -> value
2640 * `update` is a mapping of additionnal/new key -> value
2635 """
2641 """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedatacompanion,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedatacompanion,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            sidedataactions = (False, [], {})
            if sidedatacompanion is not None:
                sidedataactions = sidedatacompanion(self, rev)

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
                dropall, filterout, update = sidedataactions
                text, sidedata = self._revisiondata(rev)
                if dropall:
                    sidedata = {}
                for key in filterout:
                    sidedata.pop(key, None)
                sidedata.update(update)
                if not sidedata:
                    sidedata = None
                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.rawdata(rev)

                ifh = destrevlog.opener(
                    destrevlog.indexfile, b'a+', checkambig=False
                )
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
                try:
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        ifh,
                        dfh,
                        deltacomputer=deltacomputer,
                    )
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)

    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs') % self.version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self.indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self.datafile, location=b'store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.indexfile, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d
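
A hedged sketch of how the delta-reuse policies documented in ``clone()``
might be exercised (editorial, not part of this commit; `opener` and the open
transaction `tr` are assumed to come from the surrounding repository code,
and the file names are hypothetical):

src = revlog(opener, b'data/foo.i')
dst = revlog(opener, b'data/foo.i.cloned')
src.clone(tr, dst, deltareuse=revlog.DELTAREUSENEVER)  # recompute all deltas
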
@@ -1,162 +1,195 @@
# nodemap.py - nodemap related code and utilities
#
# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
# Copyright 2019 George Racinet <georges.racinet@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from .. import (
    error,
    node as nodemod,
    pycompat,
)


class NodeMap(dict):
    def __missing__(self, x):
        raise error.RevlogError(b'unknown node: %s' % x)


def setup_persistent_nodemap(tr, revlog):
    """Install whatever is needed on the transaction side to persist a nodemap on disk

    (only actually persist the nodemap if this is relevant for this revlog)
    """
    if revlog.nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog
    callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
    if tr.hasfinalize(callback_id):
        return  # no need to register again
    tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))


def _persist_nodemap(tr, revlog):
    """Write nodemap data on disk for a given revlog
    """
    if getattr(revlog, 'filteredrevs', ()):
        raise error.ProgrammingError(
            "cannot persist nodemap of a filtered changelog"
        )
    if revlog.nodemap_file is None:
        msg = "calling persist nodemap on a revlog without the feature enabled"
        raise error.ProgrammingError(msg)
    data = persistent_data(revlog.index)
    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
    # store vfs
    with revlog.opener(revlog.nodemap_file, b'w') as f:
        f.write(data)
    # EXP-TODO: if the transaction aborts, we should remove the new data and
    # reinstall the old one. (This will be simpler when the file format gets
    # a bit more advanced)


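A hedged usage sketch of the two hooks above (editorial; `repo` and its
changelog's `nodemap_file` attribute are assumptions here): a writer registers
the finalize callback inside an open transaction, and the nodemap file is then
rewritten once when that transaction is finalized.

with repo.transaction(b'add-revision') as tr:
    setup_persistent_nodemap(tr, repo.changelog)
    # ... append revisions; _persist_nodemap runs when tr is finalized

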
### Nodemap Trie
#
# This is a simple reference implementation to compute and persist a nodemap
# trie. This reference implementation is write only. The python version of this
# is not expected to be actually used, since it won't provide a performance
# improvement over the existing non-persistent C implementation.
#
# The nodemap is persisted as a trie using 4-bit addresses and 16-entry blocks.
# Each revision can be addressed using the shortest unique prefix of its node.
#
# The trie is stored as a sequence of blocks. Each block contains 16 entries
# (signed 64bit integer, big endian). Each entry can be one of the following:
#
#  * value >= 0 -> index of sub-block
#  * value == -1 -> no value
#  * value <  -1 -> a revision value: rev = -(value+2)
#
# The implementation focuses on simplicity, not on performance. A Rust
# implementation should provide an efficient version of the same binary
# persistence. This reference python implementation is never meant to be
# used extensively in production.


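A hedged, self-contained sketch of reading one 16-entry block back, following
the format described above (the module itself is write-only, so this reader
is an assumption for illustration, not part of the commit):

import struct

S_BLOCK = struct.Struct(">" + ("l" * 16))
raw = S_BLOCK.pack(*([-1] * 15 + [-7]))  # a single revision in the last slot
for slot, value in enumerate(S_BLOCK.unpack(raw)):
    if value == -1:
        continue  # no entry for this hex digit
    elif value >= 0:
        print("slot %x -> sub-block %d" % (slot, value))
    else:
        print("slot %x -> revision %d" % (slot, -(value + 2)))  # rev 5 here

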
def persistent_data(index):
    """return the persistent binary form for a nodemap for a given index
    """
    trie = _build_trie(index)
    return _persist_trie(trie)


S_BLOCK = struct.Struct(">" + ("l" * 16))

NO_ENTRY = -1
# rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
REV_OFFSET = 2


def _transform_rev(rev):
    """Return the number used to represent the rev in the tree.

    (or retrieve a rev number from such representation)

    Note that this is an involution, a function equal to its inverse (i.e.
    which gives the identity when applied to itself).
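
    A minimal doctest (these values follow from REV_OFFSET == 2):

    >>> _transform_rev(0)
    -2
    >>> _transform_rev(_transform_rev(5))
    5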
69 """
102 """
70 return -(rev + REV_OFFSET)
103 return -(rev + REV_OFFSET)
71
104
72
105
73 def _to_int(hex_digit):
106 def _to_int(hex_digit):
74 """turn an hexadecimal digit into a proper integer"""
107 """turn an hexadecimal digit into a proper integer"""
75 return int(hex_digit, 16)
108 return int(hex_digit, 16)
76
109
77
110
def _build_trie(index):
    """build a nodemap trie

    The nodemap stores the revision number for each unique prefix.

    Each block is a dictionary with keys in `[0, 15]`. Values are either
    another block or a revision number.
    """
    root = {}
    for rev in range(len(index)):
        hex = nodemod.hex(index[rev][7])
        _insert_into_block(index, 0, root, rev, hex)
    return root


def _insert_into_block(index, level, block, current_rev, current_hex):
    """insert a new revision in a block

    index: the index we are adding a revision for
    level: the depth of the current block in the trie
    block: the block currently being considered
    current_rev: the revision number we are adding
    current_hex: the hexadecimal representation of the node of that revision
    """
    hex_digit = _to_int(current_hex[level : level + 1])
    entry = block.get(hex_digit)
    if entry is None:
        # no entry, simply store the revision number
        block[hex_digit] = current_rev
    elif isinstance(entry, dict):
        # need to recurse to an underlying block
        _insert_into_block(index, level + 1, entry, current_rev, current_hex)
    else:
        # collision with a previously unique prefix, inserting new
        # vertices to fit both entries.
        other_hex = nodemod.hex(index[entry][7])
        other_rev = entry
        new = {}
        block[hex_digit] = new
        _insert_into_block(index, level + 1, new, other_rev, other_hex)
        _insert_into_block(index, level + 1, new, current_rev, current_hex)


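A hedged, self-contained sketch of the collision branch above (assuming
`_build_trie` is importable; the two fake 20-byte nodes are illustrative):
both nodes share the leading hex digits "ab", so the second insertion
replaces the leaf with nested sub-blocks until the prefixes diverge.

node0 = b'\xab\x11' + b'\x00' * 18
node1 = b'\xab\x22' + b'\x00' * 18
fake_index = [(None,) * 7 + (node0,), (None,) * 7 + (node1,)]
assert _build_trie(fake_index) == {0xA: {0xB: {0x1: 0, 0x2: 1}}}

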
def _persist_trie(root):
    """turn a nodemap trie into persistent binary data

    See `_build_trie` for nodemap trie structure"""
    block_map = {}
    chunks = []
    for tn in _walk_trie(root):
        block_map[id(tn)] = len(chunks)
        chunks.append(_persist_block(tn, block_map))
    return b''.join(chunks)


def _walk_trie(block):
    """yield all the blocks in a trie

    Children blocks are always yielded before their parent block.
    """
    for (_, item) in sorted(block.items()):
        if isinstance(item, dict):
            for sub_block in _walk_trie(item):
                yield sub_block
    yield block


def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children blocks are assumed to be already persisted and present in
    block_map.
    """
    data = tuple(_to_value(block_node.get(i), block_map) for i in range(16))
    return S_BLOCK.pack(*data)


def _to_value(item, block_map):
    """persist any value as an integer"""
    if item is None:
        return NO_ENTRY
    elif isinstance(item, dict):
        return block_map[id(item)]
    else:
        return _transform_rev(item)
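
A hedged round-trip sketch (assuming the helpers above are importable): the
two-node trie from the previous sketch serializes to exactly three blocks,
children emitted before the root, as `_walk_trie` guarantees.

nodes = [b'\xab\x11' + b'\x00' * 18, b'\xab\x22' + b'\x00' * 18]
fake_index = [(None,) * 7 + (n,) for n in nodes]
data = persistent_data(fake_index)
assert len(data) == 3 * S_BLOCK.size  # leaf, intermediate, root blocks
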
@@ -1,26 +1,32 @@
===================================
Test the persistent on-disk nodemap
===================================


  $ hg init test-repo
  $ cd test-repo
  $ cat << EOF >> .hg/hgrc
  > [experimental]
  > exp-persistent-nodemap=yes
  > EOF
  $ hg debugbuilddag .+5000
  $ hg debugnodemap --dump | f --sha256 --size
  size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
  $ f --sha256 --bytes=256 --hexdump --size < .hg/store/00changelog.n
  size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
  0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
  0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
  0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
  0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
  0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
  00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
  00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
  00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|