revlogv2: delay the update of the changelog docket to transaction end...
marmoute
r48013:682f0985 default
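The change itself is small: while changelog index updates are delayed, `_write_docket` becomes a no-op, and the docket is instead written once from the transaction finalizer (`_finalize`). A minimal sketch of that pattern follows; `Transaction` and `DelayedDocketLog` are simplified stand-ins for illustration, not Mercurial APIs.

```python
# Sketch of the docket-delay pattern from this commit. `Transaction`
# and `DelayedDocketLog` are simplified stand-ins, not Mercurial APIs.


class Transaction:
    def __init__(self):
        self._finalizers = {}

    def addfinalize(self, key, callback):
        # the last registration for a given key wins, like tr.addfinalize
        self._finalizers[key] = callback

    def close(self):
        for callback in self._finalizers.values():
            callback(self)


class DelayedDocketLog:
    def __init__(self):
        self._delayed = False
        self.docket_writes = 0

    def _write_docket(self, tr):
        if self._delayed:
            return  # skip intermediate docket updates while delayed
        self.docket_writes += 1

    def delayupdate(self, tr):
        self._delayed = True
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        self._delayed = False
        self._write_docket(tr)  # single docket write at transaction end


tr = Transaction()
log = DelayedDocketLog()
log.delayupdate(tr)
log._write_docket(tr)  # no-op: docket update is delayed
log._write_docket(tr)  # still a no-op
tr.close()             # finalizer writes the docket exactly once
assert log.docket_writes == 1
```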
mercurial/changelog.py
@@ -1,627 +1,630 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 )
15 15 from .thirdparty import attr
16 16
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 metadata,
21 21 pycompat,
22 22 revlog,
23 23 )
24 24 from .utils import (
25 25 dateutil,
26 26 stringutil,
27 27 )
28 28 from .revlogutils import (
29 29 constants as revlog_constants,
30 30 flagutil,
31 31 )
32 32
33 33 _defaultextra = {b'branch': b'default'}
34 34
35 35
36 36 def _string_escape(text):
37 37 """
38 38 >>> from .pycompat import bytechr as chr
39 39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
40 40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
41 41 >>> s
42 42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
43 43 >>> res = _string_escape(s)
44 44 >>> s == _string_unescape(res)
45 45 True
46 46 """
47 47 # subset of the string_escape codec
48 48 text = (
49 49 text.replace(b'\\', b'\\\\')
50 50 .replace(b'\n', b'\\n')
51 51 .replace(b'\r', b'\\r')
52 52 )
53 53 return text.replace(b'\0', b'\\0')
54 54
55 55
56 56 def _string_unescape(text):
57 57 if b'\\0' in text:
58 58 # fix up \0 without getting into trouble with \\0
59 59 text = text.replace(b'\\\\', b'\\\\\n')
60 60 text = text.replace(b'\\0', b'\0')
61 61 text = text.replace(b'\n', b'')
62 62 return stringutil.unescapestr(text)
63 63
64 64
65 65 def decodeextra(text):
66 66 """
67 67 >>> from .pycompat import bytechr as chr
68 68 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
69 69 ... ).items())
70 70 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
71 71 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
72 72 ... b'baz': chr(92) + chr(0) + b'2'})
73 73 ... ).items())
74 74 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
75 75 """
76 76 extra = _defaultextra.copy()
77 77 for l in text.split(b'\0'):
78 78 if l:
79 79 k, v = _string_unescape(l).split(b':', 1)
80 80 extra[k] = v
81 81 return extra
82 82
83 83
84 84 def encodeextra(d):
85 85 # keys must be sorted to produce a deterministic changelog entry
86 86 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
87 87 return b"\0".join(items)
88 88
89 89
90 90 def stripdesc(desc):
91 91 """strip trailing whitespace and leading and trailing empty lines"""
92 92 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
93 93
94 94
95 95 class appender(object):
96 96 """the changelog index must be updated last on disk, so we use this class
97 97 to delay writes to it"""
98 98
99 99 def __init__(self, vfs, name, mode, buf):
100 100 self.data = buf
101 101 fp = vfs(name, mode)
102 102 self.fp = fp
103 103 self.offset = fp.tell()
104 104 self.size = vfs.fstat(fp).st_size
105 105 self._end = self.size
106 106
107 107 def end(self):
108 108 return self._end
109 109
110 110 def tell(self):
111 111 return self.offset
112 112
113 113 def flush(self):
114 114 pass
115 115
116 116 @property
117 117 def closed(self):
118 118 return self.fp.closed
119 119
120 120 def close(self):
121 121 self.fp.close()
122 122
123 123 def seek(self, offset, whence=0):
124 124 '''virtual file offset spans real file and data'''
125 125 if whence == 0:
126 126 self.offset = offset
127 127 elif whence == 1:
128 128 self.offset += offset
129 129 elif whence == 2:
130 130 self.offset = self.end() + offset
131 131 if self.offset < self.size:
132 132 self.fp.seek(self.offset)
133 133
134 134 def read(self, count=-1):
135 135 '''only trick here is reads that span real file and data'''
136 136 ret = b""
137 137 if self.offset < self.size:
138 138 s = self.fp.read(count)
139 139 ret = s
140 140 self.offset += len(s)
141 141 if count > 0:
142 142 count -= len(s)
143 143 if count != 0:
144 144 doff = self.offset - self.size
145 145 self.data.insert(0, b"".join(self.data))
146 146 del self.data[1:]
147 147 s = self.data[0][doff : doff + count]
148 148 self.offset += len(s)
149 149 ret += s
150 150 return ret
151 151
152 152 def write(self, s):
153 153 self.data.append(bytes(s))
154 154 self.offset += len(s)
155 155 self._end += len(s)
156 156
157 157 def __enter__(self):
158 158 self.fp.__enter__()
159 159 return self
160 160
161 161 def __exit__(self, *args):
162 162 return self.fp.__exit__(*args)
163 163
164 164
165 165 class _divertopener(object):
166 166 def __init__(self, opener, target):
167 167 self._opener = opener
168 168 self._target = target
169 169
170 170 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
171 171 if name != self._target:
172 172 return self._opener(name, mode, **kwargs)
173 173 return self._opener(name + b".a", mode, **kwargs)
174 174
175 175 def __getattr__(self, attr):
176 176 return getattr(self._opener, attr)
177 177
178 178
179 179 def _delayopener(opener, target, buf):
180 180 """build an opener that stores chunks in 'buf' instead of 'target'"""
181 181
182 182 def _delay(name, mode=b'r', checkambig=False, **kwargs):
183 183 if name != target:
184 184 return opener(name, mode, **kwargs)
185 185 assert not kwargs
186 186 return appender(opener, name, mode, buf)
187 187
188 188 return _delay
189 189
190 190
191 191 @attr.s
192 192 class _changelogrevision(object):
193 193 # Extensions might modify _defaultextra, so let the constructor below pass
194 194 # it in
195 195 extra = attr.ib()
196 196 manifest = attr.ib()
197 197 user = attr.ib(default=b'')
198 198 date = attr.ib(default=(0, 0))
199 199 files = attr.ib(default=attr.Factory(list))
200 200 filesadded = attr.ib(default=None)
201 201 filesremoved = attr.ib(default=None)
202 202 p1copies = attr.ib(default=None)
203 203 p2copies = attr.ib(default=None)
204 204 description = attr.ib(default=b'')
205 205 branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
206 206
207 207
208 208 class changelogrevision(object):
209 209 """Holds results of a parsed changelog revision.
210 210
211 211 Changelog revisions consist of multiple pieces of data, including
212 212 the manifest node, user, and date. This object exposes a view into
213 213 the parsed object.
214 214 """
215 215
216 216 __slots__ = (
217 217 '_offsets',
218 218 '_text',
219 219 '_sidedata',
220 220 '_cpsd',
221 221 '_changes',
222 222 )
223 223
224 224 def __new__(cls, cl, text, sidedata, cpsd):
225 225 if not text:
226 226 return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
227 227
228 228 self = super(changelogrevision, cls).__new__(cls)
229 229 # We could return here and implement the following as an __init__.
230 230 # But doing it here is equivalent and saves an extra function call.
231 231
232 232 # format used:
233 233 # nodeid\n : manifest node in ascii
234 234 # user\n : user, no \n or \r allowed
235 235 # time tz extra\n : date (time is int or float, timezone is int)
236 236 # : extra is metadata, encoded and separated by '\0'
237 237 # : older versions ignore it
238 238 # files\n\n : files modified by the cset, no \n or \r allowed
239 239 # (.*) : comment (free text, ideally utf-8)
240 240 #
241 241 # changelog v0 doesn't use extra
242 242
243 243 nl1 = text.index(b'\n')
244 244 nl2 = text.index(b'\n', nl1 + 1)
245 245 nl3 = text.index(b'\n', nl2 + 1)
246 246
247 247 # The list of files may be empty, in which case nl3 is the first
248 248 # newline of the double newline that precedes the description.
249 249 if text[nl3 + 1 : nl3 + 2] == b'\n':
250 250 doublenl = nl3
251 251 else:
252 252 doublenl = text.index(b'\n\n', nl3 + 1)
253 253
254 254 self._offsets = (nl1, nl2, nl3, doublenl)
255 255 self._text = text
256 256 self._sidedata = sidedata
257 257 self._cpsd = cpsd
258 258 self._changes = None
259 259
260 260 return self
261 261
262 262 @property
263 263 def manifest(self):
264 264 return bin(self._text[0 : self._offsets[0]])
265 265
266 266 @property
267 267 def user(self):
268 268 off = self._offsets
269 269 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
270 270
271 271 @property
272 272 def _rawdate(self):
273 273 off = self._offsets
274 274 dateextra = self._text[off[1] + 1 : off[2]]
275 275 return dateextra.split(b' ', 2)[0:2]
276 276
277 277 @property
278 278 def _rawextra(self):
279 279 off = self._offsets
280 280 dateextra = self._text[off[1] + 1 : off[2]]
281 281 fields = dateextra.split(b' ', 2)
282 282 if len(fields) != 3:
283 283 return None
284 284
285 285 return fields[2]
286 286
287 287 @property
288 288 def date(self):
289 289 raw = self._rawdate
290 290 time = float(raw[0])
291 291 # Various tools did silly things with the timezone.
292 292 try:
293 293 timezone = int(raw[1])
294 294 except ValueError:
295 295 timezone = 0
296 296
297 297 return time, timezone
298 298
299 299 @property
300 300 def extra(self):
301 301 raw = self._rawextra
302 302 if raw is None:
303 303 return _defaultextra
304 304
305 305 return decodeextra(raw)
306 306
307 307 @property
308 308 def changes(self):
309 309 if self._changes is not None:
310 310 return self._changes
311 311 if self._cpsd:
312 312 changes = metadata.decode_files_sidedata(self._sidedata)
313 313 else:
314 314 changes = metadata.ChangingFiles(
315 315 touched=self.files or (),
316 316 added=self.filesadded or (),
317 317 removed=self.filesremoved or (),
318 318 p1_copies=self.p1copies or {},
319 319 p2_copies=self.p2copies or {},
320 320 )
321 321 self._changes = changes
322 322 return changes
323 323
324 324 @property
325 325 def files(self):
326 326 if self._cpsd:
327 327 return sorted(self.changes.touched)
328 328 off = self._offsets
329 329 if off[2] == off[3]:
330 330 return []
331 331
332 332 return self._text[off[2] + 1 : off[3]].split(b'\n')
333 333
334 334 @property
335 335 def filesadded(self):
336 336 if self._cpsd:
337 337 return self.changes.added
338 338 else:
339 339 rawindices = self.extra.get(b'filesadded')
340 340 if rawindices is None:
341 341 return None
342 342 return metadata.decodefileindices(self.files, rawindices)
343 343
344 344 @property
345 345 def filesremoved(self):
346 346 if self._cpsd:
347 347 return self.changes.removed
348 348 else:
349 349 rawindices = self.extra.get(b'filesremoved')
350 350 if rawindices is None:
351 351 return None
352 352 return metadata.decodefileindices(self.files, rawindices)
353 353
354 354 @property
355 355 def p1copies(self):
356 356 if self._cpsd:
357 357 return self.changes.copied_from_p1
358 358 else:
359 359 rawcopies = self.extra.get(b'p1copies')
360 360 if rawcopies is None:
361 361 return None
362 362 return metadata.decodecopies(self.files, rawcopies)
363 363
364 364 @property
365 365 def p2copies(self):
366 366 if self._cpsd:
367 367 return self.changes.copied_from_p2
368 368 else:
369 369 rawcopies = self.extra.get(b'p2copies')
370 370 if rawcopies is None:
371 371 return None
372 372 return metadata.decodecopies(self.files, rawcopies)
373 373
374 374 @property
375 375 def description(self):
376 376 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
377 377
378 378 @property
379 379 def branchinfo(self):
380 380 extra = self.extra
381 381 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
382 382
383 383
384 384 class changelog(revlog.revlog):
385 385 def __init__(self, opener, trypending=False, concurrencychecker=None):
386 386 """Load a changelog revlog using an opener.
387 387
388 388 If ``trypending`` is true, we attempt to load the index from a
389 389 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
390 390 The ``00changelog.i.a`` file contains index (and possibly inline
391 391 revision) data for a transaction that hasn't been finalized yet.
392 392 It exists in a separate file to facilitate readers (such as
393 393 hook processes) accessing data before a transaction is finalized.
394 394
395 395 ``concurrencychecker`` will be passed to the revlog init function, see
396 396 the documentation there.
397 397 """
398 398
399 399 if trypending and opener.exists(b'00changelog.i.a'):
400 400 postfix = b'a'
401 401 else:
402 402 postfix = None
403 403
404 404 revlog.revlog.__init__(
405 405 self,
406 406 opener,
407 407 target=(revlog_constants.KIND_CHANGELOG, None),
408 408 radix=b'00changelog',
409 409 postfix=postfix,
410 410 checkambig=True,
411 411 mmaplargeindex=True,
412 412 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
413 413 concurrencychecker=concurrencychecker,
414 414 )
415 415
416 416 if self._initempty and (self._format_version == revlog.REVLOGV1):
417 417 # changelogs don't benefit from generaldelta.
418 418
419 419 self._format_flags &= ~revlog.FLAG_GENERALDELTA
420 420 self._generaldelta = False
421 421
422 422 # Delta chains for changelogs tend to be very small because entries
423 423 # tend to be small and don't delta well with each other. So disable delta
424 424 # chains.
425 425 self._storedeltachains = False
426 426
427 427 self._realopener = opener
428 428 self._delayed = False
429 429 self._delaybuf = None
430 430 self._divert = False
431 431 self._filteredrevs = frozenset()
432 432 self._filteredrevs_hashcache = {}
433 433 self._copiesstorage = opener.options.get(b'copies-storage')
434 434
435 435 @property
436 436 def filteredrevs(self):
437 437 return self._filteredrevs
438 438
439 439 @filteredrevs.setter
440 440 def filteredrevs(self, val):
441 441 # Ensure all updates go through this function
442 442 assert isinstance(val, frozenset)
443 443 self._filteredrevs = val
444 444 self._filteredrevs_hashcache = {}
445 445
446 def _write_docket(self, tr):
447 if not self._delayed:
448 super(changelog, self)._write_docket(tr)
449
446 450 def delayupdate(self, tr):
447 451 """delay visibility of index updates to other readers"""
448 if self._docket is not None:
449 return
450
451 if not self._delayed:
452 if self._docket is None and not self._delayed:
452 453 if len(self) == 0:
453 454 self._divert = True
454 455 if self._realopener.exists(self._indexfile + b'.a'):
455 456 self._realopener.unlink(self._indexfile + b'.a')
456 457 self.opener = _divertopener(self._realopener, self._indexfile)
457 458 else:
458 459 self._delaybuf = []
459 460 self.opener = _delayopener(
460 461 self._realopener, self._indexfile, self._delaybuf
461 462 )
462 463 self._delayed = True
463 464 tr.addpending(b'cl-%i' % id(self), self._writepending)
464 465 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
465 466
466 467 def _finalize(self, tr):
467 468 """finalize index updates"""
468 469 self._delayed = False
469 470 self.opener = self._realopener
470 471 # move redirected index data back into place
471 if self._divert:
472 if self._docket is not None:
473 self._write_docket(tr)
474 elif self._divert:
472 475 assert not self._delaybuf
473 476 tmpname = self._indexfile + b".a"
474 477 nfile = self.opener.open(tmpname)
475 478 nfile.close()
476 479 self.opener.rename(tmpname, self._indexfile, checkambig=True)
477 480 elif self._delaybuf:
478 481 fp = self.opener(self._indexfile, b'a', checkambig=True)
479 482 fp.write(b"".join(self._delaybuf))
480 483 fp.close()
481 484 self._delaybuf = None
482 485 self._divert = False
483 486 # split when we're done
484 487 self._enforceinlinesize(tr)
485 488
486 489 def _writepending(self, tr):
487 490 """create a file containing the unfinalized state for
488 491 pretxnchangegroup"""
489 492 if self._delaybuf:
490 493 # make a temporary copy of the index
491 494 fp1 = self._realopener(self._indexfile)
492 495 pendingfilename = self._indexfile + b".a"
493 496 # register as a temp file to ensure cleanup on failure
494 497 tr.registertmp(pendingfilename)
495 498 # write existing data
496 499 fp2 = self._realopener(pendingfilename, b"w")
497 500 fp2.write(fp1.read())
498 501 # add pending data
499 502 fp2.write(b"".join(self._delaybuf))
500 503 fp2.close()
501 504 # switch modes so finalize can simply rename
502 505 self._delaybuf = None
503 506 self._divert = True
504 507 self.opener = _divertopener(self._realopener, self._indexfile)
505 508
506 509 if self._divert:
507 510 return True
508 511
509 512 return False
510 513
511 514 def _enforceinlinesize(self, tr):
512 515 if not self._delayed:
513 516 revlog.revlog._enforceinlinesize(self, tr)
514 517
515 518 def read(self, nodeorrev):
516 519 """Obtain data from a parsed changelog revision.
517 520
518 521 Returns a 6-tuple of:
519 522
520 523 - manifest node in binary
521 524 - author/user as a localstr
522 525 - date as a 2-tuple of (time, timezone)
523 526 - list of files
524 527 - commit message as a localstr
525 528 - dict of extra metadata
526 529
527 530 Unless you need to access all fields, consider calling
528 531 ``changelogrevision`` instead, as it is faster for partial object
529 532 access.
530 533 """
531 534 d, s = self._revisiondata(nodeorrev)
532 535 c = changelogrevision(
533 536 self, d, s, self._copiesstorage == b'changeset-sidedata'
534 537 )
535 538 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
536 539
537 540 def changelogrevision(self, nodeorrev):
538 541 """Obtain a ``changelogrevision`` for a node or revision."""
539 542 text, sidedata = self._revisiondata(nodeorrev)
540 543 return changelogrevision(
541 544 self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
542 545 )
543 546
544 547 def readfiles(self, nodeorrev):
545 548 """
546 549 short version of read that only returns the files modified by the cset
547 550 """
548 551 text = self.revision(nodeorrev)
549 552 if not text:
550 553 return []
551 554 last = text.index(b"\n\n")
552 555 l = text[:last].split(b'\n')
553 556 return l[3:]
554 557
555 558 def add(
556 559 self,
557 560 manifest,
558 561 files,
559 562 desc,
560 563 transaction,
561 564 p1,
562 565 p2,
563 566 user,
564 567 date=None,
565 568 extra=None,
566 569 ):
567 570 # Convert to UTF-8 encoded bytestrings as the very first
568 571 # thing: calling any method on a localstr object will turn it
569 572 # into a str object and the cached UTF-8 string is thus lost.
570 573 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
571 574
572 575 user = user.strip()
573 576 # An empty username or a username with a "\n" will make the
574 577 # revision text contain two "\n\n" sequences -> corrupt
575 578 # repository since read cannot unpack the revision.
576 579 if not user:
577 580 raise error.StorageError(_(b"empty username"))
578 581 if b"\n" in user:
579 582 raise error.StorageError(
580 583 _(b"username %r contains a newline") % pycompat.bytestr(user)
581 584 )
582 585
583 586 desc = stripdesc(desc)
584 587
585 588 if date:
586 589 parseddate = b"%d %d" % dateutil.parsedate(date)
587 590 else:
588 591 parseddate = b"%d %d" % dateutil.makedate()
589 592 if extra:
590 593 branch = extra.get(b"branch")
591 594 if branch in (b"default", b""):
592 595 del extra[b"branch"]
593 596 elif branch in (b".", b"null", b"tip"):
594 597 raise error.StorageError(
595 598 _(b'the name \'%s\' is reserved') % branch
596 599 )
597 600 sortedfiles = sorted(files.touched)
598 601 flags = 0
599 602 sidedata = None
600 603 if self._copiesstorage == b'changeset-sidedata':
601 604 if files.has_copies_info:
602 605 flags |= flagutil.REVIDX_HASCOPIESINFO
603 606 sidedata = metadata.encode_files_sidedata(files)
604 607
605 608 if extra:
606 609 extra = encodeextra(extra)
607 610 parseddate = b"%s %s" % (parseddate, extra)
608 611 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
609 612 text = b"\n".join(l)
610 613 rev = self.addrevision(
611 614 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
612 615 )
613 616 return self.node(rev)
614 617
615 618 def branchinfo(self, rev):
616 619 """return the branch name and open/close state of a revision
617 620
618 621 This function exists because creating a changectx object
619 622 just to access this is costly."""
620 623 return self.changelogrevision(rev).branchinfo
621 624
622 625 def _nodeduplicatecallback(self, transaction, rev):
623 626 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
624 627 #
625 628 # We track them in a list to preserve their order from the source bundle
626 629 duplicates = transaction.changes.setdefault(b'revduplicates', [])
627 630 duplicates.append(rev)
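The `appender` / `_delayopener` machinery above exists so that pending changelog data stays invisible to concurrent readers until the transaction finalizes: writes accumulate in a buffer and are appended to the real file only at the end. A minimal sketch of that idea; `BufferedAppender` is a hypothetical simplification, not the actual `appender` class.

```python
# Sketch of the delayed-write idea used by the changelog code above:
# writes go to an in-memory buffer instead of the real file, so readers
# see no pending data until finalize() appends it in one pass.
# `BufferedAppender` is a simplified stand-in, not Mercurial's appender.
import io


class BufferedAppender:
    def __init__(self, real, buf):
        self.real = real  # the real (shared) file object
        self.buf = buf    # pending chunks, cf. changelog._delaybuf

    def write(self, data):
        self.buf.append(bytes(data))

    def finalize(self):
        # analogous to the `elif self._delaybuf:` branch of _finalize:
        # append all buffered data to the real file at transaction end
        self.real.seek(0, io.SEEK_END)
        self.real.write(b"".join(self.buf))
        del self.buf[:]


real = io.BytesIO(b"existing-index|")
fp = BufferedAppender(real, [])
fp.write(b"rev1|")
fp.write(b"rev2|")
assert real.getvalue() == b"existing-index|"  # readers see nothing yet
fp.finalize()
assert real.getvalue() == b"existing-index|rev1|rev2|"
```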
mercurial/configitems.py
@@ -1,2699 +1,2698 @@
1 1 # configitems.py - centralized declaration of configuration options
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match name using regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string produces less surprising
92 92 # results for users writing a simple regex for a sub-attribute.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" in
98 98 # some patterns to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'convert',
574 574 b'svn.dangerous-set-commit-dates',
575 575 default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'debug',
579 579 b'dirstate.delaywrite',
580 580 default=0,
581 581 )
582 582 coreconfigitem(
583 583 b'debug',
584 584 b'revlog.verifyposition.changelog',
585 585 default=b'',
586 586 )
587 587 coreconfigitem(
588 588 b'defaults',
589 589 b'.*',
590 590 default=None,
591 591 generic=True,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'all-warnings',
596 596 default=False,
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'bundle2.debug',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'bundle.delta',
606 606 default=b'',
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'cache-vfs',
611 611 default=None,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'check-locks',
616 616 default=False,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'check-relroot',
621 621 default=False,
622 622 )
623 623 # Track copy information for all files, not just "added" ones (very slow)
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'copy-tracing.trace-all-files',
627 627 default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'default-date',
632 632 default=None,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'deprec-warn',
637 637 default=False,
638 638 )
639 639 coreconfigitem(
640 640 b'devel',
641 641 b'disableloaddefaultcerts',
642 642 default=False,
643 643 )
644 644 coreconfigitem(
645 645 b'devel',
646 646 b'warn-empty-changegroup',
647 647 default=False,
648 648 )
649 649 coreconfigitem(
650 650 b'devel',
651 651 b'legacy.exchange',
652 652 default=list,
653 653 )
654 654 # When True, revlogs use a special reference version of the nodemap that is not
655 655 # performant but is "known" to behave properly.
656 656 coreconfigitem(
657 657 b'devel',
658 658 b'persistent-nodemap',
659 659 default=False,
660 660 )
661 661 coreconfigitem(
662 662 b'devel',
663 663 b'servercafile',
664 664 default=b'',
665 665 )
666 666 coreconfigitem(
667 667 b'devel',
668 668 b'serverexactprotocol',
669 669 default=b'',
670 670 )
671 671 coreconfigitem(
672 672 b'devel',
673 673 b'serverrequirecert',
674 674 default=False,
675 675 )
676 676 coreconfigitem(
677 677 b'devel',
678 678 b'strip-obsmarkers',
679 679 default=True,
680 680 )
681 681 coreconfigitem(
682 682 b'devel',
683 683 b'warn-config',
684 684 default=None,
685 685 )
686 686 coreconfigitem(
687 687 b'devel',
688 688 b'warn-config-default',
689 689 default=None,
690 690 )
691 691 coreconfigitem(
692 692 b'devel',
693 693 b'user.obsmarker',
694 694 default=None,
695 695 )
696 696 coreconfigitem(
697 697 b'devel',
698 698 b'warn-config-unknown',
699 699 default=None,
700 700 )
701 701 coreconfigitem(
702 702 b'devel',
703 703 b'debug.copies',
704 704 default=False,
705 705 )
706 706 coreconfigitem(
707 707 b'devel',
708 708 b'copy-tracing.multi-thread',
709 709 default=True,
710 710 )
711 711 coreconfigitem(
712 712 b'devel',
713 713 b'debug.extensions',
714 714 default=False,
715 715 )
716 716 coreconfigitem(
717 717 b'devel',
718 718 b'debug.repo-filters',
719 719 default=False,
720 720 )
721 721 coreconfigitem(
722 722 b'devel',
723 723 b'debug.peer-request',
724 724 default=False,
725 725 )
726 726 # If discovery.exchange-heads is False, the discovery will not start with
727 727 # remote head fetching and local head querying.
728 728 coreconfigitem(
729 729 b'devel',
730 730 b'discovery.exchange-heads',
731 731 default=True,
732 732 )
733 733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 734 # not be increased through the process
735 735 coreconfigitem(
736 736 b'devel',
737 737 b'discovery.grow-sample',
738 738 default=True,
739 739 )
740 740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
741 741 # adapted to the shape of the undecided set (it is set to the max of:
742 742 # <target-size>, len(roots(undecided)), len(heads(undecided)))
743 743 coreconfigitem(
744 744 b'devel',
745 745 b'discovery.grow-sample.dynamic',
746 746 default=True,
747 747 )
748 748 # discovery.grow-sample.rate controls the rate at which the sample grows
749 749 coreconfigitem(
750 750 b'devel',
751 751 b'discovery.grow-sample.rate',
752 752 default=1.05,
753 753 )
754 754 # If discovery.randomize is False, random sampling during discovery is
755 755 # deterministic. It is meant for integration tests.
756 756 coreconfigitem(
757 757 b'devel',
758 758 b'discovery.randomize',
759 759 default=True,
760 760 )
761 761 # Control the initial size of the discovery sample
762 762 coreconfigitem(
763 763 b'devel',
764 764 b'discovery.sample-size',
765 765 default=200,
766 766 )
767 767 # Control the size of the discovery sample used for the initial round
768 768 coreconfigitem(
769 769 b'devel',
770 770 b'discovery.sample-size.initial',
771 771 default=100,
772 772 )
773 773 _registerdiffopts(section=b'diff')
774 774 coreconfigitem(
775 775 b'diff',
776 776 b'merge',
777 777 default=False,
778 778 experimental=True,
779 779 )
780 780 coreconfigitem(
781 781 b'email',
782 782 b'bcc',
783 783 default=None,
784 784 )
785 785 coreconfigitem(
786 786 b'email',
787 787 b'cc',
788 788 default=None,
789 789 )
790 790 coreconfigitem(
791 791 b'email',
792 792 b'charsets',
793 793 default=list,
794 794 )
795 795 coreconfigitem(
796 796 b'email',
797 797 b'from',
798 798 default=None,
799 799 )
800 800 coreconfigitem(
801 801 b'email',
802 802 b'method',
803 803 default=b'smtp',
804 804 )
805 805 coreconfigitem(
806 806 b'email',
807 807 b'reply-to',
808 808 default=None,
809 809 )
810 810 coreconfigitem(
811 811 b'email',
812 812 b'to',
813 813 default=None,
814 814 )
815 815 coreconfigitem(
816 816 b'experimental',
817 817 b'archivemetatemplate',
818 818 default=dynamicdefault,
819 819 )
820 820 coreconfigitem(
821 821 b'experimental',
822 822 b'auto-publish',
823 823 default=b'publish',
824 824 )
825 825 coreconfigitem(
826 826 b'experimental',
827 827 b'bundle-phases',
828 828 default=False,
829 829 )
830 830 coreconfigitem(
831 831 b'experimental',
832 832 b'bundle2-advertise',
833 833 default=True,
834 834 )
835 835 coreconfigitem(
836 836 b'experimental',
837 837 b'bundle2-output-capture',
838 838 default=False,
839 839 )
840 840 coreconfigitem(
841 841 b'experimental',
842 842 b'bundle2.pushback',
843 843 default=False,
844 844 )
845 845 coreconfigitem(
846 846 b'experimental',
847 847 b'bundle2lazylocking',
848 848 default=False,
849 849 )
850 850 coreconfigitem(
851 851 b'experimental',
852 852 b'bundlecomplevel',
853 853 default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'experimental',
857 857 b'bundlecomplevel.bzip2',
858 858 default=None,
859 859 )
860 860 coreconfigitem(
861 861 b'experimental',
862 862 b'bundlecomplevel.gzip',
863 863 default=None,
864 864 )
865 865 coreconfigitem(
866 866 b'experimental',
867 867 b'bundlecomplevel.none',
868 868 default=None,
869 869 )
870 870 coreconfigitem(
871 871 b'experimental',
872 872 b'bundlecomplevel.zstd',
873 873 default=None,
874 874 )
875 875 coreconfigitem(
876 876 b'experimental',
877 877 b'bundlecompthreads',
878 878 default=None,
879 879 )
880 880 coreconfigitem(
881 881 b'experimental',
882 882 b'bundlecompthreads.bzip2',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'experimental',
887 887 b'bundlecompthreads.gzip',
888 888 default=None,
889 889 )
890 890 coreconfigitem(
891 891 b'experimental',
892 892 b'bundlecompthreads.none',
893 893 default=None,
894 894 )
895 895 coreconfigitem(
896 896 b'experimental',
897 897 b'bundlecompthreads.zstd',
898 898 default=None,
899 899 )
900 900 coreconfigitem(
901 901 b'experimental',
902 902 b'changegroup3',
903 903 default=False,
904 904 )
905 905 coreconfigitem(
906 906 b'experimental',
907 907 b'changegroup4',
908 908 default=False,
909 909 )
910 910 coreconfigitem(
911 911 b'experimental',
912 912 b'cleanup-as-archived',
913 913 default=False,
914 914 )
915 915 coreconfigitem(
916 916 b'experimental',
917 917 b'clientcompressionengines',
918 918 default=list,
919 919 )
920 920 coreconfigitem(
921 921 b'experimental',
922 922 b'copytrace',
923 923 default=b'on',
924 924 )
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'copytrace.movecandidateslimit',
928 928 default=100,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'copytrace.sourcecommitlimit',
933 933 default=100,
934 934 )
935 935 coreconfigitem(
936 936 b'experimental',
937 937 b'copies.read-from',
938 938 default=b"filelog-only",
939 939 )
940 940 coreconfigitem(
941 941 b'experimental',
942 942 b'copies.write-to',
943 943 default=b'filelog-only',
944 944 )
945 945 coreconfigitem(
946 946 b'experimental',
947 947 b'crecordtest',
948 948 default=None,
949 949 )
950 950 coreconfigitem(
951 951 b'experimental',
952 952 b'directaccess',
953 953 default=False,
954 954 )
955 955 coreconfigitem(
956 956 b'experimental',
957 957 b'directaccess.revnums',
958 958 default=False,
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 962 b'dirstate-tree.in-memory',
963 963 default=False,
964 964 )
965 965 coreconfigitem(
966 966 b'experimental',
967 967 b'editortmpinhg',
968 968 default=False,
969 969 )
970 970 coreconfigitem(
971 971 b'experimental',
972 972 b'evolution',
973 973 default=list,
974 974 )
975 975 coreconfigitem(
976 976 b'experimental',
977 977 b'evolution.allowdivergence',
978 978 default=False,
979 979 alias=[(b'experimental', b'allowdivergence')],
980 980 )
981 981 coreconfigitem(
982 982 b'experimental',
983 983 b'evolution.allowunstable',
984 984 default=None,
985 985 )
986 986 coreconfigitem(
987 987 b'experimental',
988 988 b'evolution.createmarkers',
989 989 default=None,
990 990 )
991 991 coreconfigitem(
992 992 b'experimental',
993 993 b'evolution.effect-flags',
994 994 default=True,
995 995 alias=[(b'experimental', b'effect-flags')],
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'evolution.exchange',
1000 1000 default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'evolution.bundle-obsmarker',
1005 1005 default=False,
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'evolution.bundle-obsmarker:mandatory',
1010 1010 default=True,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'log.topo',
1015 1015 default=False,
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'experimental',
1019 1019 b'evolution.report-instabilities',
1020 1020 default=True,
1021 1021 )
1022 1022 coreconfigitem(
1023 1023 b'experimental',
1024 1024 b'evolution.track-operation',
1025 1025 default=True,
1026 1026 )
1027 1027 # repo-level config to exclude a revset from visibility
1028 1028 #
1029 1029 # The target use case is to use `share` to expose different subset of the same
1030 1030 # repository, especially server side. See also `server.view`.
1031 1031 coreconfigitem(
1032 1032 b'experimental',
1033 1033 b'extra-filter-revs',
1034 1034 default=None,
1035 1035 )
1036 1036 coreconfigitem(
1037 1037 b'experimental',
1038 1038 b'maxdeltachainspan',
1039 1039 default=-1,
1040 1040 )
1041 1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 1042 # kept/undeleted them) and creates new filenodes for them
1043 1043 coreconfigitem(
1044 1044 b'experimental',
1045 1045 b'merge-track-salvaged',
1046 1046 default=False,
1047 1047 )
1048 1048 coreconfigitem(
1049 1049 b'experimental',
1050 1050 b'mergetempdirprefix',
1051 1051 default=None,
1052 1052 )
1053 1053 coreconfigitem(
1054 1054 b'experimental',
1055 1055 b'mmapindexthreshold',
1056 1056 default=None,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'experimental',
1060 1060 b'narrow',
1061 1061 default=False,
1062 1062 )
1063 1063 coreconfigitem(
1064 1064 b'experimental',
1065 1065 b'nonnormalparanoidcheck',
1066 1066 default=False,
1067 1067 )
1068 1068 coreconfigitem(
1069 1069 b'experimental',
1070 1070 b'exportableenviron',
1071 1071 default=list,
1072 1072 )
1073 1073 coreconfigitem(
1074 1074 b'experimental',
1075 1075 b'extendedheader.index',
1076 1076 default=None,
1077 1077 )
1078 1078 coreconfigitem(
1079 1079 b'experimental',
1080 1080 b'extendedheader.similarity',
1081 1081 default=False,
1082 1082 )
1083 1083 coreconfigitem(
1084 1084 b'experimental',
1085 1085 b'graphshorten',
1086 1086 default=False,
1087 1087 )
1088 1088 coreconfigitem(
1089 1089 b'experimental',
1090 1090 b'graphstyle.parent',
1091 1091 default=dynamicdefault,
1092 1092 )
1093 1093 coreconfigitem(
1094 1094 b'experimental',
1095 1095 b'graphstyle.missing',
1096 1096 default=dynamicdefault,
1097 1097 )
1098 1098 coreconfigitem(
1099 1099 b'experimental',
1100 1100 b'graphstyle.grandparent',
1101 1101 default=dynamicdefault,
1102 1102 )
1103 1103 coreconfigitem(
1104 1104 b'experimental',
1105 1105 b'hook-track-tags',
1106 1106 default=False,
1107 1107 )
1108 1108 coreconfigitem(
1109 1109 b'experimental',
1110 1110 b'httppeer.advertise-v2',
1111 1111 default=False,
1112 1112 )
1113 1113 coreconfigitem(
1114 1114 b'experimental',
1115 1115 b'httppeer.v2-encoder-order',
1116 1116 default=None,
1117 1117 )
1118 1118 coreconfigitem(
1119 1119 b'experimental',
1120 1120 b'httppostargs',
1121 1121 default=False,
1122 1122 )
1123 1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125 1125
1126 1126 coreconfigitem(
1127 1127 b'experimental',
1128 1128 b'obsmarkers-exchange-debug',
1129 1129 default=False,
1130 1130 )
1131 1131 coreconfigitem(
1132 1132 b'experimental',
1133 1133 b'remotenames',
1134 1134 default=False,
1135 1135 )
1136 1136 coreconfigitem(
1137 1137 b'experimental',
1138 1138 b'removeemptydirs',
1139 1139 default=True,
1140 1140 )
1141 1141 coreconfigitem(
1142 1142 b'experimental',
1143 1143 b'revert.interactive.select-to-keep',
1144 1144 default=False,
1145 1145 )
1146 1146 coreconfigitem(
1147 1147 b'experimental',
1148 1148 b'revisions.prefixhexnode',
1149 1149 default=False,
1150 1150 )
1151 1151 # "out of experimental" todo list.
1152 1152 #
1153 # * properly hide uncommitted content from other processes
1154 1153 # * expose transaction content hooks during pre-commit validation
1155 1154 # * include management of a persistent nodemap in the main docket
1156 1155 # * enforce a "no-truncate" policy for mmap safety
1157 1156 # - for censoring operation
1158 1157 # - for stripping operation
1159 1158 # - for rollback operation
1160 1159 # * proper streaming (race free) of the docket file
1161 1160 # * store the data size in the docket to simplify sidedata rewrite.
1162 1161 # * track garbage data to eventually allow rewriting -existing- sidedata.
1163 1162 # * Exchange-wise, we will also need to do something more efficient than
1164 1163 # keeping references to the affected revlogs, especially memory-wise when
1165 1164 # rewriting sidedata.
1166 1165 # * sidedata compression
1167 1166 # * introduce a proper solution to reduce the number of filelog related files.
1168 1167 # * Improvements to consider
1169 1168 # - track compression mode in the index entries instead of the chunks
1170 1169 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1171 1170 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1172 1171 # - keep track of chain base or size (probably not that useful anymore)
1173 1172 # - store data and sidedata in different files
1174 1173 coreconfigitem(
1175 1174 b'experimental',
1176 1175 b'revlogv2',
1177 1176 default=None,
1178 1177 )
1179 1178 coreconfigitem(
1180 1179 b'experimental',
1181 1180 b'revisions.disambiguatewithin',
1182 1181 default=None,
1183 1182 )
1184 1183 coreconfigitem(
1185 1184 b'experimental',
1186 1185 b'rust.index',
1187 1186 default=False,
1188 1187 )
1189 1188 coreconfigitem(
1190 1189 b'experimental',
1191 1190 b'server.filesdata.recommended-batch-size',
1192 1191 default=50000,
1193 1192 )
1194 1193 coreconfigitem(
1195 1194 b'experimental',
1196 1195 b'server.manifestdata.recommended-batch-size',
1197 1196 default=100000,
1198 1197 )
1199 1198 coreconfigitem(
1200 1199 b'experimental',
1201 1200 b'server.stream-narrow-clones',
1202 1201 default=False,
1203 1202 )
1204 1203 coreconfigitem(
1205 1204 b'experimental',
1206 1205 b'single-head-per-branch',
1207 1206 default=False,
1208 1207 )
1209 1208 coreconfigitem(
1210 1209 b'experimental',
1211 1210 b'single-head-per-branch:account-closed-heads',
1212 1211 default=False,
1213 1212 )
1214 1213 coreconfigitem(
1215 1214 b'experimental',
1216 1215 b'single-head-per-branch:public-changes-only',
1217 1216 default=False,
1218 1217 )
1219 1218 coreconfigitem(
1220 1219 b'experimental',
1221 1220 b'sshserver.support-v2',
1222 1221 default=False,
1223 1222 )
1224 1223 coreconfigitem(
1225 1224 b'experimental',
1226 1225 b'sparse-read',
1227 1226 default=False,
1228 1227 )
1229 1228 coreconfigitem(
1230 1229 b'experimental',
1231 1230 b'sparse-read.density-threshold',
1232 1231 default=0.50,
1233 1232 )
1234 1233 coreconfigitem(
1235 1234 b'experimental',
1236 1235 b'sparse-read.min-gap-size',
1237 1236 default=b'65K',
1238 1237 )
1239 1238 coreconfigitem(
1240 1239 b'experimental',
1241 1240 b'treemanifest',
1242 1241 default=False,
1243 1242 )
1244 1243 coreconfigitem(
1245 1244 b'experimental',
1246 1245 b'update.atomic-file',
1247 1246 default=False,
1248 1247 )
1249 1248 coreconfigitem(
1250 1249 b'experimental',
1251 1250 b'sshpeer.advertise-v2',
1252 1251 default=False,
1253 1252 )
1254 1253 coreconfigitem(
1255 1254 b'experimental',
1256 1255 b'web.apiserver',
1257 1256 default=False,
1258 1257 )
1259 1258 coreconfigitem(
1260 1259 b'experimental',
1261 1260 b'web.api.http-v2',
1262 1261 default=False,
1263 1262 )
1264 1263 coreconfigitem(
1265 1264 b'experimental',
1266 1265 b'web.api.debugreflect',
1267 1266 default=False,
1268 1267 )
1269 1268 coreconfigitem(
1270 1269 b'experimental',
1271 1270 b'worker.wdir-get-thread-safe',
1272 1271 default=False,
1273 1272 )
1274 1273 coreconfigitem(
1275 1274 b'experimental',
1276 1275 b'worker.repository-upgrade',
1277 1276 default=False,
1278 1277 )
1279 1278 coreconfigitem(
1280 1279 b'experimental',
1281 1280 b'xdiff',
1282 1281 default=False,
1283 1282 )
1284 1283 coreconfigitem(
1285 1284 b'extensions',
1286 1285 b'.*',
1287 1286 default=None,
1288 1287 generic=True,
1289 1288 )
1290 1289 coreconfigitem(
1291 1290 b'extdata',
1292 1291 b'.*',
1293 1292 default=None,
1294 1293 generic=True,
1295 1294 )
1296 1295 coreconfigitem(
1297 1296 b'format',
1298 1297 b'bookmarks-in-store',
1299 1298 default=False,
1300 1299 )
1301 1300 coreconfigitem(
1302 1301 b'format',
1303 1302 b'chunkcachesize',
1304 1303 default=None,
1305 1304 experimental=True,
1306 1305 )
1307 1306 coreconfigitem(
1308 1307 b'format',
1309 1308 b'dotencode',
1310 1309 default=True,
1311 1310 )
1312 1311 coreconfigitem(
1313 1312 b'format',
1314 1313 b'generaldelta',
1315 1314 default=False,
1316 1315 experimental=True,
1317 1316 )
1318 1317 coreconfigitem(
1319 1318 b'format',
1320 1319 b'manifestcachesize',
1321 1320 default=None,
1322 1321 experimental=True,
1323 1322 )
1324 1323 coreconfigitem(
1325 1324 b'format',
1326 1325 b'maxchainlen',
1327 1326 default=dynamicdefault,
1328 1327 experimental=True,
1329 1328 )
1330 1329 coreconfigitem(
1331 1330 b'format',
1332 1331 b'obsstore-version',
1333 1332 default=None,
1334 1333 )
1335 1334 coreconfigitem(
1336 1335 b'format',
1337 1336 b'sparse-revlog',
1338 1337 default=True,
1339 1338 )
1340 1339 coreconfigitem(
1341 1340 b'format',
1342 1341 b'revlog-compression',
1343 1342 default=lambda: [b'zstd', b'zlib'],
1344 1343 alias=[(b'experimental', b'format.compression')],
1345 1344 )
1346 1345 coreconfigitem(
1347 1346 b'format',
1348 1347 b'usefncache',
1349 1348 default=True,
1350 1349 )
1351 1350 coreconfigitem(
1352 1351 b'format',
1353 1352 b'usegeneraldelta',
1354 1353 default=True,
1355 1354 )
1356 1355 coreconfigitem(
1357 1356 b'format',
1358 1357 b'usestore',
1359 1358 default=True,
1360 1359 )
1361 1360
1362 1361
1363 1362 def _persistent_nodemap_default():
1364 1363 """compute `use-persistent-nodemap` default value
1365 1364
1366 1365 The feature is disabled unless a fast implementation is available.
1367 1366 """
1368 1367 from . import policy
1369 1368
1370 1369 return policy.importrust('revlog') is not None
1371 1370
1372 1371
1373 1372 coreconfigitem(
1374 1373 b'format',
1375 1374 b'use-persistent-nodemap',
1376 1375 default=_persistent_nodemap_default,
1377 1376 )
1378 1377 coreconfigitem(
1379 1378 b'format',
1380 1379 b'exp-use-copies-side-data-changeset',
1381 1380 default=False,
1382 1381 experimental=True,
1383 1382 )
1384 1383 coreconfigitem(
1385 1384 b'format',
1386 1385 b'use-share-safe',
1387 1386 default=False,
1388 1387 )
1389 1388 coreconfigitem(
1390 1389 b'format',
1391 1390 b'internal-phase',
1392 1391 default=False,
1393 1392 experimental=True,
1394 1393 )
1395 1394 coreconfigitem(
1396 1395 b'fsmonitor',
1397 1396 b'warn_when_unused',
1398 1397 default=True,
1399 1398 )
1400 1399 coreconfigitem(
1401 1400 b'fsmonitor',
1402 1401 b'warn_update_file_count',
1403 1402 default=50000,
1404 1403 )
1405 1404 coreconfigitem(
1406 1405 b'fsmonitor',
1407 1406 b'warn_update_file_count_rust',
1408 1407 default=400000,
1409 1408 )
1410 1409 coreconfigitem(
1411 1410 b'help',
1412 1411 br'hidden-command\..*',
1413 1412 default=False,
1414 1413 generic=True,
1415 1414 )
1416 1415 coreconfigitem(
1417 1416 b'help',
1418 1417 br'hidden-topic\..*',
1419 1418 default=False,
1420 1419 generic=True,
1421 1420 )
1422 1421 coreconfigitem(
1423 1422 b'hooks',
1424 1423 b'[^:]*',
1425 1424 default=dynamicdefault,
1426 1425 generic=True,
1427 1426 )
1428 1427 coreconfigitem(
1429 1428 b'hooks',
1430 1429 b'.*:run-with-plain',
1431 1430 default=True,
1432 1431 generic=True,
1433 1432 )
1434 1433 coreconfigitem(
1435 1434 b'hgweb-paths',
1436 1435 b'.*',
1437 1436 default=list,
1438 1437 generic=True,
1439 1438 )
1440 1439 coreconfigitem(
1441 1440 b'hostfingerprints',
1442 1441 b'.*',
1443 1442 default=list,
1444 1443 generic=True,
1445 1444 )
1446 1445 coreconfigitem(
1447 1446 b'hostsecurity',
1448 1447 b'ciphers',
1449 1448 default=None,
1450 1449 )
1451 1450 coreconfigitem(
1452 1451 b'hostsecurity',
1453 1452 b'minimumprotocol',
1454 1453 default=dynamicdefault,
1455 1454 )
1456 1455 coreconfigitem(
1457 1456 b'hostsecurity',
1458 1457 b'.*:minimumprotocol$',
1459 1458 default=dynamicdefault,
1460 1459 generic=True,
1461 1460 )
1462 1461 coreconfigitem(
1463 1462 b'hostsecurity',
1464 1463 b'.*:ciphers$',
1465 1464 default=dynamicdefault,
1466 1465 generic=True,
1467 1466 )
1468 1467 coreconfigitem(
1469 1468 b'hostsecurity',
1470 1469 b'.*:fingerprints$',
1471 1470 default=list,
1472 1471 generic=True,
1473 1472 )
1474 1473 coreconfigitem(
1475 1474 b'hostsecurity',
1476 1475 b'.*:verifycertsfile$',
1477 1476 default=None,
1478 1477 generic=True,
1479 1478 )
1480 1479
1481 1480 coreconfigitem(
1482 1481 b'http_proxy',
1483 1482 b'always',
1484 1483 default=False,
1485 1484 )
1486 1485 coreconfigitem(
1487 1486 b'http_proxy',
1488 1487 b'host',
1489 1488 default=None,
1490 1489 )
1491 1490 coreconfigitem(
1492 1491 b'http_proxy',
1493 1492 b'no',
1494 1493 default=list,
1495 1494 )
1496 1495 coreconfigitem(
1497 1496 b'http_proxy',
1498 1497 b'passwd',
1499 1498 default=None,
1500 1499 )
1501 1500 coreconfigitem(
1502 1501 b'http_proxy',
1503 1502 b'user',
1504 1503 default=None,
1505 1504 )
1506 1505
1507 1506 coreconfigitem(
1508 1507 b'http',
1509 1508 b'timeout',
1510 1509 default=None,
1511 1510 )
1512 1511
1513 1512 coreconfigitem(
1514 1513 b'logtoprocess',
1515 1514 b'commandexception',
1516 1515 default=None,
1517 1516 )
1518 1517 coreconfigitem(
1519 1518 b'logtoprocess',
1520 1519 b'commandfinish',
1521 1520 default=None,
1522 1521 )
1523 1522 coreconfigitem(
1524 1523 b'logtoprocess',
1525 1524 b'command',
1526 1525 default=None,
1527 1526 )
1528 1527 coreconfigitem(
1529 1528 b'logtoprocess',
1530 1529 b'develwarn',
1531 1530 default=None,
1532 1531 )
1533 1532 coreconfigitem(
1534 1533 b'logtoprocess',
1535 1534 b'uiblocked',
1536 1535 default=None,
1537 1536 )
1538 1537 coreconfigitem(
1539 1538 b'merge',
1540 1539 b'checkunknown',
1541 1540 default=b'abort',
1542 1541 )
1543 1542 coreconfigitem(
1544 1543 b'merge',
1545 1544 b'checkignored',
1546 1545 default=b'abort',
1547 1546 )
1548 1547 coreconfigitem(
1549 1548 b'experimental',
1550 1549 b'merge.checkpathconflicts',
1551 1550 default=False,
1552 1551 )
1553 1552 coreconfigitem(
1554 1553 b'merge',
1555 1554 b'followcopies',
1556 1555 default=True,
1557 1556 )
1558 1557 coreconfigitem(
1559 1558 b'merge',
1560 1559 b'on-failure',
1561 1560 default=b'continue',
1562 1561 )
1563 1562 coreconfigitem(
1564 1563 b'merge',
1565 1564 b'preferancestor',
1566 1565 default=lambda: [b'*'],
1567 1566 experimental=True,
1568 1567 )
1569 1568 coreconfigitem(
1570 1569 b'merge',
1571 1570 b'strict-capability-check',
1572 1571 default=False,
1573 1572 )
1574 1573 coreconfigitem(
1575 1574 b'merge-tools',
1576 1575 b'.*',
1577 1576 default=None,
1578 1577 generic=True,
1579 1578 )
1580 1579 coreconfigitem(
1581 1580 b'merge-tools',
1582 1581 br'.*\.args$',
1583 1582 default=b"$local $base $other",
1584 1583 generic=True,
1585 1584 priority=-1,
1586 1585 )
1587 1586 coreconfigitem(
1588 1587 b'merge-tools',
1589 1588 br'.*\.binary$',
1590 1589 default=False,
1591 1590 generic=True,
1592 1591 priority=-1,
1593 1592 )
1594 1593 coreconfigitem(
1595 1594 b'merge-tools',
1596 1595 br'.*\.check$',
1597 1596 default=list,
1598 1597 generic=True,
1599 1598 priority=-1,
1600 1599 )
1601 1600 coreconfigitem(
1602 1601 b'merge-tools',
1603 1602 br'.*\.checkchanged$',
1604 1603 default=False,
1605 1604 generic=True,
1606 1605 priority=-1,
1607 1606 )
1608 1607 coreconfigitem(
1609 1608 b'merge-tools',
1610 1609 br'.*\.executable$',
1611 1610 default=dynamicdefault,
1612 1611 generic=True,
1613 1612 priority=-1,
1614 1613 )
1615 1614 coreconfigitem(
1616 1615 b'merge-tools',
1617 1616 br'.*\.fixeol$',
1618 1617 default=False,
1619 1618 generic=True,
1620 1619 priority=-1,
1621 1620 )
1622 1621 coreconfigitem(
1623 1622 b'merge-tools',
1624 1623 br'.*\.gui$',
1625 1624 default=False,
1626 1625 generic=True,
1627 1626 priority=-1,
1628 1627 )
1629 1628 coreconfigitem(
1630 1629 b'merge-tools',
1631 1630 br'.*\.mergemarkers$',
1632 1631 default=b'basic',
1633 1632 generic=True,
1634 1633 priority=-1,
1635 1634 )
1636 1635 coreconfigitem(
1637 1636 b'merge-tools',
1638 1637 br'.*\.mergemarkertemplate$',
1639 1638 default=dynamicdefault, # take from command-templates.mergemarker
1640 1639 generic=True,
1641 1640 priority=-1,
1642 1641 )
1643 1642 coreconfigitem(
1644 1643 b'merge-tools',
1645 1644 br'.*\.priority$',
1646 1645 default=0,
1647 1646 generic=True,
1648 1647 priority=-1,
1649 1648 )
1650 1649 coreconfigitem(
1651 1650 b'merge-tools',
1652 1651 br'.*\.premerge$',
1653 1652 default=dynamicdefault,
1654 1653 generic=True,
1655 1654 priority=-1,
1656 1655 )
1657 1656 coreconfigitem(
1658 1657 b'merge-tools',
1659 1658 br'.*\.symlink$',
1660 1659 default=False,
1661 1660 generic=True,
1662 1661 priority=-1,
1663 1662 )
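# Editorial note (an assumption, not stated in this file): items declared with
# ``generic=True`` appear to use their name as a regular expression matched
# against configuration keys (e.g. a ``kdiff3.args`` setting would match
# ``.*\.args$``), while ``priority`` seems to order the patterns so that the
# specific ``.*\.<suffix>$`` entries (priority=-1) are tried before the
# catch-all ``.*``.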
1664 1663 coreconfigitem(
1665 1664 b'pager',
1666 1665 b'attend-.*',
1667 1666 default=dynamicdefault,
1668 1667 generic=True,
1669 1668 )
1670 1669 coreconfigitem(
1671 1670 b'pager',
1672 1671 b'ignore',
1673 1672 default=list,
1674 1673 )
1675 1674 coreconfigitem(
1676 1675 b'pager',
1677 1676 b'pager',
1678 1677 default=dynamicdefault,
1679 1678 )
1680 1679 coreconfigitem(
1681 1680 b'patch',
1682 1681 b'eol',
1683 1682 default=b'strict',
1684 1683 )
1685 1684 coreconfigitem(
1686 1685 b'patch',
1687 1686 b'fuzz',
1688 1687 default=2,
1689 1688 )
1690 1689 coreconfigitem(
1691 1690 b'paths',
1692 1691 b'default',
1693 1692 default=None,
1694 1693 )
1695 1694 coreconfigitem(
1696 1695 b'paths',
1697 1696 b'default-push',
1698 1697 default=None,
1699 1698 )
1700 1699 coreconfigitem(
1701 1700 b'paths',
1702 1701 b'.*',
1703 1702 default=None,
1704 1703 generic=True,
1705 1704 )
1706 1705 coreconfigitem(
1707 1706 b'phases',
1708 1707 b'checksubrepos',
1709 1708 default=b'follow',
1710 1709 )
1711 1710 coreconfigitem(
1712 1711 b'phases',
1713 1712 b'new-commit',
1714 1713 default=b'draft',
1715 1714 )
1716 1715 coreconfigitem(
1717 1716 b'phases',
1718 1717 b'publish',
1719 1718 default=True,
1720 1719 )
1721 1720 coreconfigitem(
1722 1721 b'profiling',
1723 1722 b'enabled',
1724 1723 default=False,
1725 1724 )
1726 1725 coreconfigitem(
1727 1726 b'profiling',
1728 1727 b'format',
1729 1728 default=b'text',
1730 1729 )
1731 1730 coreconfigitem(
1732 1731 b'profiling',
1733 1732 b'freq',
1734 1733 default=1000,
1735 1734 )
1736 1735 coreconfigitem(
1737 1736 b'profiling',
1738 1737 b'limit',
1739 1738 default=30,
1740 1739 )
1741 1740 coreconfigitem(
1742 1741 b'profiling',
1743 1742 b'nested',
1744 1743 default=0,
1745 1744 )
1746 1745 coreconfigitem(
1747 1746 b'profiling',
1748 1747 b'output',
1749 1748 default=None,
1750 1749 )
1751 1750 coreconfigitem(
1752 1751 b'profiling',
1753 1752 b'showmax',
1754 1753 default=0.999,
1755 1754 )
1756 1755 coreconfigitem(
1757 1756 b'profiling',
1758 1757 b'showmin',
1759 1758 default=dynamicdefault,
1760 1759 )
1761 1760 coreconfigitem(
1762 1761 b'profiling',
1763 1762 b'showtime',
1764 1763 default=True,
1765 1764 )
1766 1765 coreconfigitem(
1767 1766 b'profiling',
1768 1767 b'sort',
1769 1768 default=b'inlinetime',
1770 1769 )
1771 1770 coreconfigitem(
1772 1771 b'profiling',
1773 1772 b'statformat',
1774 1773 default=b'hotpath',
1775 1774 )
1776 1775 coreconfigitem(
1777 1776 b'profiling',
1778 1777 b'time-track',
1779 1778 default=dynamicdefault,
1780 1779 )
1781 1780 coreconfigitem(
1782 1781 b'profiling',
1783 1782 b'type',
1784 1783 default=b'stat',
1785 1784 )
1786 1785 coreconfigitem(
1787 1786 b'progress',
1788 1787 b'assume-tty',
1789 1788 default=False,
1790 1789 )
1791 1790 coreconfigitem(
1792 1791 b'progress',
1793 1792 b'changedelay',
1794 1793 default=1,
1795 1794 )
1796 1795 coreconfigitem(
1797 1796 b'progress',
1798 1797 b'clear-complete',
1799 1798 default=True,
1800 1799 )
1801 1800 coreconfigitem(
1802 1801 b'progress',
1803 1802 b'debug',
1804 1803 default=False,
1805 1804 )
1806 1805 coreconfigitem(
1807 1806 b'progress',
1808 1807 b'delay',
1809 1808 default=3,
1810 1809 )
1811 1810 coreconfigitem(
1812 1811 b'progress',
1813 1812 b'disable',
1814 1813 default=False,
1815 1814 )
1816 1815 coreconfigitem(
1817 1816 b'progress',
1818 1817 b'estimateinterval',
1819 1818 default=60.0,
1820 1819 )
1821 1820 coreconfigitem(
1822 1821 b'progress',
1823 1822 b'format',
1824 1823 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1825 1824 )
1826 1825 coreconfigitem(
1827 1826 b'progress',
1828 1827 b'refresh',
1829 1828 default=0.1,
1830 1829 )
1831 1830 coreconfigitem(
1832 1831 b'progress',
1833 1832 b'width',
1834 1833 default=dynamicdefault,
1835 1834 )
1836 1835 coreconfigitem(
1837 1836 b'pull',
1838 1837 b'confirm',
1839 1838 default=False,
1840 1839 )
1841 1840 coreconfigitem(
1842 1841 b'push',
1843 1842 b'pushvars.server',
1844 1843 default=False,
1845 1844 )
1846 1845 coreconfigitem(
1847 1846 b'rewrite',
1848 1847 b'backup-bundle',
1849 1848 default=True,
1850 1849 alias=[(b'ui', b'history-editing-backup')],
1851 1850 )
1852 1851 coreconfigitem(
1853 1852 b'rewrite',
1854 1853 b'update-timestamp',
1855 1854 default=False,
1856 1855 )
1857 1856 coreconfigitem(
1858 1857 b'rewrite',
1859 1858 b'empty-successor',
1860 1859 default=b'skip',
1861 1860 experimental=True,
1862 1861 )
1863 1862 coreconfigitem(
1864 1863 b'storage',
1865 1864 b'new-repo-backend',
1866 1865 default=b'revlogv1',
1867 1866 experimental=True,
1868 1867 )
1869 1868 coreconfigitem(
1870 1869 b'storage',
1871 1870 b'revlog.optimize-delta-parent-choice',
1872 1871 default=True,
1873 1872 alias=[(b'format', b'aggressivemergedeltas')],
1874 1873 )
1875 1874 # experimental as long as rust is experimental (or until a C version is implemented)
1876 1875 coreconfigitem(
1877 1876 b'storage',
1878 1877 b'revlog.persistent-nodemap.mmap',
1879 1878 default=True,
1880 1879 )
1881 1880 # experimental as long as format.use-persistent-nodemap is.
1882 1881 coreconfigitem(
1883 1882 b'storage',
1884 1883 b'revlog.persistent-nodemap.slow-path',
1885 1884 default=b"abort",
1886 1885 )
1887 1886
1888 1887 coreconfigitem(
1889 1888 b'storage',
1890 1889 b'revlog.reuse-external-delta',
1891 1890 default=True,
1892 1891 )
1893 1892 coreconfigitem(
1894 1893 b'storage',
1895 1894 b'revlog.reuse-external-delta-parent',
1896 1895 default=None,
1897 1896 )
1898 1897 coreconfigitem(
1899 1898 b'storage',
1900 1899 b'revlog.zlib.level',
1901 1900 default=None,
1902 1901 )
1903 1902 coreconfigitem(
1904 1903 b'storage',
1905 1904 b'revlog.zstd.level',
1906 1905 default=None,
1907 1906 )
1908 1907 coreconfigitem(
1909 1908 b'server',
1910 1909 b'bookmarks-pushkey-compat',
1911 1910 default=True,
1912 1911 )
1913 1912 coreconfigitem(
1914 1913 b'server',
1915 1914 b'bundle1',
1916 1915 default=True,
1917 1916 )
1918 1917 coreconfigitem(
1919 1918 b'server',
1920 1919 b'bundle1gd',
1921 1920 default=None,
1922 1921 )
1923 1922 coreconfigitem(
1924 1923 b'server',
1925 1924 b'bundle1.pull',
1926 1925 default=None,
1927 1926 )
1928 1927 coreconfigitem(
1929 1928 b'server',
1930 1929 b'bundle1gd.pull',
1931 1930 default=None,
1932 1931 )
1933 1932 coreconfigitem(
1934 1933 b'server',
1935 1934 b'bundle1.push',
1936 1935 default=None,
1937 1936 )
1938 1937 coreconfigitem(
1939 1938 b'server',
1940 1939 b'bundle1gd.push',
1941 1940 default=None,
1942 1941 )
1943 1942 coreconfigitem(
1944 1943 b'server',
1945 1944 b'bundle2.stream',
1946 1945 default=True,
1947 1946 alias=[(b'experimental', b'bundle2.stream')],
1948 1947 )
1949 1948 coreconfigitem(
1950 1949 b'server',
1951 1950 b'compressionengines',
1952 1951 default=list,
1953 1952 )
1954 1953 coreconfigitem(
1955 1954 b'server',
1956 1955 b'concurrent-push-mode',
1957 1956 default=b'check-related',
1958 1957 )
1959 1958 coreconfigitem(
1960 1959 b'server',
1961 1960 b'disablefullbundle',
1962 1961 default=False,
1963 1962 )
1964 1963 coreconfigitem(
1965 1964 b'server',
1966 1965 b'maxhttpheaderlen',
1967 1966 default=1024,
1968 1967 )
1969 1968 coreconfigitem(
1970 1969 b'server',
1971 1970 b'pullbundle',
1972 1971 default=False,
1973 1972 )
1974 1973 coreconfigitem(
1975 1974 b'server',
1976 1975 b'preferuncompressed',
1977 1976 default=False,
1978 1977 )
1979 1978 coreconfigitem(
1980 1979 b'server',
1981 1980 b'streamunbundle',
1982 1981 default=False,
1983 1982 )
1984 1983 coreconfigitem(
1985 1984 b'server',
1986 1985 b'uncompressed',
1987 1986 default=True,
1988 1987 )
1989 1988 coreconfigitem(
1990 1989 b'server',
1991 1990 b'uncompressedallowsecret',
1992 1991 default=False,
1993 1992 )
1994 1993 coreconfigitem(
1995 1994 b'server',
1996 1995 b'view',
1997 1996 default=b'served',
1998 1997 )
1999 1998 coreconfigitem(
2000 1999 b'server',
2001 2000 b'validate',
2002 2001 default=False,
2003 2002 )
2004 2003 coreconfigitem(
2005 2004 b'server',
2006 2005 b'zliblevel',
2007 2006 default=-1,
2008 2007 )
2009 2008 coreconfigitem(
2010 2009 b'server',
2011 2010 b'zstdlevel',
2012 2011 default=3,
2013 2012 )
2014 2013 coreconfigitem(
2015 2014 b'share',
2016 2015 b'pool',
2017 2016 default=None,
2018 2017 )
2019 2018 coreconfigitem(
2020 2019 b'share',
2021 2020 b'poolnaming',
2022 2021 default=b'identity',
2023 2022 )
2024 2023 coreconfigitem(
2025 2024 b'share',
2026 2025 b'safe-mismatch.source-not-safe',
2027 2026 default=b'abort',
2028 2027 )
2029 2028 coreconfigitem(
2030 2029 b'share',
2031 2030 b'safe-mismatch.source-safe',
2032 2031 default=b'abort',
2033 2032 )
2034 2033 coreconfigitem(
2035 2034 b'share',
2036 2035 b'safe-mismatch.source-not-safe.warn',
2037 2036 default=True,
2038 2037 )
2039 2038 coreconfigitem(
2040 2039 b'share',
2041 2040 b'safe-mismatch.source-safe.warn',
2042 2041 default=True,
2043 2042 )
2044 2043 coreconfigitem(
2045 2044 b'shelve',
2046 2045 b'maxbackups',
2047 2046 default=10,
2048 2047 )
2049 2048 coreconfigitem(
2050 2049 b'smtp',
2051 2050 b'host',
2052 2051 default=None,
2053 2052 )
2054 2053 coreconfigitem(
2055 2054 b'smtp',
2056 2055 b'local_hostname',
2057 2056 default=None,
2058 2057 )
2059 2058 coreconfigitem(
2060 2059 b'smtp',
2061 2060 b'password',
2062 2061 default=None,
2063 2062 )
2064 2063 coreconfigitem(
2065 2064 b'smtp',
2066 2065 b'port',
2067 2066 default=dynamicdefault,
2068 2067 )
2069 2068 coreconfigitem(
2070 2069 b'smtp',
2071 2070 b'tls',
2072 2071 default=b'none',
2073 2072 )
2074 2073 coreconfigitem(
2075 2074 b'smtp',
2076 2075 b'username',
2077 2076 default=None,
2078 2077 )
2079 2078 coreconfigitem(
2080 2079 b'sparse',
2081 2080 b'missingwarning',
2082 2081 default=True,
2083 2082 experimental=True,
2084 2083 )
2085 2084 coreconfigitem(
2086 2085 b'subrepos',
2087 2086 b'allowed',
2088 2087 default=dynamicdefault, # to make backporting simpler
2089 2088 )
2090 2089 coreconfigitem(
2091 2090 b'subrepos',
2092 2091 b'hg:allowed',
2093 2092 default=dynamicdefault,
2094 2093 )
2095 2094 coreconfigitem(
2096 2095 b'subrepos',
2097 2096 b'git:allowed',
2098 2097 default=dynamicdefault,
2099 2098 )
2100 2099 coreconfigitem(
2101 2100 b'subrepos',
2102 2101 b'svn:allowed',
2103 2102 default=dynamicdefault,
2104 2103 )
2105 2104 coreconfigitem(
2106 2105 b'templates',
2107 2106 b'.*',
2108 2107 default=None,
2109 2108 generic=True,
2110 2109 )
2111 2110 coreconfigitem(
2112 2111 b'templateconfig',
2113 2112 b'.*',
2114 2113 default=dynamicdefault,
2115 2114 generic=True,
2116 2115 )
2117 2116 coreconfigitem(
2118 2117 b'trusted',
2119 2118 b'groups',
2120 2119 default=list,
2121 2120 )
2122 2121 coreconfigitem(
2123 2122 b'trusted',
2124 2123 b'users',
2125 2124 default=list,
2126 2125 )
2127 2126 coreconfigitem(
2128 2127 b'ui',
2129 2128 b'_usedassubrepo',
2130 2129 default=False,
2131 2130 )
2132 2131 coreconfigitem(
2133 2132 b'ui',
2134 2133 b'allowemptycommit',
2135 2134 default=False,
2136 2135 )
2137 2136 coreconfigitem(
2138 2137 b'ui',
2139 2138 b'archivemeta',
2140 2139 default=True,
2141 2140 )
2142 2141 coreconfigitem(
2143 2142 b'ui',
2144 2143 b'askusername',
2145 2144 default=False,
2146 2145 )
2147 2146 coreconfigitem(
2148 2147 b'ui',
2149 2148 b'available-memory',
2150 2149 default=None,
2151 2150 )
2152 2151
2153 2152 coreconfigitem(
2154 2153 b'ui',
2155 2154 b'clonebundlefallback',
2156 2155 default=False,
2157 2156 )
2158 2157 coreconfigitem(
2159 2158 b'ui',
2160 2159 b'clonebundleprefers',
2161 2160 default=list,
2162 2161 )
2163 2162 coreconfigitem(
2164 2163 b'ui',
2165 2164 b'clonebundles',
2166 2165 default=True,
2167 2166 )
2168 2167 coreconfigitem(
2169 2168 b'ui',
2170 2169 b'color',
2171 2170 default=b'auto',
2172 2171 )
2173 2172 coreconfigitem(
2174 2173 b'ui',
2175 2174 b'commitsubrepos',
2176 2175 default=False,
2177 2176 )
2178 2177 coreconfigitem(
2179 2178 b'ui',
2180 2179 b'debug',
2181 2180 default=False,
2182 2181 )
2183 2182 coreconfigitem(
2184 2183 b'ui',
2185 2184 b'debugger',
2186 2185 default=None,
2187 2186 )
2188 2187 coreconfigitem(
2189 2188 b'ui',
2190 2189 b'editor',
2191 2190 default=dynamicdefault,
2192 2191 )
2193 2192 coreconfigitem(
2194 2193 b'ui',
2195 2194 b'detailed-exit-code',
2196 2195 default=False,
2197 2196 experimental=True,
2198 2197 )
2199 2198 coreconfigitem(
2200 2199 b'ui',
2201 2200 b'fallbackencoding',
2202 2201 default=None,
2203 2202 )
2204 2203 coreconfigitem(
2205 2204 b'ui',
2206 2205 b'forcecwd',
2207 2206 default=None,
2208 2207 )
2209 2208 coreconfigitem(
2210 2209 b'ui',
2211 2210 b'forcemerge',
2212 2211 default=None,
2213 2212 )
2214 2213 coreconfigitem(
2215 2214 b'ui',
2216 2215 b'formatdebug',
2217 2216 default=False,
2218 2217 )
2219 2218 coreconfigitem(
2220 2219 b'ui',
2221 2220 b'formatjson',
2222 2221 default=False,
2223 2222 )
2224 2223 coreconfigitem(
2225 2224 b'ui',
2226 2225 b'formatted',
2227 2226 default=None,
2228 2227 )
2229 2228 coreconfigitem(
2230 2229 b'ui',
2231 2230 b'interactive',
2232 2231 default=None,
2233 2232 )
2234 2233 coreconfigitem(
2235 2234 b'ui',
2236 2235 b'interface',
2237 2236 default=None,
2238 2237 )
2239 2238 coreconfigitem(
2240 2239 b'ui',
2241 2240 b'interface.chunkselector',
2242 2241 default=None,
2243 2242 )
2244 2243 coreconfigitem(
2245 2244 b'ui',
2246 2245 b'large-file-limit',
2247 2246 default=10000000,
2248 2247 )
2249 2248 coreconfigitem(
2250 2249 b'ui',
2251 2250 b'logblockedtimes',
2252 2251 default=False,
2253 2252 )
2254 2253 coreconfigitem(
2255 2254 b'ui',
2256 2255 b'merge',
2257 2256 default=None,
2258 2257 )
2259 2258 coreconfigitem(
2260 2259 b'ui',
2261 2260 b'mergemarkers',
2262 2261 default=b'basic',
2263 2262 )
2264 2263 coreconfigitem(
2265 2264 b'ui',
2266 2265 b'message-output',
2267 2266 default=b'stdio',
2268 2267 )
2269 2268 coreconfigitem(
2270 2269 b'ui',
2271 2270 b'nontty',
2272 2271 default=False,
2273 2272 )
2274 2273 coreconfigitem(
2275 2274 b'ui',
2276 2275 b'origbackuppath',
2277 2276 default=None,
2278 2277 )
2279 2278 coreconfigitem(
2280 2279 b'ui',
2281 2280 b'paginate',
2282 2281 default=True,
2283 2282 )
2284 2283 coreconfigitem(
2285 2284 b'ui',
2286 2285 b'patch',
2287 2286 default=None,
2288 2287 )
2289 2288 coreconfigitem(
2290 2289 b'ui',
2291 2290 b'portablefilenames',
2292 2291 default=b'warn',
2293 2292 )
2294 2293 coreconfigitem(
2295 2294 b'ui',
2296 2295 b'promptecho',
2297 2296 default=False,
2298 2297 )
2299 2298 coreconfigitem(
2300 2299 b'ui',
2301 2300 b'quiet',
2302 2301 default=False,
2303 2302 )
2304 2303 coreconfigitem(
2305 2304 b'ui',
2306 2305 b'quietbookmarkmove',
2307 2306 default=False,
2308 2307 )
2309 2308 coreconfigitem(
2310 2309 b'ui',
2311 2310 b'relative-paths',
2312 2311 default=b'legacy',
2313 2312 )
2314 2313 coreconfigitem(
2315 2314 b'ui',
2316 2315 b'remotecmd',
2317 2316 default=b'hg',
2318 2317 )
2319 2318 coreconfigitem(
2320 2319 b'ui',
2321 2320 b'report_untrusted',
2322 2321 default=True,
2323 2322 )
2324 2323 coreconfigitem(
2325 2324 b'ui',
2326 2325 b'rollback',
2327 2326 default=True,
2328 2327 )
2329 2328 coreconfigitem(
2330 2329 b'ui',
2331 2330 b'signal-safe-lock',
2332 2331 default=True,
2333 2332 )
2334 2333 coreconfigitem(
2335 2334 b'ui',
2336 2335 b'slash',
2337 2336 default=False,
2338 2337 )
2339 2338 coreconfigitem(
2340 2339 b'ui',
2341 2340 b'ssh',
2342 2341 default=b'ssh',
2343 2342 )
2344 2343 coreconfigitem(
2345 2344 b'ui',
2346 2345 b'ssherrorhint',
2347 2346 default=None,
2348 2347 )
2349 2348 coreconfigitem(
2350 2349 b'ui',
2351 2350 b'statuscopies',
2352 2351 default=False,
2353 2352 )
2354 2353 coreconfigitem(
2355 2354 b'ui',
2356 2355 b'strict',
2357 2356 default=False,
2358 2357 )
2359 2358 coreconfigitem(
2360 2359 b'ui',
2361 2360 b'style',
2362 2361 default=b'',
2363 2362 )
2364 2363 coreconfigitem(
2365 2364 b'ui',
2366 2365 b'supportcontact',
2367 2366 default=None,
2368 2367 )
2369 2368 coreconfigitem(
2370 2369 b'ui',
2371 2370 b'textwidth',
2372 2371 default=78,
2373 2372 )
2374 2373 coreconfigitem(
2375 2374 b'ui',
2376 2375 b'timeout',
2377 2376 default=b'600',
2378 2377 )
2379 2378 coreconfigitem(
2380 2379 b'ui',
2381 2380 b'timeout.warn',
2382 2381 default=0,
2383 2382 )
2384 2383 coreconfigitem(
2385 2384 b'ui',
2386 2385 b'timestamp-output',
2387 2386 default=False,
2388 2387 )
2389 2388 coreconfigitem(
2390 2389 b'ui',
2391 2390 b'traceback',
2392 2391 default=False,
2393 2392 )
2394 2393 coreconfigitem(
2395 2394 b'ui',
2396 2395 b'tweakdefaults',
2397 2396 default=False,
2398 2397 )
2399 2398 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2400 2399 coreconfigitem(
2401 2400 b'ui',
2402 2401 b'verbose',
2403 2402 default=False,
2404 2403 )
2405 2404 coreconfigitem(
2406 2405 b'verify',
2407 2406 b'skipflags',
2408 2407 default=None,
2409 2408 )
2410 2409 coreconfigitem(
2411 2410 b'web',
2412 2411 b'allowbz2',
2413 2412 default=False,
2414 2413 )
2415 2414 coreconfigitem(
2416 2415 b'web',
2417 2416 b'allowgz',
2418 2417 default=False,
2419 2418 )
2420 2419 coreconfigitem(
2421 2420 b'web',
2422 2421 b'allow-pull',
2423 2422 alias=[(b'web', b'allowpull')],
2424 2423 default=True,
2425 2424 )
2426 2425 coreconfigitem(
2427 2426 b'web',
2428 2427 b'allow-push',
2429 2428 alias=[(b'web', b'allow_push')],
2430 2429 default=list,
2431 2430 )
2432 2431 coreconfigitem(
2433 2432 b'web',
2434 2433 b'allowzip',
2435 2434 default=False,
2436 2435 )
2437 2436 coreconfigitem(
2438 2437 b'web',
2439 2438 b'archivesubrepos',
2440 2439 default=False,
2441 2440 )
2442 2441 coreconfigitem(
2443 2442 b'web',
2444 2443 b'cache',
2445 2444 default=True,
2446 2445 )
2447 2446 coreconfigitem(
2448 2447 b'web',
2449 2448 b'comparisoncontext',
2450 2449 default=5,
2451 2450 )
2452 2451 coreconfigitem(
2453 2452 b'web',
2454 2453 b'contact',
2455 2454 default=None,
2456 2455 )
2457 2456 coreconfigitem(
2458 2457 b'web',
2459 2458 b'deny_push',
2460 2459 default=list,
2461 2460 )
2462 2461 coreconfigitem(
2463 2462 b'web',
2464 2463 b'guessmime',
2465 2464 default=False,
2466 2465 )
2467 2466 coreconfigitem(
2468 2467 b'web',
2469 2468 b'hidden',
2470 2469 default=False,
2471 2470 )
2472 2471 coreconfigitem(
2473 2472 b'web',
2474 2473 b'labels',
2475 2474 default=list,
2476 2475 )
2477 2476 coreconfigitem(
2478 2477 b'web',
2479 2478 b'logoimg',
2480 2479 default=b'hglogo.png',
2481 2480 )
2482 2481 coreconfigitem(
2483 2482 b'web',
2484 2483 b'logourl',
2485 2484 default=b'https://mercurial-scm.org/',
2486 2485 )
2487 2486 coreconfigitem(
2488 2487 b'web',
2489 2488 b'accesslog',
2490 2489 default=b'-',
2491 2490 )
2492 2491 coreconfigitem(
2493 2492 b'web',
2494 2493 b'address',
2495 2494 default=b'',
2496 2495 )
2497 2496 coreconfigitem(
2498 2497 b'web',
2499 2498 b'allow-archive',
2500 2499 alias=[(b'web', b'allow_archive')],
2501 2500 default=list,
2502 2501 )
2503 2502 coreconfigitem(
2504 2503 b'web',
2505 2504 b'allow_read',
2506 2505 default=list,
2507 2506 )
2508 2507 coreconfigitem(
2509 2508 b'web',
2510 2509 b'baseurl',
2511 2510 default=None,
2512 2511 )
2513 2512 coreconfigitem(
2514 2513 b'web',
2515 2514 b'cacerts',
2516 2515 default=None,
2517 2516 )
2518 2517 coreconfigitem(
2519 2518 b'web',
2520 2519 b'certificate',
2521 2520 default=None,
2522 2521 )
2523 2522 coreconfigitem(
2524 2523 b'web',
2525 2524 b'collapse',
2526 2525 default=False,
2527 2526 )
2528 2527 coreconfigitem(
2529 2528 b'web',
2530 2529 b'csp',
2531 2530 default=None,
2532 2531 )
2533 2532 coreconfigitem(
2534 2533 b'web',
2535 2534 b'deny_read',
2536 2535 default=list,
2537 2536 )
2538 2537 coreconfigitem(
2539 2538 b'web',
2540 2539 b'descend',
2541 2540 default=True,
2542 2541 )
2543 2542 coreconfigitem(
2544 2543 b'web',
2545 2544 b'description',
2546 2545 default=b"",
2547 2546 )
2548 2547 coreconfigitem(
2549 2548 b'web',
2550 2549 b'encoding',
2551 2550 default=lambda: encoding.encoding,
2552 2551 )
2553 2552 coreconfigitem(
2554 2553 b'web',
2555 2554 b'errorlog',
2556 2555 default=b'-',
2557 2556 )
2558 2557 coreconfigitem(
2559 2558 b'web',
2560 2559 b'ipv6',
2561 2560 default=False,
2562 2561 )
2563 2562 coreconfigitem(
2564 2563 b'web',
2565 2564 b'maxchanges',
2566 2565 default=10,
2567 2566 )
2568 2567 coreconfigitem(
2569 2568 b'web',
2570 2569 b'maxfiles',
2571 2570 default=10,
2572 2571 )
2573 2572 coreconfigitem(
2574 2573 b'web',
2575 2574 b'maxshortchanges',
2576 2575 default=60,
2577 2576 )
2578 2577 coreconfigitem(
2579 2578 b'web',
2580 2579 b'motd',
2581 2580 default=b'',
2582 2581 )
2583 2582 coreconfigitem(
2584 2583 b'web',
2585 2584 b'name',
2586 2585 default=dynamicdefault,
2587 2586 )
2588 2587 coreconfigitem(
2589 2588 b'web',
2590 2589 b'port',
2591 2590 default=8000,
2592 2591 )
2593 2592 coreconfigitem(
2594 2593 b'web',
2595 2594 b'prefix',
2596 2595 default=b'',
2597 2596 )
2598 2597 coreconfigitem(
2599 2598 b'web',
2600 2599 b'push_ssl',
2601 2600 default=True,
2602 2601 )
2603 2602 coreconfigitem(
2604 2603 b'web',
2605 2604 b'refreshinterval',
2606 2605 default=20,
2607 2606 )
2608 2607 coreconfigitem(
2609 2608 b'web',
2610 2609 b'server-header',
2611 2610 default=None,
2612 2611 )
2613 2612 coreconfigitem(
2614 2613 b'web',
2615 2614 b'static',
2616 2615 default=None,
2617 2616 )
2618 2617 coreconfigitem(
2619 2618 b'web',
2620 2619 b'staticurl',
2621 2620 default=None,
2622 2621 )
2623 2622 coreconfigitem(
2624 2623 b'web',
2625 2624 b'stripes',
2626 2625 default=1,
2627 2626 )
2628 2627 coreconfigitem(
2629 2628 b'web',
2630 2629 b'style',
2631 2630 default=b'paper',
2632 2631 )
2633 2632 coreconfigitem(
2634 2633 b'web',
2635 2634 b'templates',
2636 2635 default=None,
2637 2636 )
2638 2637 coreconfigitem(
2639 2638 b'web',
2640 2639 b'view',
2641 2640 default=b'served',
2642 2641 experimental=True,
2643 2642 )
2644 2643 coreconfigitem(
2645 2644 b'worker',
2646 2645 b'backgroundclose',
2647 2646 default=dynamicdefault,
2648 2647 )
2649 2648 # Windows defaults to a limit of 512 open files. A buffer of 128
2650 2649 # should give us enough headway.
2651 2650 coreconfigitem(
2652 2651 b'worker',
2653 2652 b'backgroundclosemaxqueue',
2654 2653 default=384,
2655 2654 )
2656 2655 coreconfigitem(
2657 2656 b'worker',
2658 2657 b'backgroundcloseminfilecount',
2659 2658 default=2048,
2660 2659 )
2661 2660 coreconfigitem(
2662 2661 b'worker',
2663 2662 b'backgroundclosethreadcount',
2664 2663 default=4,
2665 2664 )
2666 2665 coreconfigitem(
2667 2666 b'worker',
2668 2667 b'enabled',
2669 2668 default=True,
2670 2669 )
2671 2670 coreconfigitem(
2672 2671 b'worker',
2673 2672 b'numcpus',
2674 2673 default=None,
2675 2674 )
2676 2675
2677 2676 # Rebase-related configuration moved to core because other extensions are doing
2678 2677 # strange things. For example, shelve imports the extension to reuse some bits
2679 2678 # without formally loading it.
2680 2679 coreconfigitem(
2681 2680 b'commands',
2682 2681 b'rebase.requiredest',
2683 2682 default=False,
2684 2683 )
2685 2684 coreconfigitem(
2686 2685 b'experimental',
2687 2686 b'rebaseskipobsolete',
2688 2687 default=True,
2689 2688 )
2690 2689 coreconfigitem(
2691 2690 b'rebase',
2692 2691 b'singletransaction',
2693 2692 default=False,
2694 2693 )
2695 2694 coreconfigitem(
2696 2695 b'rebase',
2697 2696 b'experimental.inmemory',
2698 2697 default=False,
2699 2698 )
@@ -1,3249 +1,3246 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .pycompat import getattr
36 36 from .revlogutils.constants import (
37 37 ALL_KINDS,
38 38 FEATURES_BY_VERSION,
39 39 FLAG_GENERALDELTA,
40 40 FLAG_INLINE_DATA,
41 41 INDEX_HEADER,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 SUPPORTED_FLAGS,
51 51 )
52 52 from .revlogutils.flagutil import (
53 53 REVIDX_DEFAULT_FLAGS,
54 54 REVIDX_ELLIPSIS,
55 55 REVIDX_EXTSTORED,
56 56 REVIDX_FLAGS_ORDER,
57 57 REVIDX_HASCOPIESINFO,
58 58 REVIDX_ISCENSORED,
59 59 REVIDX_RAWTEXT_CHANGING_FLAGS,
60 60 )
61 61 from .thirdparty import attr
62 62 from . import (
63 63 ancestor,
64 64 dagop,
65 65 error,
66 66 mdiff,
67 67 policy,
68 68 pycompat,
69 69 templatefilters,
70 70 util,
71 71 )
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76 from .revlogutils import (
77 77 deltas as deltautil,
78 78 docket as docketutil,
79 79 flagutil,
80 80 nodemap as nodemaputil,
81 81 revlogv0,
82 82 sidedata as sidedatautil,
83 83 )
84 84 from .utils import (
85 85 storageutil,
86 86 stringutil,
87 87 )
88 88
89 89 # blanked usage of all the names to prevent pyflakes warnings
90 90 # We need these names available in the module for extensions.
91 91
92 92 REVLOGV0
93 93 REVLOGV1
94 94 REVLOGV2
95 95 FLAG_INLINE_DATA
96 96 FLAG_GENERALDELTA
97 97 REVLOG_DEFAULT_FLAGS
98 98 REVLOG_DEFAULT_FORMAT
99 99 REVLOG_DEFAULT_VERSION
100 100 REVLOGV1_FLAGS
101 101 REVLOGV2_FLAGS
102 102 REVIDX_ISCENSORED
103 103 REVIDX_ELLIPSIS
104 104 REVIDX_HASCOPIESINFO
105 105 REVIDX_EXTSTORED
106 106 REVIDX_DEFAULT_FLAGS
107 107 REVIDX_FLAGS_ORDER
108 108 REVIDX_RAWTEXT_CHANGING_FLAGS
109 109
110 110 parsers = policy.importmod('parsers')
111 111 rustancestor = policy.importrust('ancestor')
112 112 rustdagop = policy.importrust('dagop')
113 113 rustrevlog = policy.importrust('revlog')
114 114
115 115 # Aliased for performance.
116 116 _zlibdecompress = zlib.decompress
117 117
118 118 # max size of revlog with inline data
119 119 _maxinline = 131072
120 120 _chunksize = 1048576
121 121
122 122 # Flag processors for REVIDX_ELLIPSIS.
123 123 def ellipsisreadprocessor(rl, text):
124 124 return text, False
125 125
126 126
127 127 def ellipsiswriteprocessor(rl, text):
128 128 return text, False
129 129
130 130
131 131 def ellipsisrawprocessor(rl, text):
132 132 return False
133 133
134 134
135 135 ellipsisprocessor = (
136 136 ellipsisreadprocessor,
137 137 ellipsiswriteprocessor,
138 138 ellipsisrawprocessor,
139 139 )
140 140
141 141
142 142 def offset_type(offset, type):
143 143 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
144 144 raise ValueError(b'unknown revlog index flags')
145 145 return int(int(offset) << 16 | type)
146 146
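# An illustrative sketch (editorial, not part of the original source): the
# packed value keeps the byte offset in the upper bits and the 16 flag bits
# in the lower ones, matching the start()/flags() accessors further down:
#
#   packed = offset_type(1024, REVIDX_ISCENSORED)
#   offset = packed >> 16    # -> 1024
#   flags = packed & 0xFFFF  # -> REVIDX_ISCENSORED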
147 147
148 148 def _verify_revision(rl, skipflags, state, node):
149 149 """Verify the integrity of the given revlog ``node`` while providing a hook
150 150 point for extensions to influence the operation."""
151 151 if skipflags:
152 152 state[b'skipread'].add(node)
153 153 else:
154 154 # Side-effect: read content and verify hash.
155 155 rl.revision(node)
156 156
157 157
158 158 # True if a fast implementation for persistent-nodemap is available
159 159 #
160 160 # We also consider we have a "fast" implementation in "pure" python because
161 161 # people using pure don't really have performance considerations (and a
162 162 # wheelbarrow of other slowness sources)
163 163 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
164 164 parsers, 'BaseIndexObject'
165 165 )
166 166
167 167
168 168 @attr.s(slots=True, frozen=True)
169 169 class _revisioninfo(object):
170 170 """Information about a revision that allows building its fulltext
171 171 node: expected hash of the revision
172 172 p1, p2: parent revs of the revision
173 173 btext: built text cache consisting of a one-element list
174 174 cachedelta: (baserev, uncompressed_delta) or None
175 175 flags: flags associated to the revision storage
176 176
177 177 One of btext[0] or cachedelta must be set.
178 178 """
179 179
180 180 node = attr.ib()
181 181 p1 = attr.ib()
182 182 p2 = attr.ib()
183 183 btext = attr.ib()
184 184 textlen = attr.ib()
185 185 cachedelta = attr.ib()
186 186 flags = attr.ib()
187 187
188 188
189 189 @interfaceutil.implementer(repository.irevisiondelta)
190 190 @attr.s(slots=True)
191 191 class revlogrevisiondelta(object):
192 192 node = attr.ib()
193 193 p1node = attr.ib()
194 194 p2node = attr.ib()
195 195 basenode = attr.ib()
196 196 flags = attr.ib()
197 197 baserevisionsize = attr.ib()
198 198 revision = attr.ib()
199 199 delta = attr.ib()
200 200 sidedata = attr.ib()
201 201 protocol_flags = attr.ib()
202 202 linknode = attr.ib(default=None)
203 203
204 204
205 205 @interfaceutil.implementer(repository.iverifyproblem)
206 206 @attr.s(frozen=True)
207 207 class revlogproblem(object):
208 208 warning = attr.ib(default=None)
209 209 error = attr.ib(default=None)
210 210 node = attr.ib(default=None)
211 211
212 212
213 213 def parse_index_v1(data, inline):
214 214 # call the C implementation to parse the index data
215 215 index, cache = parsers.parse_index2(data, inline)
216 216 return index, cache
217 217
218 218
219 219 def parse_index_v2(data, inline):
220 220 # call the C implementation to parse the index data
221 221 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
222 222 return index, cache
223 223
224 224
225 225 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
226 226
227 227 def parse_index_v1_nodemap(data, inline):
228 228 index, cache = parsers.parse_index_devel_nodemap(data, inline)
229 229 return index, cache
230 230
231 231
232 232 else:
233 233 parse_index_v1_nodemap = None
234 234
235 235
236 236 def parse_index_v1_mixed(data, inline):
237 237 index, cache = parse_index_v1(data, inline)
238 238 return rustrevlog.MixedIndex(index), cache
239 239
240 240
241 241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
242 242 # signed integer)
243 243 _maxentrysize = 0x7FFFFFFF
244 244
245 245
246 246 class revlog(object):
247 247 """
248 248 the underlying revision storage object
249 249
250 250 A revlog consists of two parts, an index and the revision data.
251 251
252 252 The index is a file with a fixed record size containing
253 253 information on each revision, including its nodeid (hash), the
254 254 nodeids of its parents, the position and offset of its data within
255 255 the data file, and the revision it's based on. Finally, each entry
256 256 contains a linkrev entry that can serve as a pointer to external
257 257 data.
258 258
259 259 The revision data itself is a linear collection of data chunks.
260 260 Each chunk represents a revision and is usually represented as a
261 261 delta against the previous chunk. To bound lookup time, runs of
262 262 deltas are limited to about 2 times the length of the original
263 263 version data. This makes retrieval of a version proportional to
264 264 its size, or O(1) relative to the number of revisions.
265 265
266 266 Both pieces of the revlog are written to in an append-only
267 267 fashion, which means we never need to rewrite a file to insert or
268 268 remove data, and can use some simple techniques to avoid the need
269 269 for locking while reading.
270 270
271 271 If checkambig, indexfile is opened with checkambig=True at
272 272 writing, to avoid file stat ambiguity.
273 273
274 274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
275 275 index will be mmapped rather than read if it is larger than the
276 276 configured threshold.
277 277
278 278 If censorable is True, the revlog can have censored revisions.
279 279
280 280 If `upperboundcomp` is not None, this is the expected maximal gain from
281 281 compression for the data content.
282 282
283 283 `concurrencychecker` is an optional function that receives 3 arguments: a
284 284 file handle, a filename, and an expected position. It should check whether
285 285 the current position in the file handle is valid, and log/warn/fail (by
286 286 raising).
287 287 """
288 288
289 289 _flagserrorclass = error.RevlogError
290 290
291 291 def __init__(
292 292 self,
293 293 opener,
294 294 target,
295 295 radix,
296 296 postfix=None,
297 297 checkambig=False,
298 298 mmaplargeindex=False,
299 299 censorable=False,
300 300 upperboundcomp=None,
301 301 persistentnodemap=False,
302 302 concurrencychecker=None,
303 303 ):
304 304 """
305 305 create a revlog object
306 306
307 307 opener is a function that abstracts the file opening operation
308 308 and can be used to implement COW semantics or the like.
309 309
310 310 `target`: a (KIND, ID) tuple that identifies the content stored in
311 311 this revlog. It helps the rest of the code understand what the revlog
312 312 is about without having to resort to heuristics and index filename
313 313 analysis. Note that this must be reliably set by normal code, but
314 314 test, debug, or performance measurement code might not set it to an
315 315 accurate value.
316 316 """
317 317 self.upperboundcomp = upperboundcomp
318 318
319 319 self.radix = radix
320 320
321 321 self._docket_file = None
322 322 self._indexfile = None
323 323 self._datafile = None
324 324 self._nodemap_file = None
325 325 self.postfix = postfix
326 326 self.opener = opener
327 327 if persistentnodemap:
328 328 self._nodemap_file = nodemaputil.get_nodemap_file(self)
329 329
330 330 assert target[0] in ALL_KINDS
331 331 assert len(target) == 2
332 332 self.target = target
333 333 # When True, indexfile is opened with checkambig=True at writing, to
334 334 # avoid file stat ambiguity.
335 335 self._checkambig = checkambig
336 336 self._mmaplargeindex = mmaplargeindex
337 337 self._censorable = censorable
338 338 # 3-tuple of (node, rev, text) for a raw revision.
339 339 self._revisioncache = None
340 340 # Maps rev to chain base rev.
341 341 self._chainbasecache = util.lrucachedict(100)
342 342 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
343 343 self._chunkcache = (0, b'')
344 344 # How much data to read and cache into the raw revlog data cache.
345 345 self._chunkcachesize = 65536
346 346 self._maxchainlen = None
347 347 self._deltabothparents = True
348 348 self.index = None
349 349 self._docket = None
350 350 self._nodemap_docket = None
351 351 # Mapping of partial identifiers to full nodes.
352 352 self._pcache = {}
353 353 # Mapping of revision integer to full node.
354 354 self._compengine = b'zlib'
355 355 self._compengineopts = {}
356 356 self._maxdeltachainspan = -1
357 357 self._withsparseread = False
358 358 self._sparserevlog = False
359 359 self.hassidedata = False
360 360 self._srdensitythreshold = 0.50
361 361 self._srmingapsize = 262144
362 362
363 363 # Make copy of flag processors so each revlog instance can support
364 364 # custom flags.
365 365 self._flagprocessors = dict(flagutil.flagprocessors)
366 366
367 367 # 2-tuple of file handles being used for active writing.
368 368 self._writinghandles = None
369 369 # prevent nesting of addgroup
370 370 self._adding_group = None
371 371
372 372 self._loadindex()
373 373
374 374 self._concurrencychecker = concurrencychecker
375 375
376 376 def _init_opts(self):
377 377 """process options (from above/config) to setup associated default revlog mode
378 378
379 379 These values might be affected when actually reading on disk information.
380 380
381 381 The relevant values are returned for use in _loadindex().
382 382
383 383 * newversionflags:
384 384 version header to use if we need to create a new revlog
385 385
386 386 * mmapindexthreshold:
387 387 minimal index size at which to start using mmap
388 388
389 389 * force_nodemap:
390 390 force the usage of a "development" version of the nodemap code
391 391 """
392 392 mmapindexthreshold = None
393 393 opts = self.opener.options
394 394
395 395 if b'revlogv2' in opts:
396 396 new_header = REVLOGV2 | FLAG_INLINE_DATA
397 397 elif b'revlogv1' in opts:
398 398 new_header = REVLOGV1 | FLAG_INLINE_DATA
399 399 if b'generaldelta' in opts:
400 400 new_header |= FLAG_GENERALDELTA
401 401 elif b'revlogv0' in self.opener.options:
402 402 new_header = REVLOGV0
403 403 else:
404 404 new_header = REVLOG_DEFAULT_VERSION
405 405
406 406 if b'chunkcachesize' in opts:
407 407 self._chunkcachesize = opts[b'chunkcachesize']
408 408 if b'maxchainlen' in opts:
409 409 self._maxchainlen = opts[b'maxchainlen']
410 410 if b'deltabothparents' in opts:
411 411 self._deltabothparents = opts[b'deltabothparents']
412 412 self._lazydelta = bool(opts.get(b'lazydelta', True))
413 413 self._lazydeltabase = False
414 414 if self._lazydelta:
415 415 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
416 416 if b'compengine' in opts:
417 417 self._compengine = opts[b'compengine']
418 418 if b'zlib.level' in opts:
419 419 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
420 420 if b'zstd.level' in opts:
421 421 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
422 422 if b'maxdeltachainspan' in opts:
423 423 self._maxdeltachainspan = opts[b'maxdeltachainspan']
424 424 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
425 425 mmapindexthreshold = opts[b'mmapindexthreshold']
426 426 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
427 427 withsparseread = bool(opts.get(b'with-sparse-read', False))
428 428 # sparse-revlog forces sparse-read
429 429 self._withsparseread = self._sparserevlog or withsparseread
430 430 if b'sparse-read-density-threshold' in opts:
431 431 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
432 432 if b'sparse-read-min-gap-size' in opts:
433 433 self._srmingapsize = opts[b'sparse-read-min-gap-size']
434 434 if opts.get(b'enableellipsis'):
435 435 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
436 436
437 437 # revlog v0 doesn't have flag processors
438 438 for flag, processor in pycompat.iteritems(
439 439 opts.get(b'flagprocessors', {})
440 440 ):
441 441 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
442 442
443 443 if self._chunkcachesize <= 0:
444 444 raise error.RevlogError(
445 445 _(b'revlog chunk cache size %r is not greater than 0')
446 446 % self._chunkcachesize
447 447 )
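# note: a positive integer x is a power of two if and only if
# ``x & (x - 1) == 0``, which is what the check below relies on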
448 448 elif self._chunkcachesize & (self._chunkcachesize - 1):
449 449 raise error.RevlogError(
450 450 _(b'revlog chunk cache size %r is not a power of 2')
451 451 % self._chunkcachesize
452 452 )
453 453 force_nodemap = opts.get(b'devel-force-nodemap', False)
454 454 return new_header, mmapindexthreshold, force_nodemap
455 455
456 456 def _get_data(self, filepath, mmap_threshold, size=None):
457 457 """return a file content with or without mmap
458 458
459 459 If the file is missing return the empty string"""
460 460 try:
461 461 with self.opener(filepath) as fp:
462 462 if mmap_threshold is not None:
463 463 file_size = self.opener.fstat(fp).st_size
464 464 if file_size >= mmap_threshold:
465 465 if size is not None:
466 466 # avoid a potential mmap crash (never map past the end of the file)
467 467 size = min(file_size, size)
468 468 # TODO: should .close() to release resources without
469 469 # relying on Python GC
470 470 if size is None:
471 471 return util.buffer(util.mmapread(fp))
472 472 else:
473 473 return util.buffer(util.mmapread(fp, size))
474 474 if size is None:
475 475 return fp.read()
476 476 else:
477 477 return fp.read(size)
478 478 except IOError as inst:
479 479 if inst.errno != errno.ENOENT:
480 480 raise
481 481 return b''
482 482
483 483 def _loadindex(self):
484 484
485 485 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
486 486
487 487 if self.postfix is None:
488 488 entry_point = b'%s.i' % self.radix
489 489 else:
490 490 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
491 491
492 492 entry_data = b''
493 493 self._initempty = True
494 494 entry_data = self._get_data(entry_point, mmapindexthreshold)
495 495 if len(entry_data) > 0:
496 496 header = INDEX_HEADER.unpack(entry_data[:4])[0]
497 497 self._initempty = False
498 498 else:
499 499 header = new_header
500 500
501 501 self._format_flags = header & ~0xFFFF
502 502 self._format_version = header & 0xFFFF
503 503
504 504 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
505 505 if supported_flags is None:
506 506 msg = _(b'unknown version (%d) in revlog %s')
507 507 msg %= (self._format_version, self.display_id)
508 508 raise error.RevlogError(msg)
509 509 elif self._format_flags & ~supported_flags:
510 510 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
511 511 display_flag = self._format_flags >> 16
512 512 msg %= (display_flag, self._format_version, self.display_id)
513 513 raise error.RevlogError(msg)
514 514
515 515 features = FEATURES_BY_VERSION[self._format_version]
516 516 self._inline = features[b'inline'](self._format_flags)
517 517 self._generaldelta = features[b'generaldelta'](self._format_flags)
518 518 self.hassidedata = features[b'sidedata']
519 519
520 520 if not features[b'docket']:
521 521 self._indexfile = entry_point
522 522 index_data = entry_data
523 523 else:
524 524 self._docket_file = entry_point
525 525 if self._initempty:
526 526 self._docket = docketutil.default_docket(self, header)
527 527 else:
528 528 self._docket = docketutil.parse_docket(self, entry_data)
529 529 self._indexfile = self._docket.index_filepath()
530 530 index_data = b''
531 531 index_size = self._docket.index_end
532 532 if index_size > 0:
533 533 index_data = self._get_data(
534 534 self._indexfile, mmapindexthreshold, size=index_size
535 535 )
536 536 if len(index_data) < index_size:
537 537 msg = _(b'not enough index data for %s: got %d, expected %d')
538 538 msg %= (self.display_id, len(index_data), index_size)
539 539 raise error.RevlogError(msg)
540 540
541 541 self._inline = False
542 542 # generaldelta implied by version 2 revlogs.
543 543 self._generaldelta = True
544 544 # the logic for persistent nodemap will be dealt with within the
545 545 # main docket, so disable it for now.
546 546 self._nodemap_file = None
547 547
548 548 if self.postfix is None or self.postfix == b'a':
549 549 self._datafile = b'%s.d' % self.radix
550 550 else:
551 551 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
552 552
553 553 self.nodeconstants = sha1nodeconstants
554 554 self.nullid = self.nodeconstants.nullid
555 555
556 556 # sparse-revlog can't be on without general-delta (issue6056)
557 557 if not self._generaldelta:
558 558 self._sparserevlog = False
559 559
560 560 self._storedeltachains = True
561 561
562 562 devel_nodemap = (
563 563 self._nodemap_file
564 564 and force_nodemap
565 565 and parse_index_v1_nodemap is not None
566 566 )
567 567
568 568 use_rust_index = False
569 569 if rustrevlog is not None:
570 570 if self._nodemap_file is not None:
571 571 use_rust_index = True
572 572 else:
573 573 use_rust_index = self.opener.options.get(b'rust.index')
574 574
575 575 self._parse_index = parse_index_v1
576 576 if self._format_version == REVLOGV0:
577 577 self._parse_index = revlogv0.parse_index_v0
578 578 elif self._format_version == REVLOGV2:
579 579 self._parse_index = parse_index_v2
580 580 elif devel_nodemap:
581 581 self._parse_index = parse_index_v1_nodemap
582 582 elif use_rust_index:
583 583 self._parse_index = parse_index_v1_mixed
584 584 try:
585 585 d = self._parse_index(index_data, self._inline)
586 586 index, _chunkcache = d
587 587 use_nodemap = (
588 588 not self._inline
589 589 and self._nodemap_file is not None
590 590 and util.safehasattr(index, 'update_nodemap_data')
591 591 )
592 592 if use_nodemap:
593 593 nodemap_data = nodemaputil.persisted_data(self)
594 594 if nodemap_data is not None:
595 595 docket = nodemap_data[0]
596 596 if (
597 597 len(d[0]) > docket.tip_rev
598 598 and d[0][docket.tip_rev][7] == docket.tip_node
599 599 ):
600 600 # no changelog tampering
601 601 self._nodemap_docket = docket
602 602 index.update_nodemap_data(*nodemap_data)
603 603 except (ValueError, IndexError):
604 604 raise error.RevlogError(
605 605 _(b"index %s is corrupted") % self.display_id
606 606 )
607 607 self.index, self._chunkcache = d
608 608 if not self._chunkcache:
609 609 self._chunkclear()
610 610 # revnum -> (chain-length, sum-delta-length)
611 611 self._chaininfocache = util.lrucachedict(500)
612 612 # revlog header -> revlog compressor
613 613 self._decompressors = {}
614 614
615 615 @util.propertycache
616 616 def revlog_kind(self):
617 617 return self.target[0]
618 618
619 619 @util.propertycache
620 620 def display_id(self):
621 621 """The public facing "ID" of the revlog that we use in message"""
622 622 # Maybe we should build a user facing representation of
623 623 # revlog.target instead of using `self.radix`
624 624 return self.radix
625 625
626 626 @util.propertycache
627 627 def _compressor(self):
628 628 engine = util.compengines[self._compengine]
629 629 return engine.revlogcompressor(self._compengineopts)
630 630
631 631 def _indexfp(self):
632 632 """file object for the revlog's index file"""
633 633 return self.opener(self._indexfile, mode=b"r")
634 634
635 635 def __index_write_fp(self):
636 636 # You should not use this directly; use `_writing` instead
637 637 try:
638 638 f = self.opener(
639 639 self._indexfile, mode=b"r+", checkambig=self._checkambig
640 640 )
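# Editorial note: without a docket the index is treated as strictly
# append-only, so writing continues at the end of the file; with a docket,
# ``index_end`` records how much of the index is currently valid, so
# writing resumes from there instead.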
641 641 if self._docket is None:
642 642 f.seek(0, os.SEEK_END)
643 643 else:
644 644 f.seek(self._docket.index_end, os.SEEK_SET)
645 645 return f
646 646 except IOError as inst:
647 647 if inst.errno != errno.ENOENT:
648 648 raise
649 649 return self.opener(
650 650 self._indexfile, mode=b"w+", checkambig=self._checkambig
651 651 )
652 652
653 653 def __index_new_fp(self):
654 654 # You should not use this unless you are upgrading from an inline revlog
655 655 return self.opener(
656 656 self._indexfile,
657 657 mode=b"w",
658 658 checkambig=self._checkambig,
659 659 atomictemp=True,
660 660 )
661 661
662 662 def _datafp(self, mode=b'r'):
663 663 """file object for the revlog's data file"""
664 664 return self.opener(self._datafile, mode=mode)
665 665
666 666 @contextlib.contextmanager
667 667 def _datareadfp(self, existingfp=None):
668 668 """file object suitable to read data"""
669 669 # Use explicit file handle, if given.
670 670 if existingfp is not None:
671 671 yield existingfp
672 672
673 673 # Use a file handle being actively used for writes, if available.
674 674 # There is some danger to doing this because reads will seek the
675 675 # file. However, _writeentry() performs a SEEK_END before all writes,
676 676 # so we should be safe.
677 677 elif self._writinghandles:
678 678 if self._inline:
679 679 yield self._writinghandles[0]
680 680 else:
681 681 yield self._writinghandles[1]
682 682
683 683 # Otherwise open a new file handle.
684 684 else:
685 685 if self._inline:
686 686 func = self._indexfp
687 687 else:
688 688 func = self._datafp
689 689 with func() as fp:
690 690 yield fp
691 691
692 692 def tiprev(self):
693 693 return len(self.index) - 1
694 694
695 695 def tip(self):
696 696 return self.node(self.tiprev())
697 697
698 698 def __contains__(self, rev):
699 699 return 0 <= rev < len(self)
700 700
701 701 def __len__(self):
702 702 return len(self.index)
703 703
704 704 def __iter__(self):
705 705 return iter(pycompat.xrange(len(self)))
706 706
707 707 def revs(self, start=0, stop=None):
708 708 """iterate over all rev in this revlog (from start to stop)"""
709 709 return storageutil.iterrevs(len(self), start=start, stop=stop)
710 710
711 711 @property
712 712 def nodemap(self):
713 713 msg = (
714 714 b"revlog.nodemap is deprecated, "
715 715 b"use revlog.index.[has_node|rev|get_rev]"
716 716 )
717 717 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
718 718 return self.index.nodemap
719 719
720 720 @property
721 721 def _nodecache(self):
722 722 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
723 723 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
724 724 return self.index.nodemap
725 725
726 726 def hasnode(self, node):
727 727 try:
728 728 self.rev(node)
729 729 return True
730 730 except KeyError:
731 731 return False
732 732
733 733 def candelta(self, baserev, rev):
734 734 """whether two revisions (baserev, rev) can be delta-ed or not"""
735 735 # Disable delta if either rev requires a content-changing flag
736 736 # processor (ex. LFS). This is because such flag processor can alter
737 737 # the rawtext content that the delta will be based on, and two clients
738 738 # could have a same revlog node with different flags (i.e. different
739 739 # rawtext contents) and the delta could be incompatible.
740 740 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
741 741 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
742 742 ):
743 743 return False
744 744 return True
745 745
746 746 def update_caches(self, transaction):
747 747 if self._nodemap_file is not None:
748 748 if transaction is None:
749 749 nodemaputil.update_persistent_nodemap(self)
750 750 else:
751 751 nodemaputil.setup_persistent_nodemap(transaction, self)
752 752
753 753 def clearcaches(self):
754 754 self._revisioncache = None
755 755 self._chainbasecache.clear()
756 756 self._chunkcache = (0, b'')
757 757 self._pcache = {}
758 758 self._nodemap_docket = None
759 759 self.index.clearcaches()
760 760 # The Python code is the one responsible for validating the docket, so we
761 761 # end up having to refresh it here.
762 762 use_nodemap = (
763 763 not self._inline
764 764 and self._nodemap_file is not None
765 765 and util.safehasattr(self.index, 'update_nodemap_data')
766 766 )
767 767 if use_nodemap:
768 768 nodemap_data = nodemaputil.persisted_data(self)
769 769 if nodemap_data is not None:
770 770 self._nodemap_docket = nodemap_data[0]
771 771 self.index.update_nodemap_data(*nodemap_data)
772 772
773 773 def rev(self, node):
774 774 try:
775 775 return self.index.rev(node)
776 776 except TypeError:
777 777 raise
778 778 except error.RevlogError:
779 779 # parsers.c radix tree lookup failed
780 780 if (
781 781 node == self.nodeconstants.wdirid
782 782 or node in self.nodeconstants.wdirfilenodeids
783 783 ):
784 784 raise error.WdirUnsupported
785 785 raise error.LookupError(node, self.display_id, _(b'no node'))
786 786
787 787 # Accessors for index entries.
788 788
789 789 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
790 790 # are flags.
791 791 def start(self, rev):
792 792 return int(self.index[rev][0] >> 16)
793 793
794 794 def flags(self, rev):
795 795 return self.index[rev][0] & 0xFFFF
796 796
797 797 def length(self, rev):
798 798 return self.index[rev][1]
799 799
800 800 def sidedata_length(self, rev):
801 801 if not self.hassidedata:
802 802 return 0
803 803 return self.index[rev][9]
804 804
805 805 def rawsize(self, rev):
806 806 """return the length of the uncompressed text for a given revision"""
807 807 l = self.index[rev][2]
808 808 if l >= 0:
809 809 return l
810 810
811 811 t = self.rawdata(rev)
812 812 return len(t)
813 813
814 814 def size(self, rev):
815 815 """length of non-raw text (processed by a "read" flag processor)"""
816 816 # fast path: if no "read" flag processor could change the content,
817 817 # size is rawsize. note: ELLIPSIS is known to not change the content.
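# (the ``^ REVIDX_ELLIPSIS`` below clears the ELLIPSIS bit from the
# known-flags mask, so an ellipsis-only revision still takes the fast path)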
818 818 flags = self.flags(rev)
819 819 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
820 820 return self.rawsize(rev)
821 821
822 822 return len(self.revision(rev, raw=False))
823 823
824 824 def chainbase(self, rev):
825 825 base = self._chainbasecache.get(rev)
826 826 if base is not None:
827 827 return base
828 828
829 829 index = self.index
830 830 iterrev = rev
831 831 base = index[iterrev][3]
832 832 while base != iterrev:
833 833 iterrev = base
834 834 base = index[iterrev][3]
835 835
836 836 self._chainbasecache[rev] = base
837 837 return base
838 838
839 839 def linkrev(self, rev):
840 840 return self.index[rev][4]
841 841
842 842 def parentrevs(self, rev):
843 843 try:
844 844 entry = self.index[rev]
845 845 except IndexError:
846 846 if rev == wdirrev:
847 847 raise error.WdirUnsupported
848 848 raise
849 849 if entry[5] == nullrev:
850 850 return entry[6], entry[5]
851 851 else:
852 852 return entry[5], entry[6]
853 853
854 854 # fast parentrevs(rev) where rev isn't filtered
855 855 _uncheckedparentrevs = parentrevs
856 856
857 857 def node(self, rev):
858 858 try:
859 859 return self.index[rev][7]
860 860 except IndexError:
861 861 if rev == wdirrev:
862 862 raise error.WdirUnsupported
863 863 raise
864 864
865 865 # Derived from index values.
866 866
867 867 def end(self, rev):
868 868 return self.start(rev) + self.length(rev)
869 869
870 870 def parents(self, node):
871 871 i = self.index
872 872 d = i[self.rev(node)]
873 873 # inline node() to avoid function call overhead
874 874 if d[5] == nullrev:
875 875 return i[d[6]][7], i[d[5]][7]
876 876 else:
877 877 return i[d[5]][7], i[d[6]][7]
878 878
879 879 def chainlen(self, rev):
880 880 return self._chaininfo(rev)[0]
881 881
882 882 def _chaininfo(self, rev):
883 883 chaininfocache = self._chaininfocache
884 884 if rev in chaininfocache:
885 885 return chaininfocache[rev]
886 886 index = self.index
887 887 generaldelta = self._generaldelta
888 888 iterrev = rev
889 889 e = index[iterrev]
890 890 clen = 0
891 891 compresseddeltalen = 0
892 892 while iterrev != e[3]:
893 893 clen += 1
894 894 compresseddeltalen += e[1]
895 895 if generaldelta:
896 896 iterrev = e[3]
897 897 else:
898 898 iterrev -= 1
899 899 if iterrev in chaininfocache:
900 900 t = chaininfocache[iterrev]
901 901 clen += t[0]
902 902 compresseddeltalen += t[1]
903 903 break
904 904 e = index[iterrev]
905 905 else:
906 906 # Add text length of base since decompressing that also takes
907 907 # work. For cache hits the length is already included.
908 908 compresseddeltalen += e[1]
909 909 r = (clen, compresseddeltalen)
910 910 chaininfocache[rev] = r
911 911 return r
912 912
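# Illustrative sketch (not from the original source; ``rl`` is a
# hypothetical revlog instance): the chain length above counts the deltas
# applied on top of the chain base, so it is one less than the length of
# the full delta chain:
#
#   clen, compresseddeltalen = rl._chaininfo(rev)
#   assert clen == len(rl._deltachain(rev)[0]) - 1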
913 913 def _deltachain(self, rev, stoprev=None):
914 914 """Obtain the delta chain for a revision.
915 915
916 916 ``stoprev`` specifies a revision to stop at. If not specified, we
917 917 stop at the base of the chain.
918 918
919 919 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
920 920 revs in ascending order and ``stopped`` is a bool indicating whether
921 921 ``stoprev`` was hit.
922 922 """
923 923 # Try C implementation.
924 924 try:
925 925 return self.index.deltachain(rev, stoprev, self._generaldelta)
926 926 except AttributeError:
927 927 pass
928 928
929 929 chain = []
930 930
931 931 # Alias to prevent attribute lookup in tight loop.
932 932 index = self.index
933 933 generaldelta = self._generaldelta
934 934
935 935 iterrev = rev
936 936 e = index[iterrev]
937 937 while iterrev != e[3] and iterrev != stoprev:
938 938 chain.append(iterrev)
939 939 if generaldelta:
940 940 iterrev = e[3]
941 941 else:
942 942 iterrev -= 1
943 943 e = index[iterrev]
944 944
945 945 if iterrev == stoprev:
946 946 stopped = True
947 947 else:
948 948 chain.append(iterrev)
949 949 stopped = False
950 950
951 951 chain.reverse()
952 952 return chain, stopped
953 953
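# Sketch of how a chain returned above is consumed (mirrors _rawtext
# further down; ``rl`` is a hypothetical revlog instance): when the walk
# was not stopped early, the first chain member is a full snapshot and
# the rest are deltas applied in order:
#
#   chain, stopped = rl._deltachain(rev)
#   bins = rl._chunks(chain)
#   rawtext = mdiff.patches(bytes(bins[0]), bins[1:])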
954 954 def ancestors(self, revs, stoprev=0, inclusive=False):
955 955 """Generate the ancestors of 'revs' in reverse revision order.
956 956 Does not generate revs lower than stoprev.
957 957
958 958 See the documentation for ancestor.lazyancestors for more details."""
959 959
960 960 # first, make sure start revisions aren't filtered
961 961 revs = list(revs)
962 962 checkrev = self.node
963 963 for r in revs:
964 964 checkrev(r)
965 965 # and we're sure ancestors aren't filtered as well
966 966
967 967 if rustancestor is not None:
968 968 lazyancestors = rustancestor.LazyAncestors
969 969 arg = self.index
970 970 else:
971 971 lazyancestors = ancestor.lazyancestors
972 972 arg = self._uncheckedparentrevs
973 973 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
974 974
975 975 def descendants(self, revs):
976 976 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
977 977
978 978 def findcommonmissing(self, common=None, heads=None):
979 979 """Return a tuple of the ancestors of common and the ancestors of heads
980 980 that are not ancestors of common. In revset terminology, we return the
981 981 tuple:
982 982
983 983 ::common, (::heads) - (::common)
984 984
985 985 The list is sorted by revision number, meaning it is
986 986 topologically sorted.
987 987
988 988 'heads' and 'common' are both lists of node IDs. If heads is
989 989 not supplied, uses all of the revlog's heads. If common is not
990 990 supplied, uses nullid."""
991 991 if common is None:
992 992 common = [self.nullid]
993 993 if heads is None:
994 994 heads = self.heads()
995 995
996 996 common = [self.rev(n) for n in common]
997 997 heads = [self.rev(n) for n in heads]
998 998
999 999 # we want the ancestors, but inclusive
1000 1000 class lazyset(object):
1001 1001 def __init__(self, lazyvalues):
1002 1002 self.addedvalues = set()
1003 1003 self.lazyvalues = lazyvalues
1004 1004
1005 1005 def __contains__(self, value):
1006 1006 return value in self.addedvalues or value in self.lazyvalues
1007 1007
1008 1008 def __iter__(self):
1009 1009 added = self.addedvalues
1010 1010 for r in added:
1011 1011 yield r
1012 1012 for r in self.lazyvalues:
1013 1013 if r not in added:
1014 1014 yield r
1015 1015
1016 1016 def add(self, value):
1017 1017 self.addedvalues.add(value)
1018 1018
1019 1019 def update(self, values):
1020 1020 self.addedvalues.update(values)
1021 1021
1022 1022 has = lazyset(self.ancestors(common))
1023 1023 has.add(nullrev)
1024 1024 has.update(common)
1025 1025
1026 1026 # take all ancestors from heads that aren't in has
1027 1027 missing = set()
1028 1028 visit = collections.deque(r for r in heads if r not in has)
1029 1029 while visit:
1030 1030 r = visit.popleft()
1031 1031 if r in missing:
1032 1032 continue
1033 1033 else:
1034 1034 missing.add(r)
1035 1035 for p in self.parentrevs(r):
1036 1036 if p not in has:
1037 1037 visit.append(p)
1038 1038 missing = list(missing)
1039 1039 missing.sort()
1040 1040 return has, [self.node(miss) for miss in missing]
1041 1041
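# Minimal usage sketch (``a`` and ``b`` are hypothetical node ids):
# compute the ancestors of ``b`` that are not ancestors of ``a``:
#
#   has, missing = rl.findcommonmissing(common=[a], heads=[b])
#   # ``missing`` is a topologically sorted list of nodes in
#   # (::b) - (::a); ``has`` lazily answers membership in ::a.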
1042 1042 def incrementalmissingrevs(self, common=None):
1043 1043 """Return an object that can be used to incrementally compute the
1044 1044 revision numbers of the ancestors of arbitrary sets that are not
1045 1045 ancestors of common. This is an ancestor.incrementalmissingancestors
1046 1046 object.
1047 1047
1048 1048 'common' is a list of revision numbers. If common is not supplied, uses
1049 1049 nullrev.
1050 1050 """
1051 1051 if common is None:
1052 1052 common = [nullrev]
1053 1053
1054 1054 if rustancestor is not None:
1055 1055 return rustancestor.MissingAncestors(self.index, common)
1056 1056 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1057 1057
1058 1058 def findmissingrevs(self, common=None, heads=None):
1059 1059 """Return the revision numbers of the ancestors of heads that
1060 1060 are not ancestors of common.
1061 1061
1062 1062 More specifically, return a list of revision numbers corresponding to
1063 1063 nodes N such that every N satisfies the following constraints:
1064 1064
1065 1065 1. N is an ancestor of some node in 'heads'
1066 1066 2. N is not an ancestor of any node in 'common'
1067 1067
1068 1068 The list is sorted by revision number, meaning it is
1069 1069 topologically sorted.
1070 1070
1071 1071 'heads' and 'common' are both lists of revision numbers. If heads is
1072 1072 not supplied, uses all of the revlog's heads. If common is not
1073 1073 supplied, uses nullid."""
1074 1074 if common is None:
1075 1075 common = [nullrev]
1076 1076 if heads is None:
1077 1077 heads = self.headrevs()
1078 1078
1079 1079 inc = self.incrementalmissingrevs(common=common)
1080 1080 return inc.missingancestors(heads)
1081 1081
1082 1082 def findmissing(self, common=None, heads=None):
1083 1083 """Return the ancestors of heads that are not ancestors of common.
1084 1084
1085 1085 More specifically, return a list of nodes N such that every N
1086 1086 satisfies the following constraints:
1087 1087
1088 1088 1. N is an ancestor of some node in 'heads'
1089 1089 2. N is not an ancestor of any node in 'common'
1090 1090
1091 1091 The list is sorted by revision number, meaning it is
1092 1092 topologically sorted.
1093 1093
1094 1094 'heads' and 'common' are both lists of node IDs. If heads is
1095 1095 not supplied, uses all of the revlog's heads. If common is not
1096 1096 supplied, uses nullid."""
1097 1097 if common is None:
1098 1098 common = [self.nullid]
1099 1099 if heads is None:
1100 1100 heads = self.heads()
1101 1101
1102 1102 common = [self.rev(n) for n in common]
1103 1103 heads = [self.rev(n) for n in heads]
1104 1104
1105 1105 inc = self.incrementalmissingrevs(common=common)
1106 1106 return [self.node(r) for r in inc.missingancestors(heads)]
1107 1107
1108 1108 def nodesbetween(self, roots=None, heads=None):
1109 1109 """Return a topological path from 'roots' to 'heads'.
1110 1110
1111 1111 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1112 1112 topologically sorted list of all nodes N that satisfy both of
1113 1113 these constraints:
1114 1114
1115 1115 1. N is a descendant of some node in 'roots'
1116 1116 2. N is an ancestor of some node in 'heads'
1117 1117
1118 1118 Every node is considered to be both a descendant and an ancestor
1119 1119 of itself, so every reachable node in 'roots' and 'heads' will be
1120 1120 included in 'nodes'.
1121 1121
1122 1122 'outroots' is the list of reachable nodes in 'roots', i.e., the
1123 1123 subset of 'roots' that is returned in 'nodes'. Likewise,
1124 1124 'outheads' is the subset of 'heads' that is also in 'nodes'.
1125 1125
1126 1126 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1127 1127 unspecified, uses nullid as the only root. If 'heads' is
1128 1128 unspecified, uses list of all of the revlog's heads."""
1129 1129 nonodes = ([], [], [])
1130 1130 if roots is not None:
1131 1131 roots = list(roots)
1132 1132 if not roots:
1133 1133 return nonodes
1134 1134 lowestrev = min([self.rev(n) for n in roots])
1135 1135 else:
1136 1136 roots = [self.nullid] # Everybody's a descendant of nullid
1137 1137 lowestrev = nullrev
1138 1138 if (lowestrev == nullrev) and (heads is None):
1139 1139 # We want _all_ the nodes!
1140 1140 return (
1141 1141 [self.node(r) for r in self],
1142 1142 [self.nullid],
1143 1143 list(self.heads()),
1144 1144 )
1145 1145 if heads is None:
1146 1146 # All nodes are ancestors, so the latest ancestor is the last
1147 1147 # node.
1148 1148 highestrev = len(self) - 1
1149 1149 # Set ancestors to None to signal that every node is an ancestor.
1150 1150 ancestors = None
1151 1151 # Set heads to an empty dictionary for later discovery of heads
1152 1152 heads = {}
1153 1153 else:
1154 1154 heads = list(heads)
1155 1155 if not heads:
1156 1156 return nonodes
1157 1157 ancestors = set()
1158 1158 # Turn heads into a dictionary so we can remove 'fake' heads.
1159 1159 # Also, later we will be using it to filter out the heads we can't
1160 1160 # find from roots.
1161 1161 heads = dict.fromkeys(heads, False)
1162 1162 # Start at the top and keep marking parents until we're done.
1163 1163 nodestotag = set(heads)
1164 1164 # Remember where the top was so we can use it as a limit later.
1165 1165 highestrev = max([self.rev(n) for n in nodestotag])
1166 1166 while nodestotag:
1167 1167 # grab a node to tag
1168 1168 n = nodestotag.pop()
1169 1169 # Never tag nullid
1170 1170 if n == self.nullid:
1171 1171 continue
1172 1172 # A node's revision number represents its place in a
1173 1173 # topologically sorted list of nodes.
1174 1174 r = self.rev(n)
1175 1175 if r >= lowestrev:
1176 1176 if n not in ancestors:
1177 1177 # If we are possibly a descendant of one of the roots
1178 1178 # and we haven't already been marked as an ancestor
1179 1179 ancestors.add(n) # Mark as ancestor
1180 1180 # Add non-nullid parents to list of nodes to tag.
1181 1181 nodestotag.update(
1182 1182 [p for p in self.parents(n) if p != self.nullid]
1183 1183 )
1184 1184 elif n in heads: # We've seen it before, is it a fake head?
1185 1185 # So it is; real heads should not be the ancestors of
1186 1186 # any other heads.
1187 1187 heads.pop(n)
1188 1188 if not ancestors:
1189 1189 return nonodes
1190 1190 # Now that we have our set of ancestors, we want to remove any
1191 1191 # roots that are not ancestors.
1192 1192
1193 1193 # If one of the roots was nullid, everything is included anyway.
1194 1194 if lowestrev > nullrev:
1195 1195 # But, since we weren't, let's recompute the lowest rev to not
1196 1196 # include roots that aren't ancestors.
1197 1197
1198 1198 # Filter out roots that aren't ancestors of heads
1199 1199 roots = [root for root in roots if root in ancestors]
1200 1200 # Recompute the lowest revision
1201 1201 if roots:
1202 1202 lowestrev = min([self.rev(root) for root in roots])
1203 1203 else:
1204 1204 # No more roots? Return empty list
1205 1205 return nonodes
1206 1206 else:
1207 1207 # We are descending from nullid, and don't need to care about
1208 1208 # any other roots.
1209 1209 lowestrev = nullrev
1210 1210 roots = [self.nullid]
1211 1211 # Transform our roots list into a set.
1212 1212 descendants = set(roots)
1213 1213 # Also, keep the original roots so we can filter out roots that aren't
1214 1214 # 'real' roots (i.e. are descended from other roots).
1215 1215 roots = descendants.copy()
1216 1216 # Our topologically sorted list of output nodes.
1217 1217 orderedout = []
1218 1218 # Don't start at nullid since we don't want nullid in our output list,
1219 1219 # and if nullid shows up in descendants, empty parents will look like
1220 1220 # they're descendants.
1221 1221 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1222 1222 n = self.node(r)
1223 1223 isdescendant = False
1224 1224 if lowestrev == nullrev: # Everybody is a descendant of nullid
1225 1225 isdescendant = True
1226 1226 elif n in descendants:
1227 1227 # n is already a descendant
1228 1228 isdescendant = True
1229 1229 # This check only needs to be done here because all the roots
1230 1230 # will start being marked as descendants before the loop.
1231 1231 if n in roots:
1232 1232 # If n was a root, check if it's a 'real' root.
1233 1233 p = tuple(self.parents(n))
1234 1234 # If any of its parents are descendants, it's not a root.
1235 1235 if (p[0] in descendants) or (p[1] in descendants):
1236 1236 roots.remove(n)
1237 1237 else:
1238 1238 p = tuple(self.parents(n))
1239 1239 # A node is a descendant if either of its parents are
1240 1240 # descendants. (We seeded the descendants set with the roots
1241 1241 # up there, remember?)
1242 1242 if (p[0] in descendants) or (p[1] in descendants):
1243 1243 descendants.add(n)
1244 1244 isdescendant = True
1245 1245 if isdescendant and ((ancestors is None) or (n in ancestors)):
1246 1246 # Only include nodes that are both descendants and ancestors.
1247 1247 orderedout.append(n)
1248 1248 if (ancestors is not None) and (n in heads):
1249 1249 # We're trying to figure out which heads are reachable
1250 1250 # from roots.
1251 1251 # Mark this head as having been reached
1252 1252 heads[n] = True
1253 1253 elif ancestors is None:
1254 1254 # Otherwise, we're trying to discover the heads.
1255 1255 # Assume this is a head because if it isn't, the next step
1256 1256 # will eventually remove it.
1257 1257 heads[n] = True
1258 1258 # But, obviously its parents aren't.
1259 1259 for p in self.parents(n):
1260 1260 heads.pop(p, None)
1261 1261 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1262 1262 roots = list(roots)
1263 1263 assert orderedout
1264 1264 assert roots
1265 1265 assert heads
1266 1266 return (orderedout, roots, heads)
1267 1267
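# Minimal usage sketch (``r`` and ``h`` are hypothetical node ids):
# collect the topological path from a root to a head:
#
#   nodes, outroots, outheads = rl.nodesbetween(roots=[r], heads=[h])
#   # ``nodes`` holds every node in r::h; ``outroots``/``outheads`` are
#   # the members of ``roots``/``heads`` that appear in ``nodes``.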
1268 1268 def headrevs(self, revs=None):
1269 1269 if revs is None:
1270 1270 try:
1271 1271 return self.index.headrevs()
1272 1272 except AttributeError:
1273 1273 return self._headrevs()
1274 1274 if rustdagop is not None:
1275 1275 return rustdagop.headrevs(self.index, revs)
1276 1276 return dagop.headrevs(revs, self._uncheckedparentrevs)
1277 1277
1278 1278 def computephases(self, roots):
1279 1279 return self.index.computephasesmapsets(roots)
1280 1280
1281 1281 def _headrevs(self):
1282 1282 count = len(self)
1283 1283 if not count:
1284 1284 return [nullrev]
1285 1285 # we won't iterate over filtered revs, so nobody is a head at the start
1286 1286 ishead = [0] * (count + 1)
1287 1287 index = self.index
1288 1288 for r in self:
1289 1289 ishead[r] = 1 # I may be a head
1290 1290 e = index[r]
1291 1291 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1292 1292 return [r for r, val in enumerate(ishead) if val]
1293 1293
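# Worked example of the scan above on a three-revision linear history
# (0 <- 1 <- 2): each rev first marks itself as a potential head, then
# clears its parents (the array has one extra slot so nullrev parents
# land harmlessly at the end):
#
#   rev 1 clears ishead[0], rev 2 clears ishead[1]  ->  heads == [2]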
1294 1294 def heads(self, start=None, stop=None):
1295 1295 """return the list of all nodes that have no children
1296 1296
1297 1297 if start is specified, only heads that are descendants of
1298 1298 start will be returned
1299 1299 if stop is specified, it will consider all the revs from stop
1300 1300 as if they had no children
1301 1301 """
1302 1302 if start is None and stop is None:
1303 1303 if not len(self):
1304 1304 return [self.nullid]
1305 1305 return [self.node(r) for r in self.headrevs()]
1306 1306
1307 1307 if start is None:
1308 1308 start = nullrev
1309 1309 else:
1310 1310 start = self.rev(start)
1311 1311
1312 1312 stoprevs = {self.rev(n) for n in stop or []}
1313 1313
1314 1314 revs = dagop.headrevssubset(
1315 1315 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1316 1316 )
1317 1317
1318 1318 return [self.node(rev) for rev in revs]
1319 1319
1320 1320 def children(self, node):
1321 1321 """find the children of a given node"""
1322 1322 c = []
1323 1323 p = self.rev(node)
1324 1324 for r in self.revs(start=p + 1):
1325 1325 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1326 1326 if prevs:
1327 1327 for pr in prevs:
1328 1328 if pr == p:
1329 1329 c.append(self.node(r))
1330 1330 elif p == nullrev:
1331 1331 c.append(self.node(r))
1332 1332 return c
1333 1333
1334 1334 def commonancestorsheads(self, a, b):
1335 1335 """calculate all the heads of the common ancestors of nodes a and b"""
1336 1336 a, b = self.rev(a), self.rev(b)
1337 1337 ancs = self._commonancestorsheads(a, b)
1338 1338 return pycompat.maplist(self.node, ancs)
1339 1339
1340 1340 def _commonancestorsheads(self, *revs):
1341 1341 """calculate all the heads of the common ancestors of revs"""
1342 1342 try:
1343 1343 ancs = self.index.commonancestorsheads(*revs)
1344 1344 except (AttributeError, OverflowError): # C implementation failed
1345 1345 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1346 1346 return ancs
1347 1347
1348 1348 def isancestor(self, a, b):
1349 1349 """return True if node a is an ancestor of node b
1350 1350
1351 1351 A revision is considered an ancestor of itself."""
1352 1352 a, b = self.rev(a), self.rev(b)
1353 1353 return self.isancestorrev(a, b)
1354 1354
1355 1355 def isancestorrev(self, a, b):
1356 1356 """return True if revision a is an ancestor of revision b
1357 1357
1358 1358 A revision is considered an ancestor of itself.
1359 1359
1360 1360 The implementation of this is trivial but the use of
1361 1361 reachableroots is not."""
1362 1362 if a == nullrev:
1363 1363 return True
1364 1364 elif a == b:
1365 1365 return True
1366 1366 elif a > b:
1367 1367 return False
1368 1368 return bool(self.reachableroots(a, [b], [a], includepath=False))
1369 1369
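# Sketch of the reduction above (``rl`` is a hypothetical revlog
# instance): after the trivial fast paths, ``a`` is an ancestor of ``b``
# exactly when the root set [a] is reachable from the head set [b]:
#
#   rl.isancestorrev(2, 7)
#   # is equivalent to
#   bool(rl.reachableroots(2, [7], [2], includepath=False))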
1370 1370 def reachableroots(self, minroot, heads, roots, includepath=False):
1371 1371 """return (heads(::(<roots> and <roots>::<heads>)))
1372 1372
1373 1373 If includepath is True, return (<roots>::<heads>)."""
1374 1374 try:
1375 1375 return self.index.reachableroots2(
1376 1376 minroot, heads, roots, includepath
1377 1377 )
1378 1378 except AttributeError:
1379 1379 return dagop._reachablerootspure(
1380 1380 self.parentrevs, minroot, roots, heads, includepath
1381 1381 )
1382 1382
1383 1383 def ancestor(self, a, b):
1384 1384 """calculate the "best" common ancestor of nodes a and b"""
1385 1385
1386 1386 a, b = self.rev(a), self.rev(b)
1387 1387 try:
1388 1388 ancs = self.index.ancestors(a, b)
1389 1389 except (AttributeError, OverflowError):
1390 1390 ancs = ancestor.ancestors(self.parentrevs, a, b)
1391 1391 if ancs:
1392 1392 # choose a consistent winner when there's a tie
1393 1393 return min(map(self.node, ancs))
1394 1394 return self.nullid
1395 1395
1396 1396 def _match(self, id):
1397 1397 if isinstance(id, int):
1398 1398 # rev
1399 1399 return self.node(id)
1400 1400 if len(id) == self.nodeconstants.nodelen:
1401 1401 # possibly a binary node
1402 1402 # odds of a binary node being all hex in ASCII are 1 in 10**25
1403 1403 try:
1404 1404 node = id
1405 1405 self.rev(node) # quick search the index
1406 1406 return node
1407 1407 except error.LookupError:
1408 1408 pass # may be partial hex id
1409 1409 try:
1410 1410 # str(rev)
1411 1411 rev = int(id)
1412 1412 if b"%d" % rev != id:
1413 1413 raise ValueError
1414 1414 if rev < 0:
1415 1415 rev = len(self) + rev
1416 1416 if rev < 0 or rev >= len(self):
1417 1417 raise ValueError
1418 1418 return self.node(rev)
1419 1419 except (ValueError, OverflowError):
1420 1420 pass
1421 1421 if len(id) == 2 * self.nodeconstants.nodelen:
1422 1422 try:
1423 1423 # a full hex nodeid?
1424 1424 node = bin(id)
1425 1425 self.rev(node)
1426 1426 return node
1427 1427 except (TypeError, error.LookupError):
1428 1428 pass
1429 1429
1430 1430 def _partialmatch(self, id):
1431 1431 # we don't care about wdirfilenodeids as they should always be full hashes
1432 1432 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1433 1433 try:
1434 1434 partial = self.index.partialmatch(id)
1435 1435 if partial and self.hasnode(partial):
1436 1436 if maybewdir:
1437 1437 # single 'ff...' match in radix tree, ambiguous with wdir
1438 1438 raise error.RevlogError
1439 1439 return partial
1440 1440 if maybewdir:
1441 1441 # no 'ff...' match in radix tree, wdir identified
1442 1442 raise error.WdirUnsupported
1443 1443 return None
1444 1444 except error.RevlogError:
1445 1445 # parsers.c radix tree lookup gave multiple matches
1446 1446 # fast path: for unfiltered changelog, radix tree is accurate
1447 1447 if not getattr(self, 'filteredrevs', None):
1448 1448 raise error.AmbiguousPrefixLookupError(
1449 1449 id, self.display_id, _(b'ambiguous identifier')
1450 1450 )
1451 1451 # fall through to slow path that filters hidden revisions
1452 1452 except (AttributeError, ValueError):
1453 1453 # we are pure python, or key was too short to search radix tree
1454 1454 pass
1455 1455
1456 1456 if id in self._pcache:
1457 1457 return self._pcache[id]
1458 1458
1459 1459 if len(id) <= 40:
1460 1460 try:
1461 1461 # hex(node)[:...]
1462 1462 l = len(id) // 2 # grab an even number of digits
1463 1463 prefix = bin(id[: l * 2])
1464 1464 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1465 1465 nl = [
1466 1466 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1467 1467 ]
1468 1468 if self.nodeconstants.nullhex.startswith(id):
1469 1469 nl.append(self.nullid)
1470 1470 if len(nl) > 0:
1471 1471 if len(nl) == 1 and not maybewdir:
1472 1472 self._pcache[id] = nl[0]
1473 1473 return nl[0]
1474 1474 raise error.AmbiguousPrefixLookupError(
1475 1475 id, self.display_id, _(b'ambiguous identifier')
1476 1476 )
1477 1477 if maybewdir:
1478 1478 raise error.WdirUnsupported
1479 1479 return None
1480 1480 except TypeError:
1481 1481 pass
1482 1482
1483 1483 def lookup(self, id):
1484 1484 """locate a node based on:
1485 1485 - revision number or str(revision number)
1486 1486 - nodeid or subset of hex nodeid
1487 1487 """
1488 1488 n = self._match(id)
1489 1489 if n is not None:
1490 1490 return n
1491 1491 n = self._partialmatch(id)
1492 1492 if n:
1493 1493 return n
1494 1494
1495 1495 raise error.LookupError(id, self.display_id, _(b'no match found'))
1496 1496
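# Usage sketch of the identifier forms accepted above (hypothetical
# values; unknown or ambiguous identifiers raise a LookupError variant):
#
#   rl.lookup(0)            # revision number
#   rl.lookup(b'0')         # str(revision number)
#   rl.lookup(node)         # full binary nodeid
#   rl.lookup(b'1234abcd')  # unambiguous hex nodeid prefix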
1497 1497 def shortest(self, node, minlength=1):
1498 1498 """Find the shortest unambiguous prefix that matches node."""
1499 1499
1500 1500 def isvalid(prefix):
1501 1501 try:
1502 1502 matchednode = self._partialmatch(prefix)
1503 1503 except error.AmbiguousPrefixLookupError:
1504 1504 return False
1505 1505 except error.WdirUnsupported:
1506 1506 # single 'ff...' match
1507 1507 return True
1508 1508 if matchednode is None:
1509 1509 raise error.LookupError(node, self.display_id, _(b'no node'))
1510 1510 return True
1511 1511
1512 1512 def maybewdir(prefix):
1513 1513 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1514 1514
1515 1515 hexnode = hex(node)
1516 1516
1517 1517 def disambiguate(hexnode, minlength):
1518 1518 """Disambiguate against wdirid."""
1519 1519 for length in range(minlength, len(hexnode) + 1):
1520 1520 prefix = hexnode[:length]
1521 1521 if not maybewdir(prefix):
1522 1522 return prefix
1523 1523
1524 1524 if not getattr(self, 'filteredrevs', None):
1525 1525 try:
1526 1526 length = max(self.index.shortest(node), minlength)
1527 1527 return disambiguate(hexnode, length)
1528 1528 except error.RevlogError:
1529 1529 if node != self.nodeconstants.wdirid:
1530 1530 raise error.LookupError(
1531 1531 node, self.display_id, _(b'no node')
1532 1532 )
1533 1533 except AttributeError:
1534 1534 # Fall through to pure code
1535 1535 pass
1536 1536
1537 1537 if node == self.nodeconstants.wdirid:
1538 1538 for length in range(minlength, len(hexnode) + 1):
1539 1539 prefix = hexnode[:length]
1540 1540 if isvalid(prefix):
1541 1541 return prefix
1542 1542
1543 1543 for length in range(minlength, len(hexnode) + 1):
1544 1544 prefix = hexnode[:length]
1545 1545 if isvalid(prefix):
1546 1546 return disambiguate(hexnode, length)
1547 1547
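# Minimal usage sketch (``node`` is a hypothetical node stored in this
# revlog): the returned prefix resolves back to the same node:
#
#   prefix = rl.shortest(node, minlength=4)
#   assert rl._partialmatch(prefix) == node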
1548 1548 def cmp(self, node, text):
1549 1549 """compare text with a given file revision
1550 1550
1551 1551 returns True if text is different from what is stored.
1552 1552 """
1553 1553 p1, p2 = self.parents(node)
1554 1554 return storageutil.hashrevisionsha1(text, p1, p2) != node
1555 1555
1556 1556 def _cachesegment(self, offset, data):
1557 1557 """Add a segment to the revlog cache.
1558 1558
1559 1559 Accepts an absolute offset and the data that is at that location.
1560 1560 """
1561 1561 o, d = self._chunkcache
1562 1562 # try to add to existing cache
1563 1563 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1564 1564 self._chunkcache = o, d + data
1565 1565 else:
1566 1566 self._chunkcache = offset, data
1567 1567
1568 1568 def _readsegment(self, offset, length, df=None):
1569 1569 """Load a segment of raw data from the revlog.
1570 1570
1571 1571 Accepts an absolute offset, length to read, and an optional existing
1572 1572 file handle to read from.
1573 1573
1574 1574 If an existing file handle is passed, it will be seeked and the
1575 1575 original seek position will NOT be restored.
1576 1576
1577 1577 Returns a str or buffer of raw byte data.
1578 1578
1579 1579 Raises if the requested number of bytes could not be read.
1580 1580 """
1581 1581 # Cache data both forward and backward around the requested
1582 1582 # data, in a fixed size window. This helps speed up operations
1583 1583 # involving reading the revlog backwards.
1584 1584 cachesize = self._chunkcachesize
1585 1585 realoffset = offset & ~(cachesize - 1)
1586 1586 reallength = (
1587 1587 (offset + length + cachesize) & ~(cachesize - 1)
1588 1588 ) - realoffset
1589 1589 with self._datareadfp(df) as df:
1590 1590 df.seek(realoffset)
1591 1591 d = df.read(reallength)
1592 1592
1593 1593 self._cachesegment(realoffset, d)
1594 1594 if offset != realoffset or reallength != length:
1595 1595 startoffset = offset - realoffset
1596 1596 if len(d) - startoffset < length:
1597 1597 raise error.RevlogError(
1598 1598 _(
1599 1599 b'partial read of revlog %s; expected %d bytes from '
1600 1600 b'offset %d, got %d'
1601 1601 )
1602 1602 % (
1603 1603 self._indexfile if self._inline else self._datafile,
1604 1604 length,
1605 1605 offset,
1606 1606 len(d) - startoffset,
1607 1607 )
1608 1608 )
1609 1609
1610 1610 return util.buffer(d, startoffset, length)
1611 1611
1612 1612 if len(d) < length:
1613 1613 raise error.RevlogError(
1614 1614 _(
1615 1615 b'partial read of revlog %s; expected %d bytes from offset '
1616 1616 b'%d, got %d'
1617 1617 )
1618 1618 % (
1619 1619 self._indexfile if self._inline else self._datafile,
1620 1620 length,
1621 1621 offset,
1622 1622 len(d),
1623 1623 )
1624 1624 )
1625 1625
1626 1626 return d
1627 1627
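# Worked example of the window rounding above, assuming the default
# 64 KiB chunk cache size: a 100-byte read at offset 70000 is widened to
# the aligned window [65536, 131072):
#
#   cachesize = 65536
#   realoffset = 70000 & ~(cachesize - 1)                  # 65536
#   reallength = ((70000 + 100 + cachesize)
#                 & ~(cachesize - 1)) - realoffset         # 65536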
1628 1628 def _getsegment(self, offset, length, df=None):
1629 1629 """Obtain a segment of raw data from the revlog.
1630 1630
1631 1631 Accepts an absolute offset, length of bytes to obtain, and an
1632 1632 optional file handle to the already-opened revlog. If the file
1633 1633 handle is used, its original seek position will not be preserved.
1634 1634
1635 1635 Requests for data may be returned from a cache.
1636 1636
1637 1637 Returns a str or a buffer instance of raw byte data.
1638 1638 """
1639 1639 o, d = self._chunkcache
1640 1640 l = len(d)
1641 1641
1642 1642 # is it in the cache?
1643 1643 cachestart = offset - o
1644 1644 cacheend = cachestart + length
1645 1645 if cachestart >= 0 and cacheend <= l:
1646 1646 if cachestart == 0 and cacheend == l:
1647 1647 return d # avoid a copy
1648 1648 return util.buffer(d, cachestart, cacheend - cachestart)
1649 1649
1650 1650 return self._readsegment(offset, length, df=df)
1651 1651
1652 1652 def _getsegmentforrevs(self, startrev, endrev, df=None):
1653 1653 """Obtain a segment of raw data corresponding to a range of revisions.
1654 1654
1655 1655 Accepts the start and end revisions and an optional already-open
1656 1656 file handle to be used for reading. If the file handle is used, its
1657 1657 seek position will not be preserved.
1658 1658
1659 1659 Requests for data may be satisfied by a cache.
1660 1660
1661 1661 Returns a 2-tuple of (offset, data) for the requested range of
1662 1662 revisions. Offset is the integer offset from the beginning of the
1663 1663 revlog and data is a str or buffer of the raw byte data.
1664 1664
1665 1665 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1666 1666 to determine where each revision's data begins and ends.
1667 1667 """
1668 1668 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1669 1669 # (functions are expensive).
1670 1670 index = self.index
1671 1671 istart = index[startrev]
1672 1672 start = int(istart[0] >> 16)
1673 1673 if startrev == endrev:
1674 1674 end = start + istart[1]
1675 1675 else:
1676 1676 iend = index[endrev]
1677 1677 end = int(iend[0] >> 16) + iend[1]
1678 1678
1679 1679 if self._inline:
1680 1680 start += (startrev + 1) * self.index.entry_size
1681 1681 end += (endrev + 1) * self.index.entry_size
1682 1682 length = end - start
1683 1683
1684 1684 return start, self._getsegment(start, length, df=df)
1685 1685
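# Worked example of the inline adjustment above (assuming the 64-byte v1
# index entry size): in an inline revlog, index entries and revision
# data are interleaved, so the data of rev 3 starts at
#
#   start(3) + (3 + 1) * 64
#
# because the entries for revs 0..3 all precede it in the file.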
1686 1686 def _chunk(self, rev, df=None):
1687 1687 """Obtain a single decompressed chunk for a revision.
1688 1688
1689 1689 Accepts an integer revision and an optional already-open file handle
1690 1690 to be used for reading. If used, the seek position of the file will not
1691 1691 be preserved.
1692 1692
1693 1693 Returns a str holding uncompressed data for the requested revision.
1694 1694 """
1695 1695 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1696 1696
1697 1697 def _chunks(self, revs, df=None, targetsize=None):
1698 1698 """Obtain decompressed chunks for the specified revisions.
1699 1699
1700 1700 Accepts an iterable of numeric revisions that are assumed to be in
1701 1701 ascending order. Also accepts an optional already-open file handle
1702 1702 to be used for reading. If used, the seek position of the file will
1703 1703 not be preserved.
1704 1704
1705 1705 This function is similar to calling ``self._chunk()`` multiple times,
1706 1706 but is faster.
1707 1707
1708 1708 Returns a list with decompressed data for each requested revision.
1709 1709 """
1710 1710 if not revs:
1711 1711 return []
1712 1712 start = self.start
1713 1713 length = self.length
1714 1714 inline = self._inline
1715 1715 iosize = self.index.entry_size
1716 1716 buffer = util.buffer
1717 1717
1718 1718 l = []
1719 1719 ladd = l.append
1720 1720
1721 1721 if not self._withsparseread:
1722 1722 slicedchunks = (revs,)
1723 1723 else:
1724 1724 slicedchunks = deltautil.slicechunk(
1725 1725 self, revs, targetsize=targetsize
1726 1726 )
1727 1727
1728 1728 for revschunk in slicedchunks:
1729 1729 firstrev = revschunk[0]
1730 1730 # Skip trailing revisions with empty diff
1731 1731 for lastrev in revschunk[::-1]:
1732 1732 if length(lastrev) != 0:
1733 1733 break
1734 1734
1735 1735 try:
1736 1736 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1737 1737 except OverflowError:
1738 1738 # issue4215 - we can't cache a run of chunks greater than
1739 1739 # 2G on Windows
1740 1740 return [self._chunk(rev, df=df) for rev in revschunk]
1741 1741
1742 1742 decomp = self.decompress
1743 1743 for rev in revschunk:
1744 1744 chunkstart = start(rev)
1745 1745 if inline:
1746 1746 chunkstart += (rev + 1) * iosize
1747 1747 chunklength = length(rev)
1748 1748 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1749 1749
1750 1750 return l
1751 1751
1752 1752 def _chunkclear(self):
1753 1753 """Clear the raw chunk cache."""
1754 1754 self._chunkcache = (0, b'')
1755 1755
1756 1756 def deltaparent(self, rev):
1757 1757 """return deltaparent of the given revision"""
1758 1758 base = self.index[rev][3]
1759 1759 if base == rev:
1760 1760 return nullrev
1761 1761 elif self._generaldelta:
1762 1762 return base
1763 1763 else:
1764 1764 return rev - 1
1765 1765
1766 1766 def issnapshot(self, rev):
1767 1767 """tells whether rev is a snapshot"""
1768 1768 if not self._sparserevlog:
1769 1769 return self.deltaparent(rev) == nullrev
1770 1770 elif util.safehasattr(self.index, 'issnapshot'):
1771 1771 # directly assign the method to cache the testing and access
1772 1772 self.issnapshot = self.index.issnapshot
1773 1773 return self.issnapshot(rev)
1774 1774 if rev == nullrev:
1775 1775 return True
1776 1776 entry = self.index[rev]
1777 1777 base = entry[3]
1778 1778 if base == rev:
1779 1779 return True
1780 1780 if base == nullrev:
1781 1781 return True
1782 1782 p1 = entry[5]
1783 1783 p2 = entry[6]
1784 1784 if base == p1 or base == p2:
1785 1785 return False
1786 1786 return self.issnapshot(base)
1787 1787
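# Sketch of the recursion above for a sparse revlog: a rev stored as a
# delta against something other than one of its parents is an
# intermediate snapshot exactly when its delta base is one too; e.g. if
# base(5) == 3 and rev 3 is neither parent of rev 5:
#
#   rl.issnapshot(5) == rl.issnapshot(3)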
1788 1788 def snapshotdepth(self, rev):
1789 1789 """number of snapshot in the chain before this one"""
1790 1790 if not self.issnapshot(rev):
1791 1791 raise error.ProgrammingError(b'revision %d not a snapshot')
1792 1792 return len(self._deltachain(rev)[0]) - 1
1793 1793
1794 1794 def revdiff(self, rev1, rev2):
1795 1795 """return or calculate a delta between two revisions
1796 1796
1797 1797 The delta calculated is in binary form and is intended to be written to
1798 1798 revlog data directly. So this function needs raw revision data.
1799 1799 """
1800 1800 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1801 1801 return bytes(self._chunk(rev2))
1802 1802
1803 1803 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1804 1804
1805 1805 def _processflags(self, text, flags, operation, raw=False):
1806 1806 """deprecated entry point to access flag processors"""
1807 1807 msg = b'_processflag(...) use the specialized variant'
1808 1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1809 1809 if raw:
1810 1810 return text, flagutil.processflagsraw(self, text, flags)
1811 1811 elif operation == b'read':
1812 1812 return flagutil.processflagsread(self, text, flags)
1813 1813 else: # write operation
1814 1814 return flagutil.processflagswrite(self, text, flags)
1815 1815
1816 1816 def revision(self, nodeorrev, _df=None, raw=False):
1817 1817 """return an uncompressed revision of a given node or revision
1818 1818 number.
1819 1819
1820 1820 _df - an existing file handle to read from. (internal-only)
1821 1821 raw - an optional argument specifying if the revision data is to be
1822 1822 treated as raw data when applying flag transforms. 'raw' should be set
1823 1823 to True when generating changegroups or in debug commands.
1824 1824 """
1825 1825 if raw:
1826 1826 msg = (
1827 1827 b'revlog.revision(..., raw=True) is deprecated, '
1828 1828 b'use revlog.rawdata(...)'
1829 1829 )
1830 1830 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1831 1831 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1832 1832
1833 1833 def sidedata(self, nodeorrev, _df=None):
1834 1834 """a map of extra data related to the changeset but not part of the hash
1835 1835
1836 1836 This function currently returns a dictionary. However, a more advanced
1837 1837 mapping object will likely be used in the future for more
1838 1838 efficient/lazy code.
1839 1839 """
1840 1840 return self._revisiondata(nodeorrev, _df)[1]
1841 1841
1842 1842 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1843 1843 # deal with <nodeorrev> argument type
1844 1844 if isinstance(nodeorrev, int):
1845 1845 rev = nodeorrev
1846 1846 node = self.node(rev)
1847 1847 else:
1848 1848 node = nodeorrev
1849 1849 rev = None
1850 1850
1851 1851 # fast path the special `nullid` rev
1852 1852 if node == self.nullid:
1853 1853 return b"", {}
1854 1854
1855 1855 # ``rawtext`` is the text as stored inside the revlog. Might be the
1856 1856 # revision or might need to be processed to retrieve the revision.
1857 1857 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1858 1858
1859 1859 if self.hassidedata:
1860 1860 if rev is None:
1861 1861 rev = self.rev(node)
1862 1862 sidedata = self._sidedata(rev)
1863 1863 else:
1864 1864 sidedata = {}
1865 1865
1866 1866 if raw and validated:
1867 1867 # if we don't want to process the raw text and that raw
1868 1868 # text is cached, we can exit early.
1869 1869 return rawtext, sidedata
1870 1870 if rev is None:
1871 1871 rev = self.rev(node)
1872 1872 # the revlog's flags for this revision
1873 1873 # (they usually alter its state or content)
1874 1874 flags = self.flags(rev)
1875 1875
1876 1876 if validated and flags == REVIDX_DEFAULT_FLAGS:
1877 1877 # no extra flags set, no flag processor runs, text = rawtext
1878 1878 return rawtext, sidedata
1879 1879
1880 1880 if raw:
1881 1881 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1882 1882 text = rawtext
1883 1883 else:
1884 1884 r = flagutil.processflagsread(self, rawtext, flags)
1885 1885 text, validatehash = r
1886 1886 if validatehash:
1887 1887 self.checkhash(text, node, rev=rev)
1888 1888 if not validated:
1889 1889 self._revisioncache = (node, rev, rawtext)
1890 1890
1891 1891 return text, sidedata
1892 1892
1893 1893 def _rawtext(self, node, rev, _df=None):
1894 1894 """return the possibly unvalidated rawtext for a revision
1895 1895
1896 1896 returns (rev, rawtext, validated)
1897 1897 """
1898 1898
1899 1899 # revision in the cache (could be useful to apply delta)
1900 1900 cachedrev = None
1901 1901 # An intermediate text to apply deltas to
1902 1902 basetext = None
1903 1903
1904 1904 # Check if we have the entry in cache
1905 1905 # The cache entry looks like (node, rev, rawtext)
1906 1906 if self._revisioncache:
1907 1907 if self._revisioncache[0] == node:
1908 1908 return (rev, self._revisioncache[2], True)
1909 1909 cachedrev = self._revisioncache[1]
1910 1910
1911 1911 if rev is None:
1912 1912 rev = self.rev(node)
1913 1913
1914 1914 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1915 1915 if stopped:
1916 1916 basetext = self._revisioncache[2]
1917 1917
1918 1918 # drop cache to save memory, the caller is expected to
1919 1919 # update self._revisioncache after validating the text
1920 1920 self._revisioncache = None
1921 1921
1922 1922 targetsize = None
1923 1923 rawsize = self.index[rev][2]
1924 1924 if 0 <= rawsize:
1925 1925 targetsize = 4 * rawsize
1926 1926
1927 1927 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1928 1928 if basetext is None:
1929 1929 basetext = bytes(bins[0])
1930 1930 bins = bins[1:]
1931 1931
1932 1932 rawtext = mdiff.patches(basetext, bins)
1933 1933 del basetext # let us have a chance to free memory early
1934 1934 return (rev, rawtext, False)
1935 1935
1936 1936 def _sidedata(self, rev):
1937 1937 """Return the sidedata for a given revision number."""
1938 1938 index_entry = self.index[rev]
1939 1939 sidedata_offset = index_entry[8]
1940 1940 sidedata_size = index_entry[9]
1941 1941
1942 1942 if self._inline:
1943 1943 sidedata_offset += self.index.entry_size * (1 + rev)
1944 1944 if sidedata_size == 0:
1945 1945 return {}
1946 1946
1947 1947 segment = self._getsegment(sidedata_offset, sidedata_size)
1948 1948 sidedata = sidedatautil.deserialize_sidedata(segment)
1949 1949 return sidedata
1950 1950
1951 1951 def rawdata(self, nodeorrev, _df=None):
1952 1952 """return an uncompressed raw data of a given node or revision number.
1953 1953
1954 1954 _df - an existing file handle to read from. (internal-only)
1955 1955 """
1956 1956 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1957 1957
1958 1958 def hash(self, text, p1, p2):
1959 1959 """Compute a node hash.
1960 1960
1961 1961 Available as a function so that subclasses can replace the hash
1962 1962 as needed.
1963 1963 """
1964 1964 return storageutil.hashrevisionsha1(text, p1, p2)
1965 1965
1966 1966 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1967 1967 """Check node hash integrity.
1968 1968
1969 1969 Available as a function so that subclasses can extend hash mismatch
1970 1970 behaviors as needed.
1971 1971 """
1972 1972 try:
1973 1973 if p1 is None and p2 is None:
1974 1974 p1, p2 = self.parents(node)
1975 1975 if node != self.hash(text, p1, p2):
1976 1976 # Clear the revision cache on hash failure. The revision cache
1977 1977 # only stores the raw revision and clearing the cache does have
1978 1978 # the side-effect that we won't have a cache hit when the raw
1979 1979 # revision data is accessed. But this case should be rare and
1980 1980 # it is extra work to teach the cache about the hash
1981 1981 # verification state.
1982 1982 if self._revisioncache and self._revisioncache[0] == node:
1983 1983 self._revisioncache = None
1984 1984
1985 1985 revornode = rev
1986 1986 if revornode is None:
1987 1987 revornode = templatefilters.short(hex(node))
1988 1988 raise error.RevlogError(
1989 1989 _(b"integrity check failed on %s:%s")
1990 1990 % (self.display_id, pycompat.bytestr(revornode))
1991 1991 )
1992 1992 except error.RevlogError:
1993 1993 if self._censorable and storageutil.iscensoredtext(text):
1994 1994 raise error.CensoredNodeError(self.display_id, node, text)
1995 1995 raise
1996 1996
1997 1997 def _enforceinlinesize(self, tr):
1998 1998 """Check if the revlog is too big for inline and convert if so.
1999 1999
2000 2000 This should be called after revisions are added to the revlog. If the
2001 2001 revlog has grown too large to be an inline revlog, it will convert it
2002 2002 to use separate index and data files.
2003 2003 """
2004 2004 tiprev = len(self) - 1
2005 2005 total_size = self.start(tiprev) + self.length(tiprev)
2006 2006 if not self._inline or total_size < _maxinline:
2007 2007 return
2008 2008
2009 2009 troffset = tr.findoffset(self._indexfile)
2010 2010 if troffset is None:
2011 2011 raise error.RevlogError(
2012 2012 _(b"%s not found in the transaction") % self._indexfile
2013 2013 )
2014 2014 trindex = 0
2015 2015 tr.add(self._datafile, 0)
2016 2016
2017 2017 existing_handles = False
2018 2018 if self._writinghandles is not None:
2019 2019 existing_handles = True
2020 2020 fp = self._writinghandles[0]
2021 2021 fp.flush()
2022 2022 fp.close()
2023 2023 # We can't use the cached file handle after close(). So prevent
2024 2024 # its usage.
2025 2025 self._writinghandles = None
2026 2026
2027 2027 new_dfh = self._datafp(b'w+')
2028 2028 new_dfh.truncate(0) # drop any potentially existing data
2029 2029 try:
2030 2030 with self._indexfp() as read_ifh:
2031 2031 for r in self:
2032 2032 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2033 2033 if troffset <= self.start(r):
2034 2034 trindex = r
2035 2035 new_dfh.flush()
2036 2036
2037 2037 with self.__index_new_fp() as fp:
2038 2038 self._format_flags &= ~FLAG_INLINE_DATA
2039 2039 self._inline = False
2040 2040 for i in self:
2041 2041 e = self.index.entry_binary(i)
2042 2042 if i == 0 and self._docket is None:
2043 2043 header = self._format_flags | self._format_version
2044 2044 header = self.index.pack_header(header)
2045 2045 e = header + e
2046 2046 fp.write(e)
2047 2047 if self._docket is not None:
2048 2048 self._docket.index_end = fp.tell()
2049 2049 # the temp file replaces the real index when we exit the context
2050 2050 # manager
2051 2051
2052 2052 tr.replace(self._indexfile, trindex * self.index.entry_size)
2053 2053 nodemaputil.setup_persistent_nodemap(tr, self)
2054 2054 self._chunkclear()
2055 2055
2056 2056 if existing_handles:
2057 2057 # switched from inline to conventional; reopen the index
2058 2058 ifh = self.__index_write_fp()
2059 2059 self._writinghandles = (ifh, new_dfh)
2060 2060 new_dfh = None
2061 2061 finally:
2062 2062 if new_dfh is not None:
2063 2063 new_dfh.close()
2064 2064
2065 2065 def _nodeduplicatecallback(self, transaction, node):
2066 2066 """called when trying to add a node already stored."""
2067 2067
2068 2068 @contextlib.contextmanager
2069 2069 def _writing(self, transaction):
2070 2070 if self._writinghandles is not None:
2071 2071 yield
2072 2072 else:
2073 2073 r = len(self)
2074 2074 dsize = 0
2075 2075 if r:
2076 2076 dsize = self.end(r - 1)
2077 2077 dfh = None
2078 2078 if not self._inline:
2079 2079 try:
2080 2080 dfh = self._datafp(b"r+")
2081 2081 dfh.seek(0, os.SEEK_END)
2082 2082 except IOError as inst:
2083 2083 if inst.errno != errno.ENOENT:
2084 2084 raise
2085 2085 dfh = self._datafp(b"w+")
2086 2086 transaction.add(self._datafile, dsize)
2087 2087 try:
2088 2088 isize = r * self.index.entry_size
2089 2089 ifh = self.__index_write_fp()
2090 2090 if self._inline:
2091 2091 transaction.add(self._indexfile, dsize + isize)
2092 2092 else:
2093 2093 transaction.add(self._indexfile, isize)
2094 2094 try:
2095 2095 self._writinghandles = (ifh, dfh)
2096 2096 try:
2097 2097 yield
2098 2098 if self._docket is not None:
2099 self._docket.write(transaction)
2099 self._write_docket(transaction)
2100 2100 finally:
2101 2101 self._writinghandles = None
2102 2102 finally:
2103 2103 ifh.close()
2104 2104 finally:
2105 2105 if dfh is not None:
2106 2106 dfh.close()
2107 2107
2108 def _write_docket(self, transaction):
2109 """write the current docket on disk
2110
2111 Exists as a separate method so the changelog can implement its own transaction logic
2112
2113 We could also imagine using the same transaction logic for all revlogs,
2114 since dockets are cheap."""
2115 self._docket.write(transaction)
2116
2108 2117 def addrevision(
2109 2118 self,
2110 2119 text,
2111 2120 transaction,
2112 2121 link,
2113 2122 p1,
2114 2123 p2,
2115 2124 cachedelta=None,
2116 2125 node=None,
2117 2126 flags=REVIDX_DEFAULT_FLAGS,
2118 2127 deltacomputer=None,
2119 2128 sidedata=None,
2120 2129 ):
2121 2130 """add a revision to the log
2122 2131
2123 2132 text - the revision data to add
2124 2133 transaction - the transaction object used for rollback
2125 2134 link - the linkrev data to add
2126 2135 p1, p2 - the parent nodeids of the revision
2127 2136 cachedelta - an optional precomputed delta
2128 2137 node - nodeid of revision; typically node is not specified, and it is
2129 2138 computed by default as hash(text, p1, p2); however, subclasses might
2130 2139 use a different hashing method (and override checkhash() in that case)
2131 2140 flags - the known flags to set on the revision
2132 2141 deltacomputer - an optional deltacomputer instance shared between
2133 2142 multiple calls
2134 2143 """
2135 2144 if link == nullrev:
2136 2145 raise error.RevlogError(
2137 2146 _(b"attempted to add linkrev -1 to %s") % self.display_id
2138 2147 )
2139 2148
2140 2149 if sidedata is None:
2141 2150 sidedata = {}
2142 2151 elif sidedata and not self.hassidedata:
2143 2152 raise error.ProgrammingError(
2144 2153 _(b"trying to add sidedata to a revlog who don't support them")
2145 2154 )
2146 2155
2147 2156 if flags:
2148 2157 node = node or self.hash(text, p1, p2)
2149 2158
2150 2159 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2151 2160
2152 2161 # If the flag processor modifies the revision data, ignore any provided
2153 2162 # cachedelta.
2154 2163 if rawtext != text:
2155 2164 cachedelta = None
2156 2165
2157 2166 if len(rawtext) > _maxentrysize:
2158 2167 raise error.RevlogError(
2159 2168 _(
2160 2169 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2161 2170 )
2162 2171 % (self.display_id, len(rawtext))
2163 2172 )
2164 2173
2165 2174 node = node or self.hash(rawtext, p1, p2)
2166 2175 rev = self.index.get_rev(node)
2167 2176 if rev is not None:
2168 2177 return rev
2169 2178
2170 2179 if validatehash:
2171 2180 self.checkhash(rawtext, node, p1=p1, p2=p2)
2172 2181
2173 2182 return self.addrawrevision(
2174 2183 rawtext,
2175 2184 transaction,
2176 2185 link,
2177 2186 p1,
2178 2187 p2,
2179 2188 node,
2180 2189 flags,
2181 2190 cachedelta=cachedelta,
2182 2191 deltacomputer=deltacomputer,
2183 2192 sidedata=sidedata,
2184 2193 )
2185 2194
2186 2195 def addrawrevision(
2187 2196 self,
2188 2197 rawtext,
2189 2198 transaction,
2190 2199 link,
2191 2200 p1,
2192 2201 p2,
2193 2202 node,
2194 2203 flags,
2195 2204 cachedelta=None,
2196 2205 deltacomputer=None,
2197 2206 sidedata=None,
2198 2207 ):
2199 2208 """add a raw revision with known flags, node and parents
2200 2209 useful when reusing a revision not stored in this revlog (ex: received
2201 2210 over wire, or read from an external bundle).
2202 2211 """
2203 2212 with self._writing(transaction):
2204 2213 return self._addrevision(
2205 2214 node,
2206 2215 rawtext,
2207 2216 transaction,
2208 2217 link,
2209 2218 p1,
2210 2219 p2,
2211 2220 flags,
2212 2221 cachedelta,
2213 2222 deltacomputer=deltacomputer,
2214 2223 sidedata=sidedata,
2215 2224 )
2216 2225
2217 2226 def compress(self, data):
2218 2227 """Generate a possibly-compressed representation of data."""
2219 2228 if not data:
2220 2229 return b'', data
2221 2230
2222 2231 compressed = self._compressor.compress(data)
2223 2232
2224 2233 if compressed:
2225 2234 # The revlog compressor added the header in the returned data.
2226 2235 return b'', compressed
2227 2236
2228 2237 if data[0:1] == b'\0':
2229 2238 return b'', data
2230 2239 return b'u', data
2231 2240
2232 2241 def decompress(self, data):
2233 2242 """Decompress a revlog chunk.
2234 2243
2235 2244 The chunk is expected to begin with a header identifying the
2236 2245 format type so it can be routed to an appropriate decompressor.
2237 2246 """
2238 2247 if not data:
2239 2248 return data
2240 2249
2241 2250 # Revlogs are read much more frequently than they are written and many
2242 2251 # chunks only take microseconds to decompress, so performance is
2243 2252 # important here.
2244 2253 #
2245 2254 # We can make a few assumptions about revlogs:
2246 2255 #
2247 2256 # 1) the majority of chunks will be compressed (as opposed to inline
2248 2257 # raw data).
2249 2258 # 2) decompressing *any* data will likely be at least 10x slower than
2250 2259 # returning raw inline data.
2251 2260 # 3) we want to prioritize common and officially supported compression
2252 2261 # engines
2253 2262 #
2254 2263 # It follows that we want to optimize for "decompress compressed data
2255 2264 # when encoded with common and officially supported compression engines"
2256 2265 # case over "raw data" and "data encoded by less common or non-official
2257 2266 # compression engines." That is why we have the inline lookup first
2258 2267 # followed by the compengines lookup.
2259 2268 #
2260 2269 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2261 2270 # compressed chunks. And this matters for changelog and manifest reads.
2262 2271 t = data[0:1]
2263 2272
2264 2273 if t == b'x':
2265 2274 try:
2266 2275 return _zlibdecompress(data)
2267 2276 except zlib.error as e:
2268 2277 raise error.RevlogError(
2269 2278 _(b'revlog decompress error: %s')
2270 2279 % stringutil.forcebytestr(e)
2271 2280 )
2272 2281 # '\0' is more common than 'u' so it goes first.
2273 2282 elif t == b'\0':
2274 2283 return data
2275 2284 elif t == b'u':
2276 2285 return util.buffer(data, 1)
2277 2286
2278 2287 try:
2279 2288 compressor = self._decompressors[t]
2280 2289 except KeyError:
2281 2290 try:
2282 2291 engine = util.compengines.forrevlogheader(t)
2283 2292 compressor = engine.revlogcompressor(self._compengineopts)
2284 2293 self._decompressors[t] = compressor
2285 2294 except KeyError:
2286 2295 raise error.RevlogError(
2287 2296 _(b'unknown compression type %s') % binascii.hexlify(t)
2288 2297 )
2289 2298
2290 2299 return compressor.decompress(data)
2291 2300
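# Round-trip sketch of the header convention shared by compress() and
# decompress() above (``rl`` is a hypothetical revlog instance): the
# stored chunk is ``header + packed`` and its first byte routes
# decompression (b'u' = stored plain, b'\0' = plain, b'x' = zlib,
# anything else = registered engine):
#
#   header, packed = rl.compress(rawtext)
#   assert bytes(rl.decompress(header + packed)) == rawtext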
2292 2301 def _addrevision(
2293 2302 self,
2294 2303 node,
2295 2304 rawtext,
2296 2305 transaction,
2297 2306 link,
2298 2307 p1,
2299 2308 p2,
2300 2309 flags,
2301 2310 cachedelta,
2302 2311 alwayscache=False,
2303 2312 deltacomputer=None,
2304 2313 sidedata=None,
2305 2314 ):
2306 2315 """internal function to add revisions to the log
2307 2316
2308 2317 see addrevision for argument descriptions.
2309 2318
2310 2319 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2311 2320
2312 2321 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2313 2322 be used.
2314 2323
2315 2324 invariants:
2316 2325 - rawtext is optional (can be None); if not set, cachedelta must be set.
2317 2326 if both are set, they must correspond to each other.
2318 2327 """
2319 2328 if node == self.nullid:
2320 2329 raise error.RevlogError(
2321 2330 _(b"%s: attempt to add null revision") % self.display_id
2322 2331 )
2323 2332 if (
2324 2333 node == self.nodeconstants.wdirid
2325 2334 or node in self.nodeconstants.wdirfilenodeids
2326 2335 ):
2327 2336 raise error.RevlogError(
2328 2337 _(b"%s: attempt to add wdir revision") % self.display_id
2329 2338 )
2330 2339 if self._writinghandles is None:
2331 2340 msg = b'adding revision outside `revlog._writing` context'
2332 2341 raise error.ProgrammingError(msg)
2333 2342
2334 2343 if self._inline:
2335 2344 fh = self._writinghandles[0]
2336 2345 else:
2337 2346 fh = self._writinghandles[1]
2338 2347
2339 2348 btext = [rawtext]
2340 2349
2341 2350 curr = len(self)
2342 2351 prev = curr - 1
2343 2352
2344 2353 offset = self._get_data_offset(prev)
2345 2354
2346 2355 if self._concurrencychecker:
2347 2356 ifh, dfh = self._writinghandles
2348 2357 if self._inline:
2349 2358 # offset is "as if" it were in the .d file, so we need to add on
2350 2359 # the size of the entry metadata.
2351 2360 self._concurrencychecker(
2352 2361 ifh, self._indexfile, offset + curr * self.index.entry_size
2353 2362 )
2354 2363 else:
2355 2364 # Entries in the .i are a consistent size.
2356 2365 self._concurrencychecker(
2357 2366 ifh, self._indexfile, curr * self.index.entry_size
2358 2367 )
2359 2368 self._concurrencychecker(dfh, self._datafile, offset)
2360 2369
2361 2370 p1r, p2r = self.rev(p1), self.rev(p2)
2362 2371
2363 2372 # full versions are inserted when the needed deltas
2364 2373 # become comparable to the uncompressed text
2365 2374 if rawtext is None:
2366 2375 # need the rawtext size before it is changed by flag processors,
2367 2376 # which is the non-raw size. Use revlog explicitly to avoid filelog's
2368 2377 # extra logic that might remove the metadata size.
2369 2378 textlen = mdiff.patchedsize(
2370 2379 revlog.size(self, cachedelta[0]), cachedelta[1]
2371 2380 )
2372 2381 else:
2373 2382 textlen = len(rawtext)
2374 2383
2375 2384 if deltacomputer is None:
2376 2385 deltacomputer = deltautil.deltacomputer(self)
2377 2386
2378 2387 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2379 2388
2380 2389 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2381 2390
2382 2391 if sidedata and self.hassidedata:
2383 2392 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2384 2393 sidedata_offset = offset + deltainfo.deltalen
2385 2394 else:
2386 2395 serialized_sidedata = b""
2387 2396 # Don't store the offset if the sidedata is empty; that way
2388 2397 # we can easily detect empty sidedata, and it will be no different
2389 2398 # from sidedata we add manually.
2390 2399 sidedata_offset = 0
2391 2400
2392 2401 e = (
2393 2402 offset_type(offset, flags),
2394 2403 deltainfo.deltalen,
2395 2404 textlen,
2396 2405 deltainfo.base,
2397 2406 link,
2398 2407 p1r,
2399 2408 p2r,
2400 2409 node,
2401 2410 sidedata_offset,
2402 2411 len(serialized_sidedata),
2403 2412 )
2404 2413
2405 2414 self.index.append(e)
2406 2415 entry = self.index.entry_binary(curr)
2407 2416 if curr == 0 and self._docket is None:
2408 2417 header = self._format_flags | self._format_version
2409 2418 header = self.index.pack_header(header)
2410 2419 entry = header + entry
2411 2420 self._writeentry(
2412 2421 transaction,
2413 2422 entry,
2414 2423 deltainfo.data,
2415 2424 link,
2416 2425 offset,
2417 2426 serialized_sidedata,
2418 2427 )
2419 2428
2420 2429 rawtext = btext[0]
2421 2430
2422 2431 if alwayscache and rawtext is None:
2423 2432 rawtext = deltacomputer.buildtext(revinfo, fh)
2424 2433
2425 2434 if type(rawtext) == bytes: # only accept immutable objects
2426 2435 self._revisioncache = (node, curr, rawtext)
2427 2436 self._chainbasecache[curr] = deltainfo.chainbase
2428 2437 return curr
2429 2438
2430 2439 def _get_data_offset(self, prev):
2431 2440 """Returns the current offset in the (in-transaction) data file.
2432 2441 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2433 2442 file to store that information: since sidedata can be rewritten to the
2434 2443 end of the data file within a transaction, you can have cases where, for
2435 2444 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2436 2445 to `n - 1`'s sidedata being written after `n`'s data.
2437 2446
2438 2447 TODO cache this in a docket file before getting out of experimental."""
2439 2448 if self._format_version != REVLOGV2:
2440 2449 return self.end(prev)
2441 2450
2442 2451 offset = 0
2443 2452 for rev, entry in enumerate(self.index):
2444 2453 sidedata_end = entry[8] + entry[9]
2445 2454 # Sidedata for a previous rev has potentially been written after
2446 2455 # this rev's end, so take the max.
2447 2456 offset = max(self.end(rev), offset, sidedata_end)
2448 2457 return offset
2449 2458
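
For REVLOGV2, the method above is a linear scan taking, for every revision, the maximum of its data end and its sidedata end. A minimal sketch of that computation, using a toy index of (data_end, sidedata_offset, sidedata_length) tuples rather than real index entries:

def toy_data_offset(index):
    # mirror of the scan above: sidedata may have been rewritten past a
    # later revision's data, so the next write position is the max of both
    offset = 0
    for data_end, sd_offset, sd_length in index:
        offset = max(offset, data_end, sd_offset + sd_length)
    return offset

# rev 1's sidedata was rewritten to [100, 130), past rev 2's data end (100),
# so the next write must start at offset 130
assert toy_data_offset([(40, 0, 0), (80, 100, 30), (100, 0, 0)]) == 130
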
2450 2459 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2451 2460 # Files opened in a+ mode have inconsistent behavior on various
2452 2461 # platforms. Windows requires that a file positioning call be made
2453 2462 # when the file handle transitions between reads and writes. See
2454 2463 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2455 2464 # platforms, Python or the platform itself can be buggy. Some versions
2456 2465 # of Solaris have been observed to not append at the end of the file
2457 2466 # if the file was seeked to before the end. See issue4943 for more.
2458 2467 #
2459 2468 # We work around this issue by inserting a seek() before writing.
2460 2469 # Note: This is likely not necessary on Python 3. However, because
2461 2470 # the file handle is reused for reads and may be seeked there, we need
2462 2471 # to be careful before changing this.
2463 2472 if self._writinghandles is None:
2464 2473 msg = b'adding revision outside `revlog._writing` context'
2465 2474 raise error.ProgrammingError(msg)
2466 2475 ifh, dfh = self._writinghandles
2467 2476 if self._docket is None:
2468 2477 ifh.seek(0, os.SEEK_END)
2469 2478 else:
2470 2479 ifh.seek(self._docket.index_end, os.SEEK_SET)
2471 2480 if dfh:
2472 2481 dfh.seek(0, os.SEEK_END)
2473 2482
2474 2483 curr = len(self) - 1
2475 2484 if not self._inline:
2476 2485 transaction.add(self._datafile, offset)
2477 2486 transaction.add(self._indexfile, curr * len(entry))
2478 2487 if data[0]:
2479 2488 dfh.write(data[0])
2480 2489 dfh.write(data[1])
2481 2490 if sidedata:
2482 2491 dfh.write(sidedata)
2483 2492 ifh.write(entry)
2484 2493 else:
2485 2494 offset += curr * self.index.entry_size
2486 2495 transaction.add(self._indexfile, offset)
2487 2496 ifh.write(entry)
2488 2497 ifh.write(data[0])
2489 2498 ifh.write(data[1])
2490 2499 if sidedata:
2491 2500 ifh.write(sidedata)
2492 2501 self._enforceinlinesize(transaction)
2493 2502 if self._docket is not None:
2494 2503 self._docket.index_end = self._writinghandles[0].tell()
2495 2504
2496 2505 nodemaputil.setup_persistent_nodemap(transaction, self)
2497 2506
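
The seek-before-write pattern that _writeentry applies can be shown in isolation. A small self-contained sketch (the file and its contents are made up; the real code seeks on the revlog's own handles):

import os
import tempfile

fd, path = tempfile.mkstemp()
os.write(fd, b'0123456789')
os.close(fd)

fh = open(path, 'a+b')       # append handle that is also used for reads
fh.seek(0)
fh.read(4)                   # a read moves the file position
fh.seek(0, os.SEEK_END)      # explicit re-anchor before appending, as above
fh.write(b'tail')
fh.close()
os.unlink(path)
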
2498 2507 def addgroup(
2499 2508 self,
2500 2509 deltas,
2501 2510 linkmapper,
2502 2511 transaction,
2503 2512 alwayscache=False,
2504 2513 addrevisioncb=None,
2505 2514 duplicaterevisioncb=None,
2506 2515 ):
2507 2516 """
2508 2517 add a delta group
2509 2518
2510 2519 given a set of deltas, add them to the revision log. the
2511 2520 first delta is against its parent, which should be in our
2512 2521 log, the rest are against the previous delta.
2513 2522
2514 2523 If ``addrevisioncb`` is defined, it will be called with arguments of
2515 2524 this revlog and the node that was added.
2516 2525 """
2517 2526
2518 2527 if self._adding_group:
2519 2528 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2520 2529
2521 2530 self._adding_group = True
2522 2531 empty = True
2523 2532 try:
2524 2533 with self._writing(transaction):
2525 2534 deltacomputer = deltautil.deltacomputer(self)
2526 2535 # loop through our set of deltas
2527 2536 for data in deltas:
2528 2537 (
2529 2538 node,
2530 2539 p1,
2531 2540 p2,
2532 2541 linknode,
2533 2542 deltabase,
2534 2543 delta,
2535 2544 flags,
2536 2545 sidedata,
2537 2546 ) = data
2538 2547 link = linkmapper(linknode)
2539 2548 flags = flags or REVIDX_DEFAULT_FLAGS
2540 2549
2541 2550 rev = self.index.get_rev(node)
2542 2551 if rev is not None:
2543 2552 # this can happen if two branches make the same change
2544 2553 self._nodeduplicatecallback(transaction, rev)
2545 2554 if duplicaterevisioncb:
2546 2555 duplicaterevisioncb(self, rev)
2547 2556 empty = False
2548 2557 continue
2549 2558
2550 2559 for p in (p1, p2):
2551 2560 if not self.index.has_node(p):
2552 2561 raise error.LookupError(
2553 2562 p, self.radix, _(b'unknown parent')
2554 2563 )
2555 2564
2556 2565 if not self.index.has_node(deltabase):
2557 2566 raise error.LookupError(
2558 2567 deltabase, self.display_id, _(b'unknown delta base')
2559 2568 )
2560 2569
2561 2570 baserev = self.rev(deltabase)
2562 2571
2563 2572 if baserev != nullrev and self.iscensored(baserev):
2564 2573 # if base is censored, delta must be full replacement in a
2565 2574 # single patch operation
2566 2575 hlen = struct.calcsize(b">lll")
2567 2576 oldlen = self.rawsize(baserev)
2568 2577 newlen = len(delta) - hlen
2569 2578 if delta[:hlen] != mdiff.replacediffheader(
2570 2579 oldlen, newlen
2571 2580 ):
2572 2581 raise error.CensoredBaseError(
2573 2582 self.display_id, self.node(baserev)
2574 2583 )
2575 2584
2576 2585 if not flags and self._peek_iscensored(baserev, delta):
2577 2586 flags |= REVIDX_ISCENSORED
2578 2587
2579 2588 # We assume consumers of addrevisioncb will want to retrieve
2580 2589 # the added revision, which will require a call to
2581 2590 # revision(). revision() will fast path if there is a cache
2582 2591 # hit. So, we tell _addrevision() to always cache in this case.
2583 2592 # We're only using addgroup() in the context of changegroup
2584 2593 # generation so the revision data can always be handled as raw
2585 2594 # by the flagprocessor.
2586 2595 rev = self._addrevision(
2587 2596 node,
2588 2597 None,
2589 2598 transaction,
2590 2599 link,
2591 2600 p1,
2592 2601 p2,
2593 2602 flags,
2594 2603 (baserev, delta),
2595 2604 alwayscache=alwayscache,
2596 2605 deltacomputer=deltacomputer,
2597 2606 sidedata=sidedata,
2598 2607 )
2599 2608
2600 2609 if addrevisioncb:
2601 2610 addrevisioncb(self, rev)
2602 2611 empty = False
2603 2612 finally:
2604 2613 self._adding_group = False
2605 2614 return not empty
2606 2615
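
A hypothetical use of addgroup's callbacks, recording which revisions were newly added and which were duplicates; rl, deltas and tr are placeholders for a real revlog, a delta iterator and an open transaction, so this is a sketch rather than runnable code:

added, duplicated = [], []
processed = rl.addgroup(
    deltas,
    linkmapper=lambda linknode: 0,  # toy linkrev mapping
    transaction=tr,
    addrevisioncb=lambda store, rev: added.append(rev),
    duplicaterevisioncb=lambda store, rev: duplicated.append(rev),
)
# addgroup() returns True if at least one delta was processed,
# whether it was added or recognized as a duplicate
assert processed == bool(added or duplicated)
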
2607 2616 def iscensored(self, rev):
2608 2617 """Check if a file revision is censored."""
2609 2618 if not self._censorable:
2610 2619 return False
2611 2620
2612 2621 return self.flags(rev) & REVIDX_ISCENSORED
2613 2622
2614 2623 def _peek_iscensored(self, baserev, delta):
2615 2624 """Quickly check if a delta produces a censored revision."""
2616 2625 if not self._censorable:
2617 2626 return False
2618 2627
2619 2628 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2620 2629
2621 2630 def getstrippoint(self, minlink):
2622 2631 """find the minimum rev that must be stripped to strip the linkrev
2623 2632
2624 2633 Returns a tuple containing the minimum rev and a set of all revs that
2625 2634 have linkrevs that will be broken by this strip.
2626 2635 """
2627 2636 return storageutil.resolvestripinfo(
2628 2637 minlink,
2629 2638 len(self) - 1,
2630 2639 self.headrevs(),
2631 2640 self.linkrev,
2632 2641 self.parentrevs,
2633 2642 )
2634 2643
2635 2644 def strip(self, minlink, transaction):
2636 2645 """truncate the revlog on the first revision with a linkrev >= minlink
2637 2646
2638 2647 This function is called when we're stripping revision minlink and
2639 2648 its descendants from the repository.
2640 2649
2641 2650 We have to remove all revisions with linkrev >= minlink, because
2642 2651 the equivalent changelog revisions will be renumbered after the
2643 2652 strip.
2644 2653
2645 2654 So we truncate the revlog on the first of these revisions, and
2646 2655 trust that the caller has saved the revisions that shouldn't be
2647 2656 removed and that it'll re-add them after this truncation.
2648 2657 """
2649 2658 if len(self) == 0:
2650 2659 return
2651 2660
2652 2661 rev, _ = self.getstrippoint(minlink)
2653 2662 if rev == len(self):
2654 2663 return
2655 2664
2656 2665 # first truncate the files on disk
2657 2666 end = self.start(rev)
2658 2667 if not self._inline:
2659 2668 transaction.add(self._datafile, end)
2660 2669 end = rev * self.index.entry_size
2661 2670 else:
2662 2671 end += rev * self.index.entry_size
2663 2672
2664 2673 transaction.add(self._indexfile, end)
2665 2674 if self._docket is not None:
2666 2675 # XXX we could leverage the docket while stripping. However, it is
2667 2676 # not powerful enough at the time of this comment
2668 2677 self._docket.index_end = end
2669 2678 self._docket.write(transaction, stripping=True)
2670 2679
2671 2680 # then reset internal state in memory to forget those revisions
2672 2681 self._revisioncache = None
2673 2682 self._chaininfocache = util.lrucachedict(500)
2674 2683 self._chunkclear()
2675 2684
2676 2685 del self.index[rev:-1]
2677 2686
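
A simplified, linear-history model of the getstrippoint/strip contract (the real storageutil.resolvestripinfo also walks the DAG from the heads; this toy assumes revision order matches topological order):

linkrevs = [0, 3, 1]   # toy mapping: rev -> linkrev
minlink = 2

# truncate at the first revision whose linkrev must be stripped...
strippoint = min(r for r, l in enumerate(linkrevs) if l >= minlink)
# ...which also removes later revisions with smaller linkrevs: the "broken"
# ones the caller is trusted to save and re-add after the truncation
broken = {r for r in range(strippoint, len(linkrevs)) if linkrevs[r] < minlink}
assert (strippoint, broken) == (1, {2})
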
2678 2687 def checksize(self):
2679 2688 """Check size of index and data files
2680 2689
2681 2690 return a (dd, di) tuple.
2682 2691 - dd: extra bytes for the "data" file
2683 2692 - di: extra bytes for the "index" file
2684 2693
2685 2694 A healthy revlog will return (0, 0).
2686 2695 """
2687 2696 expected = 0
2688 2697 if len(self):
2689 2698 expected = max(0, self.end(len(self) - 1))
2690 2699
2691 2700 try:
2692 2701 with self._datafp() as f:
2693 2702 f.seek(0, io.SEEK_END)
2694 2703 actual = f.tell()
2695 2704 dd = actual - expected
2696 2705 except IOError as inst:
2697 2706 if inst.errno != errno.ENOENT:
2698 2707 raise
2699 2708 dd = 0
2700 2709
2701 2710 try:
2702 2711 f = self.opener(self._indexfile)
2703 2712 f.seek(0, io.SEEK_END)
2704 2713 actual = f.tell()
2705 2714 f.close()
2706 2715 s = self.index.entry_size
2707 2716 i = max(0, actual // s)
2708 2717 di = actual - (i * s)
2709 2718 if self._inline:
2710 2719 databytes = 0
2711 2720 for r in self:
2712 2721 databytes += max(0, self.length(r))
2713 2722 dd = 0
2714 2723 di = actual - len(self) * s - databytes
2715 2724 except IOError as inst:
2716 2725 if inst.errno != errno.ENOENT:
2717 2726 raise
2718 2727 di = 0
2719 2728
2720 2729 return (dd, di)
2721 2730
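
The (dd, di) arithmetic can be replayed with made-up sizes for a non-inline revlog:

entry_size = 64                       # stand-in for index.entry_size
expected_data, actual_data = 5000, 5012
dd = actual_data - expected_data      # 12 stray bytes in the data file

actual_index = 3 * entry_size + 7     # three full entries plus 7 extra bytes
i = max(0, actual_index // entry_size)
di = actual_index - i * entry_size    # 7 trailing bytes in the index file
assert (dd, di) == (12, 7)
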
2722 2731 def files(self):
2723 2732 res = [self._indexfile]
2724 2733 if not self._inline:
2725 2734 res.append(self._datafile)
2726 2735 return res
2727 2736
2728 2737 def emitrevisions(
2729 2738 self,
2730 2739 nodes,
2731 2740 nodesorder=None,
2732 2741 revisiondata=False,
2733 2742 assumehaveparentrevisions=False,
2734 2743 deltamode=repository.CG_DELTAMODE_STD,
2735 2744 sidedata_helpers=None,
2736 2745 ):
2737 2746 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2738 2747 raise error.ProgrammingError(
2739 2748 b'unhandled value for nodesorder: %s' % nodesorder
2740 2749 )
2741 2750
2742 2751 if nodesorder is None and not self._generaldelta:
2743 2752 nodesorder = b'storage'
2744 2753
2745 2754 if (
2746 2755 not self._storedeltachains
2747 2756 and deltamode != repository.CG_DELTAMODE_PREV
2748 2757 ):
2749 2758 deltamode = repository.CG_DELTAMODE_FULL
2750 2759
2751 2760 return storageutil.emitrevisions(
2752 2761 self,
2753 2762 nodes,
2754 2763 nodesorder,
2755 2764 revlogrevisiondelta,
2756 2765 deltaparentfn=self.deltaparent,
2757 2766 candeltafn=self.candelta,
2758 2767 rawsizefn=self.rawsize,
2759 2768 revdifffn=self.revdiff,
2760 2769 flagsfn=self.flags,
2761 2770 deltamode=deltamode,
2762 2771 revisiondata=revisiondata,
2763 2772 assumehaveparentrevisions=assumehaveparentrevisions,
2764 2773 sidedata_helpers=sidedata_helpers,
2765 2774 )
2766 2775
2767 2776 DELTAREUSEALWAYS = b'always'
2768 2777 DELTAREUSESAMEREVS = b'samerevs'
2769 2778 DELTAREUSENEVER = b'never'
2770 2779
2771 2780 DELTAREUSEFULLADD = b'fulladd'
2772 2781
2773 2782 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2774 2783
2775 2784 def clone(
2776 2785 self,
2777 2786 tr,
2778 2787 destrevlog,
2779 2788 addrevisioncb=None,
2780 2789 deltareuse=DELTAREUSESAMEREVS,
2781 2790 forcedeltabothparents=None,
2782 2791 sidedata_helpers=None,
2783 2792 ):
2784 2793 """Copy this revlog to another, possibly with format changes.
2785 2794
2786 2795 The destination revlog will contain the same revisions and nodes.
2787 2796 However, it may not be bit-for-bit identical due to e.g. delta encoding
2788 2797 differences.
2789 2798
2790 2799 The ``deltareuse`` argument controls how deltas from the existing revlog
2791 2800 are preserved in the destination revlog. The argument can have the
2792 2801 following values:
2793 2802
2794 2803 DELTAREUSEALWAYS
2795 2804 Deltas will always be reused (if possible), even if the destination
2796 2805 revlog would not select the same revisions for the delta. This is the
2797 2806 fastest mode of operation.
2798 2807 DELTAREUSESAMEREVS
2799 2808 Deltas will be reused if the destination revlog would pick the same
2800 2809 revisions for the delta. This mode strikes a balance between speed
2801 2810 and optimization.
2802 2811 DELTAREUSENEVER
2803 2812 Deltas will never be reused. This is the slowest mode of execution.
2804 2813 This mode can be used to recompute deltas (e.g. if the diff/delta
2805 2814 algorithm changes).
2806 2815 DELTAREUSEFULLADD
2807 2816 Revisions will be re-added as if they were new content. This is
2808 2817 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2809 2818 e.g. large file detection and handling.
2810 2819
2811 2820 Delta computation can be slow, so the choice of delta reuse policy can
2812 2821 significantly affect run time.
2813 2822
2814 2823 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2815 2824 two extremes. Deltas will be reused if they are appropriate. But if the
2816 2825 delta could choose a better revision, it will do so. This means if you
2817 2826 are converting a non-generaldelta revlog to a generaldelta revlog,
2818 2827 deltas will be recomputed if the delta's parent isn't a parent of the
2819 2828 revision.
2820 2829
2821 2830 In addition to the delta policy, the ``forcedeltabothparents``
2822 2831 argument controls whether to force computing deltas against both parents
2823 2832 for merges. By default, the destination revlog's existing setting is kept.
2824 2833
2825 2834 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2826 2835 `sidedata_helpers`.
2827 2836 """
2828 2837 if deltareuse not in self.DELTAREUSEALL:
2829 2838 raise ValueError(
2830 2839 _(b'value for deltareuse invalid: %s') % deltareuse
2831 2840 )
2832 2841
2833 2842 if len(destrevlog):
2834 2843 raise ValueError(_(b'destination revlog is not empty'))
2835 2844
2836 2845 if getattr(self, 'filteredrevs', None):
2837 2846 raise ValueError(_(b'source revlog has filtered revisions'))
2838 2847 if getattr(destrevlog, 'filteredrevs', None):
2839 2848 raise ValueError(_(b'destination revlog has filtered revisions'))
2840 2849
2841 2850 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2842 2851 # if possible.
2843 2852 oldlazydelta = destrevlog._lazydelta
2844 2853 oldlazydeltabase = destrevlog._lazydeltabase
2845 2854 oldamd = destrevlog._deltabothparents
2846 2855
2847 2856 try:
2848 2857 if deltareuse == self.DELTAREUSEALWAYS:
2849 2858 destrevlog._lazydeltabase = True
2850 2859 destrevlog._lazydelta = True
2851 2860 elif deltareuse == self.DELTAREUSESAMEREVS:
2852 2861 destrevlog._lazydeltabase = False
2853 2862 destrevlog._lazydelta = True
2854 2863 elif deltareuse == self.DELTAREUSENEVER:
2855 2864 destrevlog._lazydeltabase = False
2856 2865 destrevlog._lazydelta = False
2857 2866
2858 2867 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2859 2868
2860 2869 self._clone(
2861 2870 tr,
2862 2871 destrevlog,
2863 2872 addrevisioncb,
2864 2873 deltareuse,
2865 2874 forcedeltabothparents,
2866 2875 sidedata_helpers,
2867 2876 )
2868 2877
2869 2878 finally:
2870 2879 destrevlog._lazydelta = oldlazydelta
2871 2880 destrevlog._lazydeltabase = oldlazydeltabase
2872 2881 destrevlog._deltabothparents = oldamd
2873 2882
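
A hypothetical invocation recomputing every delta while converting a revlog, for instance during a format upgrade; src, dst and tr are placeholders for the source revlog, an empty destination revlog and an open transaction:

src.clone(
    tr,
    dst,
    deltareuse=src.DELTAREUSENEVER,   # slowest policy: recompute all deltas
    forcedeltabothparents=True,       # also try deltas against both merge parents
)
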
2874 2883 def _clone(
2875 2884 self,
2876 2885 tr,
2877 2886 destrevlog,
2878 2887 addrevisioncb,
2879 2888 deltareuse,
2880 2889 forcedeltabothparents,
2881 2890 sidedata_helpers,
2882 2891 ):
2883 2892 """perform the core duty of `revlog.clone` after parameter processing"""
2884 2893 deltacomputer = deltautil.deltacomputer(destrevlog)
2885 2894 index = self.index
2886 2895 for rev in self:
2887 2896 entry = index[rev]
2888 2897
2889 2898 # Some classes override linkrev to take filtered revs into
2890 2899 # account. Use raw entry from index.
2891 2900 flags = entry[0] & 0xFFFF
2892 2901 linkrev = entry[4]
2893 2902 p1 = index[entry[5]][7]
2894 2903 p2 = index[entry[6]][7]
2895 2904 node = entry[7]
2896 2905
2897 2906 # (Possibly) reuse the delta from the revlog if allowed and
2898 2907 # the revlog chunk is a delta.
2899 2908 cachedelta = None
2900 2909 rawtext = None
2901 2910 if deltareuse == self.DELTAREUSEFULLADD:
2902 2911 text, sidedata = self._revisiondata(rev)
2903 2912
2904 2913 if sidedata_helpers is not None:
2905 2914 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2906 2915 self, sidedata_helpers, sidedata, rev
2907 2916 )
2908 2917 flags = flags | new_flags[0] & ~new_flags[1]
2909 2918
2910 2919 destrevlog.addrevision(
2911 2920 text,
2912 2921 tr,
2913 2922 linkrev,
2914 2923 p1,
2915 2924 p2,
2916 2925 cachedelta=cachedelta,
2917 2926 node=node,
2918 2927 flags=flags,
2919 2928 deltacomputer=deltacomputer,
2920 2929 sidedata=sidedata,
2921 2930 )
2922 2931 else:
2923 2932 if destrevlog._lazydelta:
2924 2933 dp = self.deltaparent(rev)
2925 2934 if dp != nullrev:
2926 2935 cachedelta = (dp, bytes(self._chunk(rev)))
2927 2936
2928 2937 sidedata = None
2929 2938 if not cachedelta:
2930 2939 rawtext, sidedata = self._revisiondata(rev)
2931 2940 if sidedata is None:
2932 2941 sidedata = self.sidedata(rev)
2933 2942
2934 2943 if sidedata_helpers is not None:
2935 2944 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2936 2945 self, sidedata_helpers, sidedata, rev
2937 2946 )
2938 2947 flags = flags | new_flags[0] & ~new_flags[1]
2939 2948
2940 2949 with destrevlog._writing(tr):
2941 2950 destrevlog._addrevision(
2942 2951 node,
2943 2952 rawtext,
2944 2953 tr,
2945 2954 linkrev,
2946 2955 p1,
2947 2956 p2,
2948 2957 flags,
2949 2958 cachedelta,
2950 2959 deltacomputer=deltacomputer,
2951 2960 sidedata=sidedata,
2952 2961 )
2953 2962
2954 2963 if addrevisioncb:
2955 2964 addrevisioncb(self, rev, node)
2956 2965
2957 2966 def censorrevision(self, tr, censornode, tombstone=b''):
2958 2967 if self._format_version == REVLOGV0:
2959 2968 raise error.RevlogError(
2960 2969 _(b'cannot censor with version %d revlogs')
2961 2970 % self._format_version
2962 2971 )
2963 2972
2964 2973 censorrev = self.rev(censornode)
2965 2974 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2966 2975
2967 2976 if len(tombstone) > self.rawsize(censorrev):
2968 2977 raise error.Abort(
2969 2978 _(b'censor tombstone must be no longer than censored data')
2970 2979 )
2971 2980
2972 2981 # Rewriting the revlog in place is hard. Our strategy for censoring is
2973 2982 # to create a new revlog, copy all revisions to it, then replace the
2974 2983 # revlogs on transaction close.
2975 2984 #
2976 2985 # This is a bit dangerous. We could easily have a mismatch of state.
2977 2986 newrl = revlog(
2978 2987 self.opener,
2979 2988 target=self.target,
2980 2989 radix=self.radix,
2981 2990 postfix=b'tmpcensored',
2982 2991 censorable=True,
2983 2992 )
2984 2993 newrl._format_version = self._format_version
2985 2994 newrl._format_flags = self._format_flags
2986 2995 newrl._generaldelta = self._generaldelta
2987 2996 newrl._parse_index = self._parse_index
2988 2997
2989 2998 for rev in self.revs():
2990 2999 node = self.node(rev)
2991 3000 p1, p2 = self.parents(node)
2992 3001
2993 3002 if rev == censorrev:
2994 3003 newrl.addrawrevision(
2995 3004 tombstone,
2996 3005 tr,
2997 3006 self.linkrev(censorrev),
2998 3007 p1,
2999 3008 p2,
3000 3009 censornode,
3001 3010 REVIDX_ISCENSORED,
3002 3011 )
3003 3012
3004 3013 if newrl.deltaparent(rev) != nullrev:
3005 3014 raise error.Abort(
3006 3015 _(
3007 3016 b'censored revision stored as delta; '
3008 3017 b'cannot censor'
3009 3018 ),
3010 3019 hint=_(
3011 3020 b'censoring of revlogs is not '
3012 3021 b'fully implemented; please report '
3013 3022 b'this bug'
3014 3023 ),
3015 3024 )
3016 3025 continue
3017 3026
3018 3027 if self.iscensored(rev):
3019 3028 if self.deltaparent(rev) != nullrev:
3020 3029 raise error.Abort(
3021 3030 _(
3022 3031 b'cannot censor due to censored '
3023 3032 b'revision having delta stored'
3024 3033 )
3025 3034 )
3026 3035 rawtext = self._chunk(rev)
3027 3036 else:
3028 3037 rawtext = self.rawdata(rev)
3029 3038
3030 3039 newrl.addrawrevision(
3031 3040 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3032 3041 )
3033 3042
3034 3043 tr.addbackup(self._indexfile, location=b'store')
3035 3044 if not self._inline:
3036 3045 tr.addbackup(self._datafile, location=b'store')
3037 3046
3038 3047 self.opener.rename(newrl._indexfile, self._indexfile)
3039 3048 if not self._inline:
3040 3049 self.opener.rename(newrl._datafile, self._datafile)
3041 3050
3042 3051 self.clearcaches()
3043 3052 self._loadindex()
3044 3053
3045 3054 def verifyintegrity(self, state):
3046 3055 """Verifies the integrity of the revlog.
3047 3056
3048 3057 Yields ``revlogproblem`` instances describing problems that are
3049 3058 found.
3050 3059 """
3051 3060 dd, di = self.checksize()
3052 3061 if dd:
3053 3062 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3054 3063 if di:
3055 3064 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3056 3065
3057 3066 version = self._format_version
3058 3067
3059 3068 # The verifier tells us what version revlog we should be.
3060 3069 if version != state[b'expectedversion']:
3061 3070 yield revlogproblem(
3062 3071 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3063 3072 % (self.display_id, version, state[b'expectedversion'])
3064 3073 )
3065 3074
3066 3075 state[b'skipread'] = set()
3067 3076 state[b'safe_renamed'] = set()
3068 3077
3069 3078 for rev in self:
3070 3079 node = self.node(rev)
3071 3080
3072 3081 # Verify contents. 4 cases to care about:
3073 3082 #
3074 3083 # common: the most common case
3075 3084 # rename: with a rename
3076 3085 # meta: file content starts with b'\1\n', the metadata
3077 3086 # header defined in filelog.py, but without a rename
3078 3087 # ext: content stored externally
3079 3088 #
3080 3089 # More formally, their differences are shown below:
3081 3090 #
3082 3091 # | common | rename | meta | ext
3083 3092 # -------------------------------------------------------
3084 3093 # flags() | 0 | 0 | 0 | not 0
3085 3094 # renamed() | False | True | False | ?
3086 3095 # rawtext[0:2]=='\1\n'| False | True | True | ?
3087 3096 #
3088 3097 # "rawtext" means the raw text stored in revlog data, which
3089 3098 # could be retrieved by "rawdata(rev)". "text"
3090 3099 # mentioned below is "revision(rev)".
3091 3100 #
3092 3101 # There are 3 different lengths stored physically:
3093 3102 # 1. L1: rawsize, stored in revlog index
3094 3103 # 2. L2: len(rawtext), stored in revlog data
3095 3104 # 3. L3: len(text), stored in revlog data if flags==0, or
3096 3105 # possibly somewhere else if flags!=0
3097 3106 #
3098 3107 # L1 should be equal to L2. L3 could be different from them.
3099 3108 # "text" may or may not affect commit hash depending on flag
3100 3109 # processors (see flagutil.addflagprocessor).
3101 3110 #
3102 3111 # | common | rename | meta | ext
3103 3112 # -------------------------------------------------
3104 3113 # rawsize() | L1 | L1 | L1 | L1
3105 3114 # size() | L1 | L2-LM | L1(*) | L1 (?)
3106 3115 # len(rawtext) | L2 | L2 | L2 | L2
3107 3116 # len(text) | L2 | L2 | L2 | L3
3108 3117 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3109 3118 #
3110 3119 # LM: length of metadata, depending on rawtext
3111 3120 # (*): not ideal, see comment in filelog.size
3112 3121 # (?): could be "- len(meta)" if the resolved content has
3113 3122 # rename metadata
3114 3123 #
3115 3124 # Checks needed to be done:
3116 3125 # 1. length check: L1 == L2, in all cases.
3117 3126 # 2. hash check: depending on flag processor, we may need to
3118 3127 # use either "text" (external), or "rawtext" (in revlog).
3119 3128
3120 3129 try:
3121 3130 skipflags = state.get(b'skipflags', 0)
3122 3131 if skipflags:
3123 3132 skipflags &= self.flags(rev)
3124 3133
3125 3134 _verify_revision(self, skipflags, state, node)
3126 3135
3127 3136 l1 = self.rawsize(rev)
3128 3137 l2 = len(self.rawdata(node))
3129 3138
3130 3139 if l1 != l2:
3131 3140 yield revlogproblem(
3132 3141 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3133 3142 node=node,
3134 3143 )
3135 3144
3136 3145 except error.CensoredNodeError:
3137 3146 if state[b'erroroncensored']:
3138 3147 yield revlogproblem(
3139 3148 error=_(b'censored file data'), node=node
3140 3149 )
3141 3150 state[b'skipread'].add(node)
3142 3151 except Exception as e:
3143 3152 yield revlogproblem(
3144 3153 error=_(b'unpacking %s: %s')
3145 3154 % (short(node), stringutil.forcebytestr(e)),
3146 3155 node=node,
3147 3156 )
3148 3157 state[b'skipread'].add(node)
3149 3158
3150 3159 def storageinfo(
3151 3160 self,
3152 3161 exclusivefiles=False,
3153 3162 sharedfiles=False,
3154 3163 revisionscount=False,
3155 3164 trackedsize=False,
3156 3165 storedsize=False,
3157 3166 ):
3158 3167 d = {}
3159 3168
3160 3169 if exclusivefiles:
3161 3170 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3162 3171 if not self._inline:
3163 3172 d[b'exclusivefiles'].append((self.opener, self._datafile))
3164 3173
3165 3174 if sharedfiles:
3166 3175 d[b'sharedfiles'] = []
3167 3176
3168 3177 if revisionscount:
3169 3178 d[b'revisionscount'] = len(self)
3170 3179
3171 3180 if trackedsize:
3172 3181 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3173 3182
3174 3183 if storedsize:
3175 3184 d[b'storedsize'] = sum(
3176 3185 self.opener.stat(path).st_size for path in self.files()
3177 3186 )
3178 3187
3179 3188 return d
3180 3189
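
A hypothetical query of the statistics dict built above; rl stands in for a real revlog:

info = rl.storageinfo(revisionscount=True, trackedsize=True, storedsize=True)
# keys mirror the requested flags and values are plain integers
print(info[b'revisionscount'], info[b'trackedsize'], info[b'storedsize'])
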
3181 3190 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3182 3191 if not self.hassidedata:
3183 3192 return
3184 3193 # revlog formats with sidedata support do not support inline
3185 3194 assert not self._inline
3186 3195 if not helpers[1] and not helpers[2]:
3187 3196 # Nothing to generate or remove
3188 3197 return
3189 3198
3190 # changelog implements a "delayed" writing mechanism that assumes that
3191 # all index data is written in append mode and is therefore incompatible
3192 # with the seeked writes done in this method. The use of such "delayed"
3193 # writing will soon be removed for revlog versions that support side
3194 # data, so for now, we only keep this simple assert to highlight the
3195 # situation.
3196 delayed = getattr(self, '_delayed', False)
3197 diverted = getattr(self, '_divert', False)
3198 if delayed and not diverted:
3199 msg = "cannot rewrite_sidedata of a delayed revlog"
3200 raise error.ProgrammingError(msg)
3201
3202 3199 new_entries = []
3203 3200 # append the new sidedata
3204 3201 with self._writing(transaction):
3205 3202 ifh, dfh = self._writinghandles
3206 3203 dfh.seek(0, os.SEEK_END)
3207 3204 current_offset = dfh.tell()
3208 3205 for rev in range(startrev, endrev + 1):
3209 3206 entry = self.index[rev]
3210 3207 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3211 3208 store=self,
3212 3209 sidedata_helpers=helpers,
3213 3210 sidedata={},
3214 3211 rev=rev,
3215 3212 )
3216 3213
3217 3214 serialized_sidedata = sidedatautil.serialize_sidedata(
3218 3215 new_sidedata
3219 3216 )
3220 3217 if entry[8] != 0 or entry[9] != 0:
3221 3218 # rewriting entries that already have sidedata is not
3222 3219 # supported yet, because it introduces garbage data in the
3223 3220 # revlog.
3224 3221 msg = b"rewriting existing sidedata is not supported yet"
3225 3222 raise error.Abort(msg)
3226 3223
3227 3224 # Apply (potential) flags to add and to remove after running
3228 3225 # the sidedata helpers
3229 3226 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3230 3227 entry = (new_offset_flags,) + entry[1:8]
3231 3228 entry += (current_offset, len(serialized_sidedata))
3232 3229
3233 3230 # the sidedata computation might have moved the file cursors around
3234 3231 dfh.seek(current_offset, os.SEEK_SET)
3235 3232 dfh.write(serialized_sidedata)
3236 3233 new_entries.append(entry)
3237 3234 current_offset += len(serialized_sidedata)
3238 3235
3239 3236 # rewrite the new index entries
3240 3237 ifh.seek(startrev * self.index.entry_size)
3241 3238 for i, e in enumerate(new_entries):
3242 3239 rev = startrev + i
3243 3240 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3244 3241 packed = self.index.entry_binary(rev)
3245 3242 if rev == 0 and self._docket is None:
3246 3243 header = self._format_flags | self._format_version
3247 3244 header = self.index.pack_header(header)
3248 3245 packed = header + packed
3249 3246 ifh.write(packed)
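
One subtlety in the flag update above (new_offset_flags = entry[0] | flags[0] & ~flags[1]): Python's & binds tighter than |, so the removal mask only applies to the helper-provided flags, not to the pre-existing ones. A quick check:

old, to_add, to_remove = 0b0101, 0b1010, 0b1000
# parses as old | (to_add & ~to_remove), not (old | to_add) & ~to_remove
assert old | to_add & ~to_remove == 0b0111
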
@@ -1,268 +1,268 b''
1 1 Test transaction safety
2 2 =======================
3 3
4 4 #testcases revlogv1 revlogv2
5 5
6 6 #if revlogv1
7 7
8 8 $ cat << EOF >> $HGRCPATH
9 9 > [experimental]
10 10 > revlogv2=no
11 11 > EOF
12 12
13 13 #endif
14 14
15 15 #if revlogv2
16 16
17 17 $ cat << EOF >> $HGRCPATH
18 18 > [experimental]
19 19 > revlogv2=enable-unstable-format-and-corrupt-my-data
20 20 > EOF
21 21
22 22 #endif
23 23
24 24 This tests the basic case, making sure an external process does not see
25 25 transaction content until it is committed.
26 26
27 27 # TODO: also add an external reader accessing revlog files while they are written
28 28 # (instead of during transaction finalisation)
29 29
30 30 # TODO: also add stream clone and hardlink clone happening during these transactions.
31 31
32 32 setup
33 33 -----
34 34
35 35 synchronisation+output script:
36 36
37 37 $ mkdir sync
38 38 $ mkdir output
39 39 $ mkdir script
40 40 $ HG_TEST_FILE_EXT_WAITING=$TESTTMP/sync/ext_waiting
41 41 $ export HG_TEST_FILE_EXT_WAITING
42 42 $ HG_TEST_FILE_EXT_UNLOCK=$TESTTMP/sync/ext_unlock
43 43 $ export HG_TEST_FILE_EXT_UNLOCK
44 44 $ HG_TEST_FILE_EXT_DONE=$TESTTMP/sync/ext_done
45 45 $ export HG_TEST_FILE_EXT_DONE
46 46 $ cat << EOF > script/external.sh
47 47 > #!/bin/sh
48 48 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_UNLOCK $HG_TEST_FILE_EXT_WAITING
49 > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > $TESTTMP/output/external.out
49 > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > $TESTTMP/output/external.out 2>/dev/null
50 50 > touch $HG_TEST_FILE_EXT_DONE
51 51 > EOF
52 52 $ chmod +x script/external.sh
53 53 $ cat << EOF > script/internal.sh
54 54 > #!/bin/sh
55 > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > $TESTTMP/output/internal.out
55 > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > $TESTTMP/output/internal.out 2>/dev/null
56 56 > $RUNTESTDIR/testlib/wait-on-file 5 $HG_TEST_FILE_EXT_DONE $HG_TEST_FILE_EXT_UNLOCK
57 57 > EOF
58 58 $ chmod +x script/internal.sh
59 59
60 60
61 61 Automated commands:
62 62
63 63 $ make_one_commit() {
64 64 > rm -f $TESTTMP/sync/*
65 65 > rm -f $TESTTMP/output/*
66 66 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
67 67 > echo x >> a
68 68 > $TESTTMP/script/external.sh & hg commit -m "$1"
69 69 > cat $TESTTMP/output/external.out
70 70 > cat $TESTTMP/output/internal.out
71 71 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
72 72 > }
73 73
74 74
75 75 $ make_one_pull() {
76 76 > rm -f $TESTTMP/sync/*
77 77 > rm -f $TESTTMP/output/*
78 78 > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
79 79 > echo x >> a
80 80 > $TESTTMP/script/external.sh & hg pull ../other-repo/ --rev "$1" --force --quiet
81 81 > cat $TESTTMP/output/external.out
82 82 > cat $TESTTMP/output/internal.out
83 83 > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
84 84 > }
85 85
86 86 prepare a large source to pull from:
87 87
88 88 The source is large to ensure we no longer use inline storage after the pull
89 89
90 90 $ hg init other-repo
91 91 $ hg -R other-repo debugbuilddag .+500
92 92
93 93
94 94 prepare an empty repository in which to run the test:
95 95
96 96 $ hg init repo
97 97 $ cd repo
98 98 $ touch a
99 99 $ hg add a
100 100
101 101 prepare a small extension to control the inline size
102 102
103 103 $ mkdir $TESTTMP/ext
104 104 $ cat << EOF > $TESTTMP/ext/small_inline.py
105 105 > from mercurial import revlog
106 106 > revlog._maxinline = 64 * 100
107 107 > EOF
108 108
109 109
110 110
111 111
112 112 $ cat << EOF >> $HGRCPATH
113 113 > [extensions]
114 114 > small_inline=$TESTTMP/ext/small_inline.py
115 115 > [hooks]
116 116 > pretxnclose = $TESTTMP/script/internal.sh
117 117 > EOF
118 118
119 119 check this is true for the initial commit (inline → inline)
120 120 -----------------------------------------------------------
121 121
122 122 the repository should still be inline (for relevant format)
123 123
124 124 $ make_one_commit first
125 125 pre-commit: -1
126 external: -1 (revlogv1 !)
127 external: 0 first (revlogv2 known-bad-output !)
128 internal: 0 first
126 external: -1
127 internal: 0 first (revlogv1 !)
128 internal: -1 (revlogv2 known-bad-output !)
129 129 post-tr: 0 first
130 130
131 131 #if revlogv1
132 132
133 133 $ hg debugrevlog -c | grep inline
134 134 flags : inline
135 135
136 136 #endif
137 137
138 138 check this is true for extra commit (inline → inline)
139 139 -----------------------------------------------------
140 140
141 141 the repository should still be inline (for relevant format)
142 142
143 143 #if revlogv1
144 144
145 145 $ hg debugrevlog -c | grep inline
146 146 flags : inline
147 147
148 148 #endif
149 149
150 150 $ make_one_commit second
151 151 pre-commit: 0 first
152 external: 0 first (revlogv1 !)
153 external: 1 second (revlogv2 known-bad-output !)
154 internal: 1 second
152 external: 0 first
153 internal: 1 second (revlogv1 !)
154 internal: 0 first (revlogv2 known-bad-output !)
155 155 post-tr: 1 second
156 156
157 157 #if revlogv1
158 158
159 159 $ hg debugrevlog -c | grep inline
160 160 flags : inline
161 161
162 162 #endif
163 163
164 164 check this is true for a small pull (inline → inline)
165 165 -----------------------------------------------------
166 166
167 167 the repository should still be inline (for relevant format)
168 168
169 169 #if revlogv1
170 170
171 171 $ hg debugrevlog -c | grep inline
172 172 flags : inline
173 173
174 174 #endif
175 175
176 176 $ make_one_pull 3
177 177 pre-commit: 1 second
178 178 warning: repository is unrelated
179 external: 1 second (revlogv1 !)
180 external: 5 r3 (revlogv2 known-bad-output !)
181 internal: 5 r3
179 external: 1 second
180 internal: 5 r3 (revlogv1 !)
181 internal: 1 second (revlogv2 known-bad-output !)
182 182 post-tr: 5 r3
183 183
184 184 #if revlogv1
185 185
186 186 $ hg debugrevlog -c | grep inline
187 187 flags : inline
188 188
189 189 #endif
190 190
191 191 Make a large pull (inline → no-inline)
192 192 ---------------------------------------
193 193
194 194 the repository should no longer be inline (for relevant format)
195 195
196 196 #if revlogv1
197 197
198 198 $ hg debugrevlog -c | grep inline
199 199 flags : inline
200 200
201 201 #endif
202 202
203 203 $ make_one_pull 400
204 204 pre-commit: 5 r3
205 external: 5 r3 (revlogv1 !)
206 external: 402 r400 (revlogv2 known-bad-output !)
207 internal: 402 r400
205 external: 5 r3
206 internal: 402 r400 (revlogv1 !)
207 internal: 5 r3 (revlogv2 known-bad-output !)
208 208 post-tr: 402 r400
209 209
210 210 #if revlogv1
211 211
212 212 $ hg debugrevlog -c | grep inline
213 213 [1]
214 214
215 215 #endif
216 216
217 217 check this is true for extra commit (no-inline → no-inline)
218 218 -----------------------------------------------------------
219 219
220 220 the repository should no longer be inline (for relevant format)
221 221
222 222 #if revlogv1
223 223
224 224 $ hg debugrevlog -c | grep inline
225 225 [1]
226 226
227 227 #endif
228 228
229 229 $ make_one_commit third
230 230 pre-commit: 402 r400
231 external: 402 r400 (revlogv1 !)
232 external: 403 third (revlogv2 known-bad-output !)
233 internal: 403 third
231 external: 402 r400
232 internal: 403 third (revlogv1 !)
233 internal: 402 r400 (revlogv2 known-bad-output !)
234 234 post-tr: 403 third
235 235
236 236 #if revlogv1
237 237
238 238 $ hg debugrevlog -c | grep inline
239 239 [1]
240 240
241 241 #endif
242 242
243 243
244 244 Make a pull (no-inline → no-inline)
245 245 -------------------------------------
246 246
247 247 the repository should no longer be inline (for relevant format)
248 248
249 249 #if revlogv1
250 250
251 251 $ hg debugrevlog -c | grep inline
252 252 [1]
253 253
254 254 #endif
255 255
256 256 $ make_one_pull tip
257 257 pre-commit: 403 third
258 external: 403 third (revlogv1 !)
259 external: 503 r500 (revlogv2 known-bad-output !)
260 internal: 503 r500
258 external: 403 third
259 internal: 503 r500 (revlogv1 !)
260 internal: 403 third (revlogv2 known-bad-output !)
261 261 post-tr: 503 r500
262 262
263 263 #if revlogv1
264 264
265 265 $ hg debugrevlog -c | grep inline
266 266 [1]
267 267
268 268 #endif