flagutil: move the `flagprocessors` mapping in the new module...
marmoute
r42955:05c80f9e default
@@ -1,2704 +1,2700 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullhex,
29 29 nullid,
30 30 nullrev,
31 31 short,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .revlogutils.constants import (
39 39 FLAG_GENERALDELTA,
40 40 FLAG_INLINE_DATA,
41 41 REVLOGV0,
42 42 REVLOGV1,
43 43 REVLOGV1_FLAGS,
44 44 REVLOGV2,
45 45 REVLOGV2_FLAGS,
46 46 REVLOG_DEFAULT_FLAGS,
47 47 REVLOG_DEFAULT_FORMAT,
48 48 REVLOG_DEFAULT_VERSION,
49 49 )
50 50 from .revlogutils.flagutil import (
51 51 REVIDX_DEFAULT_FLAGS,
52 52 REVIDX_ELLIPSIS,
53 53 REVIDX_EXTSTORED,
54 54 REVIDX_FLAGS_ORDER,
55 55 REVIDX_ISCENSORED,
56 56 REVIDX_KNOWN_FLAGS,
57 57 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 58 )
59 59 from .thirdparty import (
60 60 attr,
61 61 )
62 62 from . import (
63 63 ancestor,
64 64 dagop,
65 65 error,
66 66 mdiff,
67 67 policy,
68 68 pycompat,
69 69 repository,
70 70 templatefilters,
71 71 util,
72 72 )
73 73 from .revlogutils import (
74 74 deltas as deltautil,
75 flagutil,
75 76 )
76 77 from .utils import (
77 78 interfaceutil,
78 79 storageutil,
79 80 stringutil,
80 81 )
81 82
82 83 # blanked usage of all the names to prevent pyflakes constraints
83 84 # We need these names available in the module for extensions.
84 85 REVLOGV0
85 86 REVLOGV1
86 87 REVLOGV2
87 88 FLAG_INLINE_DATA
88 89 FLAG_GENERALDELTA
89 90 REVLOG_DEFAULT_FLAGS
90 91 REVLOG_DEFAULT_FORMAT
91 92 REVLOG_DEFAULT_VERSION
92 93 REVLOGV1_FLAGS
93 94 REVLOGV2_FLAGS
94 95 REVIDX_ISCENSORED
95 96 REVIDX_ELLIPSIS
96 97 REVIDX_EXTSTORED
97 98 REVIDX_DEFAULT_FLAGS
98 99 REVIDX_FLAGS_ORDER
99 100 REVIDX_KNOWN_FLAGS
100 101 REVIDX_RAWTEXT_CHANGING_FLAGS
101 102
102 103 parsers = policy.importmod(r'parsers')
103 104 rustancestor = policy.importrust(r'ancestor')
104 105 rustdagop = policy.importrust(r'dagop')
105 106
106 107 # Aliased for performance.
107 108 _zlibdecompress = zlib.decompress
108 109
109 110 # max size of revlog with inline data
110 111 _maxinline = 131072
111 112 _chunksize = 1048576
112 113
113 # Store flag processors (cf. 'addflagprocessor()' to register)
114 _flagprocessors = {
115 REVIDX_ISCENSORED: None,
116 }
117
118 114 # Flag processors for REVIDX_ELLIPSIS.
119 115 def ellipsisreadprocessor(rl, text):
120 116 return text, False
121 117
122 118 def ellipsiswriteprocessor(rl, text):
123 119 return text, False
124 120
125 121 def ellipsisrawprocessor(rl, text):
126 122 return False
127 123
128 124 ellipsisprocessor = (
129 125 ellipsisreadprocessor,
130 126 ellipsiswriteprocessor,
131 127 ellipsisrawprocessor,
132 128 )
133 129
134 130 def addflagprocessor(flag, processor):
135 131 """Register a flag processor on a revision data flag.
136 132
137 133 Invariant:
138 134 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
139 135 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
140 136 - Only one flag processor can be registered on a specific flag.
141 137 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
142 138 following signatures:
143 139 - (read) f(self, rawtext) -> text, bool
144 140 - (write) f(self, text) -> rawtext, bool
145 141 - (raw) f(self, rawtext) -> bool
146 142 "text" is presented to the user. "rawtext" is stored in revlog data, not
147 143 directly visible to the user.
148 144 The boolean returned by these transforms is used to determine whether
149 145 the returned text can be used for hash integrity checking. For example,
150 146 if "write" returns False, then "text" is used to generate the hash. If
151 147 "write" returns True, the "rawtext" returned by "write" should be used
152 148 to generate the hash. Usually, "write" and "read" return different
153 149 booleans, and "raw" returns the same boolean as "write".
154 150
155 151 Note: The 'raw' transform is used for changegroup generation and in some
156 152 debug commands. In this case the transform only indicates whether the
157 153 contents can be used for hash integrity checks.
158 154 """
159 _insertflagprocessor(flag, processor, _flagprocessors)
155 _insertflagprocessor(flag, processor, flagutil.flagprocessors)
160 156
161 157 def _insertflagprocessor(flag, processor, flagprocessors):
162 158 if not flag & REVIDX_KNOWN_FLAGS:
163 159 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
164 160 raise error.ProgrammingError(msg)
165 161 if flag not in REVIDX_FLAGS_ORDER:
166 162 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
167 163 raise error.ProgrammingError(msg)
168 164 if flag in flagprocessors:
169 165 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
170 166 raise error.Abort(msg)
171 167 flagprocessors[flag] = processor
172 168
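As a standalone sketch of the (read, write, raw) contract documented above — the flag value and processor names below are hypothetical stand-ins, not Mercurial's real constants, and real code would register through addflagprocessor():

    # Hypothetical flag and registry, mirroring the invariants above.
    REVIDX_EXAMPLE = 1 << 13            # made-up flag value
    REVIDX_KNOWN_FLAGS = REVIDX_EXAMPLE
    REVIDX_FLAGS_ORDER = [REVIDX_EXAMPLE]
    flagprocessors = {}

    def readproc(rl, rawtext):
        # stored rawtext -> user-visible text; False: hash against "text"
        return rawtext.replace(b'\r\n', b'\n'), False

    def writeproc(rl, text):
        # user text -> stored rawtext; False: hash against "text"
        return text.replace(b'\n', b'\r\n'), False

    def rawproc(rl, rawtext):
        # rawtext is not usable as-is for hash integrity checks
        return False

    flagprocessors[REVIDX_EXAMPLE] = (readproc, writeproc, rawproc)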
173 169 def getoffset(q):
174 170 return int(q >> 16)
175 171
176 172 def gettype(q):
177 173 return int(q & 0xFFFF)
178 174
179 175 def offset_type(offset, type):
180 176 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
181 177 raise ValueError('unknown revlog index flags')
182 178 return int(int(offset) << 16 | type)
183 179
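The two helpers above pack a byte offset and a 16-bit flag field into one integer; a toy round trip (the REVIDX_KNOWN_FLAGS validation from offset_type() is omitted here):

    def offset_type(offset, type):
        return int(int(offset) << 16 | type)

    packed = offset_type(4096, 5)
    assert packed >> 16 == 4096     # getoffset(): offset in the high bits
    assert packed & 0xFFFF == 5     # gettype(): flags in the low 16 bits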
184 180 @attr.s(slots=True, frozen=True)
185 181 class _revisioninfo(object):
186 182 """Information about a revision that allows building its fulltext
187 183 node: expected hash of the revision
188 184 p1, p2: parent revs of the revision
189 185 btext: built text cache consisting of a one-element list
190 186 cachedelta: (baserev, uncompressed_delta) or None
191 187 flags: flags associated with the revision storage
192 188
193 189 One of btext[0] or cachedelta must be set.
194 190 """
195 191 node = attr.ib()
196 192 p1 = attr.ib()
197 193 p2 = attr.ib()
198 194 btext = attr.ib()
199 195 textlen = attr.ib()
200 196 cachedelta = attr.ib()
201 197 flags = attr.ib()
202 198
203 199 @interfaceutil.implementer(repository.irevisiondelta)
204 200 @attr.s(slots=True)
205 201 class revlogrevisiondelta(object):
206 202 node = attr.ib()
207 203 p1node = attr.ib()
208 204 p2node = attr.ib()
209 205 basenode = attr.ib()
210 206 flags = attr.ib()
211 207 baserevisionsize = attr.ib()
212 208 revision = attr.ib()
213 209 delta = attr.ib()
214 210 linknode = attr.ib(default=None)
215 211
216 212 @interfaceutil.implementer(repository.iverifyproblem)
217 213 @attr.s(frozen=True)
218 214 class revlogproblem(object):
219 215 warning = attr.ib(default=None)
220 216 error = attr.ib(default=None)
221 217 node = attr.ib(default=None)
222 218
223 219 # index v0:
224 220 # 4 bytes: offset
225 221 # 4 bytes: compressed length
226 222 # 4 bytes: base rev
227 223 # 4 bytes: link rev
228 224 # 20 bytes: parent 1 nodeid
229 225 # 20 bytes: parent 2 nodeid
230 226 # 20 bytes: nodeid
231 227 indexformatv0 = struct.Struct(">4l20s20s20s")
232 228 indexformatv0_pack = indexformatv0.pack
233 229 indexformatv0_unpack = indexformatv0.unpack
234 230
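A quick standalone illustration of the 76-byte v0 record layout described above, using made-up node values:

    import struct

    indexformatv0 = struct.Struct(">4l20s20s20s")
    entry = indexformatv0.pack(
        0,             # offset
        11,            # compressed length
        -1,            # base rev
        0,             # link rev
        b'\x11' * 20,  # parent 1 nodeid (made up)
        b'\x00' * 20,  # parent 2 nodeid (nullid)
        b'\x22' * 20)  # this revision's nodeid (made up)
    assert indexformatv0.size == len(entry) == 76
    offset, length, base, link, p1, p2, node = indexformatv0.unpack(entry)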
235 231 class revlogoldindex(list):
236 232 def __getitem__(self, i):
237 233 if i == -1:
238 234 return (0, 0, 0, -1, -1, -1, -1, nullid)
239 235 return list.__getitem__(self, i)
240 236
241 237 class revlogoldio(object):
242 238 def __init__(self):
243 239 self.size = indexformatv0.size
244 240
245 241 def parseindex(self, data, inline):
246 242 s = self.size
247 243 index = []
248 244 nodemap = {nullid: nullrev}
249 245 n = off = 0
250 246 l = len(data)
251 247 while off + s <= l:
252 248 cur = data[off:off + s]
253 249 off += s
254 250 e = indexformatv0_unpack(cur)
255 251 # transform to revlogv1 format
256 252 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
257 253 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
258 254 index.append(e2)
259 255 nodemap[e[6]] = n
260 256 n += 1
261 257
262 258 return revlogoldindex(index), nodemap, None
263 259
264 260 def packentry(self, entry, node, version, rev):
265 261 if gettype(entry[0]):
266 262 raise error.RevlogError(_('index entry flags need revlog '
267 263 'version 1'))
268 264 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
269 265 node(entry[5]), node(entry[6]), entry[7])
270 266 return indexformatv0_pack(*e2)
271 267
272 268 # index ng:
273 269 # 6 bytes: offset
274 270 # 2 bytes: flags
275 271 # 4 bytes: compressed length
276 272 # 4 bytes: uncompressed length
277 273 # 4 bytes: base rev
278 274 # 4 bytes: link rev
279 275 # 4 bytes: parent 1 rev
280 276 # 4 bytes: parent 2 rev
281 277 # 32 bytes: nodeid
282 278 indexformatng = struct.Struct(">Qiiiiii20s12x")
283 279 indexformatng_pack = indexformatng.pack
284 280 versionformat = struct.Struct(">I")
285 281 versionformat_pack = versionformat.pack
286 282 versionformat_unpack = versionformat.unpack
287 283
288 284 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
289 285 # signed integer)
290 286 _maxentrysize = 0x7fffffff
291 287
292 288 class revlogio(object):
293 289 def __init__(self):
294 290 self.size = indexformatng.size
295 291
296 292 def parseindex(self, data, inline):
297 293 # call the C implementation to parse the index data
298 294 index, cache = parsers.parse_index2(data, inline)
299 295 return index, getattr(index, 'nodemap', None), cache
300 296
301 297 def packentry(self, entry, node, version, rev):
302 298 p = indexformatng_pack(*entry)
303 299 if rev == 0:
304 300 p = versionformat_pack(version) + p[4:]
305 301 return p
306 302
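packentry() above shows that a revlog index has no separate header: for rev 0, the first four bytes of the packed entry are overwritten with the version word. A toy rendering, with constant values assumed to match those imported at the top of the module:

    import struct

    versionformat = struct.Struct(">I")
    REVLOGV1 = 1                  # assumed value
    FLAG_INLINE_DATA = 1 << 16    # assumed value

    fakeentry = b'\x00' * 64      # a packed indexformatng record is 64 bytes
    header = versionformat.pack(REVLOGV1 | FLAG_INLINE_DATA) + fakeentry[4:]
    assert versionformat.unpack(header[:4])[0] & 0xFFFF == REVLOGV1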
307 303 class revlog(object):
308 304 """
309 305 the underlying revision storage object
310 306
311 307 A revlog consists of two parts, an index and the revision data.
312 308
313 309 The index is a file with a fixed record size containing
314 310 information on each revision, including its nodeid (hash), the
315 311 nodeids of its parents, the position and offset of its data within
316 312 the data file, and the revision it's based on. Finally, each entry
317 313 contains a linkrev entry that can serve as a pointer to external
318 314 data.
319 315
320 316 The revision data itself is a linear collection of data chunks.
321 317 Each chunk represents a revision and is usually represented as a
322 318 delta against the previous chunk. To bound lookup time, runs of
323 319 deltas are limited to about 2 times the length of the original
324 320 version data. This makes retrieval of a version proportional to
325 321 its size, or O(1) relative to the number of revisions.
326 322
327 323 Both pieces of the revlog are written to in an append-only
328 324 fashion, which means we never need to rewrite a file to insert or
329 325 remove data, and can use some simple techniques to avoid the need
330 326 for locking while reading.
331 327
332 328 If checkambig, indexfile is opened with checkambig=True at
333 329 writing, to avoid file stat ambiguity.
334 330
335 331 If mmaplargeindex is True, and an mmapindexthreshold is set, the
336 332 index will be mmapped rather than read if it is larger than the
337 333 configured threshold.
338 334
339 335 If censorable is True, the revlog can have censored revisions.
340 336
341 337 If `upperboundcomp` is not None, this is the expected maximal gain from
342 338 compression for the data content.
343 339 """
344 340 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
345 341 mmaplargeindex=False, censorable=False,
346 342 upperboundcomp=None):
347 343 """
348 344 create a revlog object
349 345
350 346 opener is a function that abstracts the file opening operation
351 347 and can be used to implement COW semantics or the like.
352 348
353 349 """
354 350 self.upperboundcomp = upperboundcomp
355 351 self.indexfile = indexfile
356 352 self.datafile = datafile or (indexfile[:-2] + ".d")
357 353 self.opener = opener
358 354 # When True, indexfile is opened with checkambig=True at writing, to
359 355 # avoid file stat ambiguity.
360 356 self._checkambig = checkambig
361 357 self._mmaplargeindex = mmaplargeindex
362 358 self._censorable = censorable
363 359 # 3-tuple of (node, rev, text) for a raw revision.
364 360 self._revisioncache = None
365 361 # Maps rev to chain base rev.
366 362 self._chainbasecache = util.lrucachedict(100)
367 363 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
368 364 self._chunkcache = (0, '')
369 365 # How much data to read and cache into the raw revlog data cache.
370 366 self._chunkcachesize = 65536
371 367 self._maxchainlen = None
372 368 self._deltabothparents = True
373 369 self.index = []
374 370 # Mapping of partial identifiers to full nodes.
375 371 self._pcache = {}
376 372 # Mapping of revision integer to full node.
377 373 self._nodecache = {nullid: nullrev}
378 374 self._nodepos = None
379 375 self._compengine = 'zlib'
380 376 self._compengineopts = {}
381 377 self._maxdeltachainspan = -1
382 378 self._withsparseread = False
383 379 self._sparserevlog = False
384 380 self._srdensitythreshold = 0.50
385 381 self._srmingapsize = 262144
386 382
387 383 # Make copy of flag processors so each revlog instance can support
388 384 # custom flags.
389 self._flagprocessors = dict(_flagprocessors)
385 self._flagprocessors = dict(flagutil.flagprocessors)
390 386
391 387 # 2-tuple of file handles being used for active writing.
392 388 self._writinghandles = None
393 389
394 390 self._loadindex()
395 391
396 392 def _loadindex(self):
397 393 mmapindexthreshold = None
398 394 opts = getattr(self.opener, 'options', {}) or {}
399 395
400 396 if 'revlogv2' in opts:
401 397 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
402 398 elif 'revlogv1' in opts:
403 399 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
404 400 if 'generaldelta' in opts:
405 401 newversionflags |= FLAG_GENERALDELTA
406 402 elif getattr(self.opener, 'options', None) is not None:
407 403 # If options are provided but no 'revlog*' is found, the repository
408 404 # would have no 'requires' file in it, which means we have to
409 405 # stick to the old format.
410 406 newversionflags = REVLOGV0
411 407 else:
412 408 newversionflags = REVLOG_DEFAULT_VERSION
413 409
414 410 if 'chunkcachesize' in opts:
415 411 self._chunkcachesize = opts['chunkcachesize']
416 412 if 'maxchainlen' in opts:
417 413 self._maxchainlen = opts['maxchainlen']
418 414 if 'deltabothparents' in opts:
419 415 self._deltabothparents = opts['deltabothparents']
420 416 self._lazydelta = bool(opts.get('lazydelta', True))
421 417 self._lazydeltabase = False
422 418 if self._lazydelta:
423 419 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
424 420 if 'compengine' in opts:
425 421 self._compengine = opts['compengine']
426 422 if 'zlib.level' in opts:
427 423 self._compengineopts['zlib.level'] = opts['zlib.level']
428 424 if 'zstd.level' in opts:
429 425 self._compengineopts['zstd.level'] = opts['zstd.level']
430 426 if 'maxdeltachainspan' in opts:
431 427 self._maxdeltachainspan = opts['maxdeltachainspan']
432 428 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
433 429 mmapindexthreshold = opts['mmapindexthreshold']
434 430 self._sparserevlog = bool(opts.get('sparse-revlog', False))
435 431 withsparseread = bool(opts.get('with-sparse-read', False))
436 432 # sparse-revlog forces sparse-read
437 433 self._withsparseread = self._sparserevlog or withsparseread
438 434 if 'sparse-read-density-threshold' in opts:
439 435 self._srdensitythreshold = opts['sparse-read-density-threshold']
440 436 if 'sparse-read-min-gap-size' in opts:
441 437 self._srmingapsize = opts['sparse-read-min-gap-size']
442 438 if opts.get('enableellipsis'):
443 439 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
444 440
445 441 # revlog v0 doesn't have flag processors
446 442 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
447 443 _insertflagprocessor(flag, processor, self._flagprocessors)
448 444
449 445 if self._chunkcachesize <= 0:
450 446 raise error.RevlogError(_('revlog chunk cache size %r is not '
451 447 'greater than 0') % self._chunkcachesize)
452 448 elif self._chunkcachesize & (self._chunkcachesize - 1):
453 449 raise error.RevlogError(_('revlog chunk cache size %r is not a '
454 450 'power of 2') % self._chunkcachesize)
455 451
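The second check relies on the standard bit trick: a power of two has exactly one bit set, so size & (size - 1) clears it and yields zero. For example:

    for size in (65536, 65535, 1, 3):
        ispow2 = size > 0 and size & (size - 1) == 0
        print(size, ispow2)   # 65536 True, 65535 False, 1 True, 3 False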
456 452 indexdata = ''
457 453 self._initempty = True
458 454 try:
459 455 with self._indexfp() as f:
460 456 if (mmapindexthreshold is not None and
461 457 self.opener.fstat(f).st_size >= mmapindexthreshold):
462 458 # TODO: should .close() to release resources without
463 459 # relying on Python GC
464 460 indexdata = util.buffer(util.mmapread(f))
465 461 else:
466 462 indexdata = f.read()
467 463 if len(indexdata) > 0:
468 464 versionflags = versionformat_unpack(indexdata[:4])[0]
469 465 self._initempty = False
470 466 else:
471 467 versionflags = newversionflags
472 468 except IOError as inst:
473 469 if inst.errno != errno.ENOENT:
474 470 raise
475 471
476 472 versionflags = newversionflags
477 473
478 474 self.version = versionflags
479 475
480 476 flags = versionflags & ~0xFFFF
481 477 fmt = versionflags & 0xFFFF
482 478
483 479 if fmt == REVLOGV0:
484 480 if flags:
485 481 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
486 482 'revlog %s') %
487 483 (flags >> 16, fmt, self.indexfile))
488 484
489 485 self._inline = False
490 486 self._generaldelta = False
491 487
492 488 elif fmt == REVLOGV1:
493 489 if flags & ~REVLOGV1_FLAGS:
494 490 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
495 491 'revlog %s') %
496 492 (flags >> 16, fmt, self.indexfile))
497 493
498 494 self._inline = versionflags & FLAG_INLINE_DATA
499 495 self._generaldelta = versionflags & FLAG_GENERALDELTA
500 496
501 497 elif fmt == REVLOGV2:
502 498 if flags & ~REVLOGV2_FLAGS:
503 499 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
504 500 'revlog %s') %
505 501 (flags >> 16, fmt, self.indexfile))
506 502
507 503 self._inline = versionflags & FLAG_INLINE_DATA
508 504 # generaldelta implied by version 2 revlogs.
509 505 self._generaldelta = True
510 506
511 507 else:
512 508 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
513 509 (fmt, self.indexfile))
514 510 # sparse-revlog can't be on without general-delta (issue6056)
515 511 if not self._generaldelta:
516 512 self._sparserevlog = False
517 513
518 514 self._storedeltachains = True
519 515
520 516 self._io = revlogio()
521 517 if self.version == REVLOGV0:
522 518 self._io = revlogoldio()
523 519 try:
524 520 d = self._io.parseindex(indexdata, self._inline)
525 521 except (ValueError, IndexError):
526 522 raise error.RevlogError(_("index %s is corrupted") %
527 523 self.indexfile)
528 524 self.index, nodemap, self._chunkcache = d
529 525 if nodemap is not None:
530 526 self.nodemap = self._nodecache = nodemap
531 527 if not self._chunkcache:
532 528 self._chunkclear()
533 529 # revnum -> (chain-length, sum-delta-length)
534 530 self._chaininfocache = {}
535 531 # revlog header -> revlog compressor
536 532 self._decompressors = {}
537 533
538 534 @util.propertycache
539 535 def _compressor(self):
540 536 engine = util.compengines[self._compengine]
541 537 return engine.revlogcompressor(self._compengineopts)
542 538
543 539 def _indexfp(self, mode='r'):
544 540 """file object for the revlog's index file"""
545 541 args = {r'mode': mode}
546 542 if mode != 'r':
547 543 args[r'checkambig'] = self._checkambig
548 544 if mode == 'w':
549 545 args[r'atomictemp'] = True
550 546 return self.opener(self.indexfile, **args)
551 547
552 548 def _datafp(self, mode='r'):
553 549 """file object for the revlog's data file"""
554 550 return self.opener(self.datafile, mode=mode)
555 551
556 552 @contextlib.contextmanager
557 553 def _datareadfp(self, existingfp=None):
558 554 """file object suitable to read data"""
559 555 # Use explicit file handle, if given.
560 556 if existingfp is not None:
561 557 yield existingfp
562 558
563 559 # Use a file handle being actively used for writes, if available.
564 560 # There is some danger in doing this because reads will seek the
565 561 # file. However, _writeentry() performs a SEEK_END before all writes,
566 562 # so we should be safe.
567 563 elif self._writinghandles:
568 564 if self._inline:
569 565 yield self._writinghandles[0]
570 566 else:
571 567 yield self._writinghandles[1]
572 568
573 569 # Otherwise open a new file handle.
574 570 else:
575 571 if self._inline:
576 572 func = self._indexfp
577 573 else:
578 574 func = self._datafp
579 575 with func() as fp:
580 576 yield fp
581 577
582 578 def tip(self):
583 579 return self.node(len(self.index) - 1)
584 580 def __contains__(self, rev):
585 581 return 0 <= rev < len(self)
586 582 def __len__(self):
587 583 return len(self.index)
588 584 def __iter__(self):
589 585 return iter(pycompat.xrange(len(self)))
590 586 def revs(self, start=0, stop=None):
591 587 """iterate over all rev in this revlog (from start to stop)"""
592 588 return storageutil.iterrevs(len(self), start=start, stop=stop)
593 589
594 590 @util.propertycache
595 591 def nodemap(self):
596 592 if self.index:
597 593 # populate mapping down to the initial node
598 594 node0 = self.index[0][7] # get around changelog filtering
599 595 self.rev(node0)
600 596 return self._nodecache
601 597
602 598 def hasnode(self, node):
603 599 try:
604 600 self.rev(node)
605 601 return True
606 602 except KeyError:
607 603 return False
608 604
609 605 def candelta(self, baserev, rev):
610 606 """whether two revisions (baserev, rev) can be delta-ed or not"""
611 607 # Disable delta if either rev requires a content-changing flag
612 608 # processor (ex. LFS). This is because such a flag processor can alter
613 609 # the rawtext content that the delta will be based on, and two clients
614 610 # could have the same revlog node with different flags (i.e. different
615 611 # rawtext contents) and the delta could be incompatible.
616 612 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
617 613 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
618 614 return False
619 615 return True
620 616
621 617 def clearcaches(self):
622 618 self._revisioncache = None
623 619 self._chainbasecache.clear()
624 620 self._chunkcache = (0, '')
625 621 self._pcache = {}
626 622
627 623 try:
628 624 # If we are using the native C version, we are in a fun case
629 625 # where self.index, self.nodemap and self._nodecache are the same
630 626 # object.
631 627 self._nodecache.clearcaches()
632 628 except AttributeError:
633 629 self._nodecache = {nullid: nullrev}
634 630 self._nodepos = None
635 631
636 632 def rev(self, node):
637 633 try:
638 634 return self._nodecache[node]
639 635 except TypeError:
640 636 raise
641 637 except error.RevlogError:
642 638 # parsers.c radix tree lookup failed
643 639 if node == wdirid or node in wdirfilenodeids:
644 640 raise error.WdirUnsupported
645 641 raise error.LookupError(node, self.indexfile, _('no node'))
646 642 except KeyError:
647 643 # pure python cache lookup failed
648 644 n = self._nodecache
649 645 i = self.index
650 646 p = self._nodepos
651 647 if p is None:
652 648 p = len(i) - 1
653 649 else:
654 650 assert p < len(i)
655 651 for r in pycompat.xrange(p, -1, -1):
656 652 v = i[r][7]
657 653 n[v] = r
658 654 if v == node:
659 655 self._nodepos = r - 1
660 656 return r
661 657 if node == wdirid or node in wdirfilenodeids:
662 658 raise error.WdirUnsupported
663 659 raise error.LookupError(node, self.indexfile, _('no node'))
664 660
665 661 # Accessors for index entries.
666 662
667 663 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
668 664 # are flags.
669 665 def start(self, rev):
670 666 return int(self.index[rev][0] >> 16)
671 667
672 668 def flags(self, rev):
673 669 return self.index[rev][0] & 0xFFFF
674 670
675 671 def length(self, rev):
676 672 return self.index[rev][1]
677 673
678 674 def rawsize(self, rev):
679 675 """return the length of the uncompressed text for a given revision"""
680 676 l = self.index[rev][2]
681 677 if l >= 0:
682 678 return l
683 679
684 680 t = self.revision(rev, raw=True)
685 681 return len(t)
686 682
687 683 def size(self, rev):
688 684 """length of non-raw text (processed by a "read" flag processor)"""
689 685 # fast path: if no "read" flag processor could change the content,
690 686 # size is rawsize. note: ELLIPSIS is known to not change the content.
691 687 flags = self.flags(rev)
692 688 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
693 689 return self.rawsize(rev)
694 690
695 691 return len(self.revision(rev, raw=False))
696 692
697 693 def chainbase(self, rev):
698 694 base = self._chainbasecache.get(rev)
699 695 if base is not None:
700 696 return base
701 697
702 698 index = self.index
703 699 iterrev = rev
704 700 base = index[iterrev][3]
705 701 while base != iterrev:
706 702 iterrev = base
707 703 base = index[iterrev][3]
708 704
709 705 self._chainbasecache[rev] = base
710 706 return base
711 707
712 708 def linkrev(self, rev):
713 709 return self.index[rev][4]
714 710
715 711 def parentrevs(self, rev):
716 712 try:
717 713 entry = self.index[rev]
718 714 except IndexError:
719 715 if rev == wdirrev:
720 716 raise error.WdirUnsupported
721 717 raise
722 718
723 719 return entry[5], entry[6]
724 720
725 721 # fast parentrevs(rev) where rev isn't filtered
726 722 _uncheckedparentrevs = parentrevs
727 723
728 724 def node(self, rev):
729 725 try:
730 726 return self.index[rev][7]
731 727 except IndexError:
732 728 if rev == wdirrev:
733 729 raise error.WdirUnsupported
734 730 raise
735 731
736 732 # Derived from index values.
737 733
738 734 def end(self, rev):
739 735 return self.start(rev) + self.length(rev)
740 736
741 737 def parents(self, node):
742 738 i = self.index
743 739 d = i[self.rev(node)]
744 740 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
745 741
746 742 def chainlen(self, rev):
747 743 return self._chaininfo(rev)[0]
748 744
749 745 def _chaininfo(self, rev):
750 746 chaininfocache = self._chaininfocache
751 747 if rev in chaininfocache:
752 748 return chaininfocache[rev]
753 749 index = self.index
754 750 generaldelta = self._generaldelta
755 751 iterrev = rev
756 752 e = index[iterrev]
757 753 clen = 0
758 754 compresseddeltalen = 0
759 755 while iterrev != e[3]:
760 756 clen += 1
761 757 compresseddeltalen += e[1]
762 758 if generaldelta:
763 759 iterrev = e[3]
764 760 else:
765 761 iterrev -= 1
766 762 if iterrev in chaininfocache:
767 763 t = chaininfocache[iterrev]
768 764 clen += t[0]
769 765 compresseddeltalen += t[1]
770 766 break
771 767 e = index[iterrev]
772 768 else:
773 769 # Add text length of base since decompressing that also takes
774 770 # work. For cache hits the length is already included.
775 771 compresseddeltalen += e[1]
776 772 r = (clen, compresseddeltalen)
777 773 chaininfocache[rev] = r
778 774 return r
779 775
780 776 def _deltachain(self, rev, stoprev=None):
781 777 """Obtain the delta chain for a revision.
782 778
783 779 ``stoprev`` specifies a revision to stop at. If not specified, we
784 780 stop at the base of the chain.
785 781
786 782 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
787 783 revs in ascending order and ``stopped`` is a bool indicating whether
788 784 ``stoprev`` was hit.
789 785 """
790 786 # Try C implementation.
791 787 try:
792 788 return self.index.deltachain(rev, stoprev, self._generaldelta)
793 789 except AttributeError:
794 790 pass
795 791
796 792 chain = []
797 793
798 794 # Alias to prevent attribute lookup in tight loop.
799 795 index = self.index
800 796 generaldelta = self._generaldelta
801 797
802 798 iterrev = rev
803 799 e = index[iterrev]
804 800 while iterrev != e[3] and iterrev != stoprev:
805 801 chain.append(iterrev)
806 802 if generaldelta:
807 803 iterrev = e[3]
808 804 else:
809 805 iterrev -= 1
810 806 e = index[iterrev]
811 807
812 808 if iterrev == stoprev:
813 809 stopped = True
814 810 else:
815 811 chain.append(iterrev)
816 812 stopped = False
817 813
818 814 chain.reverse()
819 815 return chain, stopped
820 816
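A toy version of the pure-Python walk above (stoprev handling omitted): with generaldelta, entry[3] names each revision's delta base, and a revision that is its own base is a full snapshot ending the chain:

    # rev -> delta-base rev; a rev equal to its base is a full snapshot.
    bases = {0: 0, 1: 0, 2: 1, 3: 3, 4: 3}

    def deltachain(rev):
        chain = []
        while bases[rev] != rev:
            chain.append(rev)
            rev = bases[rev]
        chain.append(rev)
        chain.reverse()
        return chain

    print(deltachain(2))   # [0, 1, 2]
    print(deltachain(4))   # [3, 4]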
821 817 def ancestors(self, revs, stoprev=0, inclusive=False):
822 818 """Generate the ancestors of 'revs' in reverse revision order.
823 819 Does not generate revs lower than stoprev.
824 820
825 821 See the documentation for ancestor.lazyancestors for more details."""
826 822
827 823 # first, make sure start revisions aren't filtered
828 824 revs = list(revs)
829 825 checkrev = self.node
830 826 for r in revs:
831 827 checkrev(r)
832 828 # and we're sure ancestors aren't filtered as well
833 829
834 830 if rustancestor is not None:
835 831 lazyancestors = rustancestor.LazyAncestors
836 832 arg = self.index
837 833 elif util.safehasattr(parsers, 'rustlazyancestors'):
838 834 lazyancestors = ancestor.rustlazyancestors
839 835 arg = self.index
840 836 else:
841 837 lazyancestors = ancestor.lazyancestors
842 838 arg = self._uncheckedparentrevs
843 839 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
844 840
845 841 def descendants(self, revs):
846 842 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
847 843
848 844 def findcommonmissing(self, common=None, heads=None):
849 845 """Return a tuple of the ancestors of common and the ancestors of heads
850 846 that are not ancestors of common. In revset terminology, we return the
851 847 tuple:
852 848
853 849 ::common, (::heads) - (::common)
854 850
855 851 The list is sorted by revision number, meaning it is
856 852 topologically sorted.
857 853
858 854 'heads' and 'common' are both lists of node IDs. If heads is
859 855 not supplied, uses all of the revlog's heads. If common is not
860 856 supplied, uses nullid."""
861 857 if common is None:
862 858 common = [nullid]
863 859 if heads is None:
864 860 heads = self.heads()
865 861
866 862 common = [self.rev(n) for n in common]
867 863 heads = [self.rev(n) for n in heads]
868 864
869 865 # we want the ancestors, but inclusive
870 866 class lazyset(object):
871 867 def __init__(self, lazyvalues):
872 868 self.addedvalues = set()
873 869 self.lazyvalues = lazyvalues
874 870
875 871 def __contains__(self, value):
876 872 return value in self.addedvalues or value in self.lazyvalues
877 873
878 874 def __iter__(self):
879 875 added = self.addedvalues
880 876 for r in added:
881 877 yield r
882 878 for r in self.lazyvalues:
883 879 if r not in added:
884 880 yield r
885 881
886 882 def add(self, value):
887 883 self.addedvalues.add(value)
888 884
889 885 def update(self, values):
890 886 self.addedvalues.update(values)
891 887
892 888 has = lazyset(self.ancestors(common))
893 889 has.add(nullrev)
894 890 has.update(common)
895 891
896 892 # take all ancestors from heads that aren't in has
897 893 missing = set()
898 894 visit = collections.deque(r for r in heads if r not in has)
899 895 while visit:
900 896 r = visit.popleft()
901 897 if r in missing:
902 898 continue
903 899 else:
904 900 missing.add(r)
905 901 for p in self.parentrevs(r):
906 902 if p not in has:
907 903 visit.append(p)
908 904 missing = list(missing)
909 905 missing.sort()
910 906 return has, [self.node(miss) for miss in missing]
911 907
912 908 def incrementalmissingrevs(self, common=None):
913 909 """Return an object that can be used to incrementally compute the
914 910 revision numbers of the ancestors of arbitrary sets that are not
915 911 ancestors of common. This is an ancestor.incrementalmissingancestors
916 912 object.
917 913
918 914 'common' is a list of revision numbers. If common is not supplied, uses
919 915 nullrev.
920 916 """
921 917 if common is None:
922 918 common = [nullrev]
923 919
924 920 if rustancestor is not None:
925 921 return rustancestor.MissingAncestors(self.index, common)
926 922 return ancestor.incrementalmissingancestors(self.parentrevs, common)
927 923
928 924 def findmissingrevs(self, common=None, heads=None):
929 925 """Return the revision numbers of the ancestors of heads that
930 926 are not ancestors of common.
931 927
932 928 More specifically, return a list of revision numbers corresponding to
933 929 nodes N such that every N satisfies the following constraints:
934 930
935 931 1. N is an ancestor of some node in 'heads'
936 932 2. N is not an ancestor of any node in 'common'
937 933
938 934 The list is sorted by revision number, meaning it is
939 935 topologically sorted.
940 936
941 937 'heads' and 'common' are both lists of revision numbers. If heads is
942 938 not supplied, uses all of the revlog's heads. If common is not
943 939 supplied, uses nullid."""
944 940 if common is None:
945 941 common = [nullrev]
946 942 if heads is None:
947 943 heads = self.headrevs()
948 944
949 945 inc = self.incrementalmissingrevs(common=common)
950 946 return inc.missingancestors(heads)
951 947
952 948 def findmissing(self, common=None, heads=None):
953 949 """Return the ancestors of heads that are not ancestors of common.
954 950
955 951 More specifically, return a list of nodes N such that every N
956 952 satisfies the following constraints:
957 953
958 954 1. N is an ancestor of some node in 'heads'
959 955 2. N is not an ancestor of any node in 'common'
960 956
961 957 The list is sorted by revision number, meaning it is
962 958 topologically sorted.
963 959
964 960 'heads' and 'common' are both lists of node IDs. If heads is
965 961 not supplied, uses all of the revlog's heads. If common is not
966 962 supplied, uses nullid."""
967 963 if common is None:
968 964 common = [nullid]
969 965 if heads is None:
970 966 heads = self.heads()
971 967
972 968 common = [self.rev(n) for n in common]
973 969 heads = [self.rev(n) for n in heads]
974 970
975 971 inc = self.incrementalmissingrevs(common=common)
976 972 return [self.node(r) for r in inc.missingancestors(heads)]
977 973
978 974 def nodesbetween(self, roots=None, heads=None):
979 975 """Return a topological path from 'roots' to 'heads'.
980 976
981 977 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
982 978 topologically sorted list of all nodes N that satisfy both of
983 979 these constraints:
984 980
985 981 1. N is a descendant of some node in 'roots'
986 982 2. N is an ancestor of some node in 'heads'
987 983
988 984 Every node is considered to be both a descendant and an ancestor
989 985 of itself, so every reachable node in 'roots' and 'heads' will be
990 986 included in 'nodes'.
991 987
992 988 'outroots' is the list of reachable nodes in 'roots', i.e., the
993 989 subset of 'roots' that is returned in 'nodes'. Likewise,
994 990 'outheads' is the subset of 'heads' that is also in 'nodes'.
995 991
996 992 'roots' and 'heads' are both lists of node IDs. If 'roots' is
997 993 unspecified, uses nullid as the only root. If 'heads' is
998 994 unspecified, uses list of all of the revlog's heads."""
999 995 nonodes = ([], [], [])
1000 996 if roots is not None:
1001 997 roots = list(roots)
1002 998 if not roots:
1003 999 return nonodes
1004 1000 lowestrev = min([self.rev(n) for n in roots])
1005 1001 else:
1006 1002 roots = [nullid] # Everybody's a descendant of nullid
1007 1003 lowestrev = nullrev
1008 1004 if (lowestrev == nullrev) and (heads is None):
1009 1005 # We want _all_ the nodes!
1010 1006 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1011 1007 if heads is None:
1012 1008 # All nodes are ancestors, so the latest ancestor is the last
1013 1009 # node.
1014 1010 highestrev = len(self) - 1
1015 1011 # Set ancestors to None to signal that every node is an ancestor.
1016 1012 ancestors = None
1017 1013 # Set heads to an empty dictionary for later discovery of heads
1018 1014 heads = {}
1019 1015 else:
1020 1016 heads = list(heads)
1021 1017 if not heads:
1022 1018 return nonodes
1023 1019 ancestors = set()
1024 1020 # Turn heads into a dictionary so we can remove 'fake' heads.
1025 1021 # Also, later we will be using it to filter out the heads we can't
1026 1022 # find from roots.
1027 1023 heads = dict.fromkeys(heads, False)
1028 1024 # Start at the top and keep marking parents until we're done.
1029 1025 nodestotag = set(heads)
1030 1026 # Remember where the top was so we can use it as a limit later.
1031 1027 highestrev = max([self.rev(n) for n in nodestotag])
1032 1028 while nodestotag:
1033 1029 # grab a node to tag
1034 1030 n = nodestotag.pop()
1035 1031 # Never tag nullid
1036 1032 if n == nullid:
1037 1033 continue
1038 1034 # A node's revision number represents its place in a
1039 1035 # topologically sorted list of nodes.
1040 1036 r = self.rev(n)
1041 1037 if r >= lowestrev:
1042 1038 if n not in ancestors:
1043 1039 # If we are possibly a descendant of one of the roots
1044 1040 # and we haven't already been marked as an ancestor
1045 1041 ancestors.add(n) # Mark as ancestor
1046 1042 # Add non-nullid parents to list of nodes to tag.
1047 1043 nodestotag.update([p for p in self.parents(n) if
1048 1044 p != nullid])
1049 1045 elif n in heads: # We've seen it before, is it a fake head?
1050 1046 # So it is, real heads should not be the ancestors of
1051 1047 # any other heads.
1052 1048 heads.pop(n)
1053 1049 if not ancestors:
1054 1050 return nonodes
1055 1051 # Now that we have our set of ancestors, we want to remove any
1056 1052 # roots that are not ancestors.
1057 1053
1058 1054 # If one of the roots was nullid, everything is included anyway.
1059 1055 if lowestrev > nullrev:
1060 1056 # But, since we weren't, let's recompute the lowest rev to not
1061 1057 # include roots that aren't ancestors.
1062 1058
1063 1059 # Filter out roots that aren't ancestors of heads
1064 1060 roots = [root for root in roots if root in ancestors]
1065 1061 # Recompute the lowest revision
1066 1062 if roots:
1067 1063 lowestrev = min([self.rev(root) for root in roots])
1068 1064 else:
1069 1065 # No more roots? Return empty list
1070 1066 return nonodes
1071 1067 else:
1072 1068 # We are descending from nullid, and don't need to care about
1073 1069 # any other roots.
1074 1070 lowestrev = nullrev
1075 1071 roots = [nullid]
1076 1072 # Transform our roots list into a set.
1077 1073 descendants = set(roots)
1078 1074 # Also, keep the original roots so we can filter out roots that aren't
1079 1075 # 'real' roots (i.e. are descended from other roots).
1080 1076 roots = descendants.copy()
1081 1077 # Our topologically sorted list of output nodes.
1082 1078 orderedout = []
1083 1079 # Don't start at nullid since we don't want nullid in our output list,
1084 1080 # and if nullid shows up in descendants, empty parents will look like
1085 1081 # they're descendants.
1086 1082 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1087 1083 n = self.node(r)
1088 1084 isdescendant = False
1089 1085 if lowestrev == nullrev: # Everybody is a descendant of nullid
1090 1086 isdescendant = True
1091 1087 elif n in descendants:
1092 1088 # n is already a descendant
1093 1089 isdescendant = True
1094 1090 # This check only needs to be done here because all the roots
1095 1091 # will start being marked as descendants before the loop.
1096 1092 if n in roots:
1097 1093 # If n was a root, check if it's a 'real' root.
1098 1094 p = tuple(self.parents(n))
1099 1095 # If any of its parents are descendants, it's not a root.
1100 1096 if (p[0] in descendants) or (p[1] in descendants):
1101 1097 roots.remove(n)
1102 1098 else:
1103 1099 p = tuple(self.parents(n))
1104 1100 # A node is a descendant if either of its parents is a
1105 1101 # descendant. (We seeded the descendants set with the roots
1106 1102 # up there, remember?)
1107 1103 if (p[0] in descendants) or (p[1] in descendants):
1108 1104 descendants.add(n)
1109 1105 isdescendant = True
1110 1106 if isdescendant and ((ancestors is None) or (n in ancestors)):
1111 1107 # Only include nodes that are both descendants and ancestors.
1112 1108 orderedout.append(n)
1113 1109 if (ancestors is not None) and (n in heads):
1114 1110 # We're trying to figure out which heads are reachable
1115 1111 # from roots.
1116 1112 # Mark this head as having been reached
1117 1113 heads[n] = True
1118 1114 elif ancestors is None:
1119 1115 # Otherwise, we're trying to discover the heads.
1120 1116 # Assume this is a head because if it isn't, the next step
1121 1117 # will eventually remove it.
1122 1118 heads[n] = True
1123 1119 # But, obviously its parents aren't.
1124 1120 for p in self.parents(n):
1125 1121 heads.pop(p, None)
1126 1122 heads = [head for head, flag in heads.iteritems() if flag]
1127 1123 roots = list(roots)
1128 1124 assert orderedout
1129 1125 assert roots
1130 1126 assert heads
1131 1127 return (orderedout, roots, heads)
1132 1128
1133 1129 def headrevs(self, revs=None):
1134 1130 if revs is None:
1135 1131 try:
1136 1132 return self.index.headrevs()
1137 1133 except AttributeError:
1138 1134 return self._headrevs()
1139 1135 if rustdagop is not None:
1140 1136 return rustdagop.headrevs(self.index, revs)
1141 1137 return dagop.headrevs(revs, self._uncheckedparentrevs)
1142 1138
1143 1139 def computephases(self, roots):
1144 1140 return self.index.computephasesmapsets(roots)
1145 1141
1146 1142 def _headrevs(self):
1147 1143 count = len(self)
1148 1144 if not count:
1149 1145 return [nullrev]
1150 1146 # we won't iter over filtered rev so nobody is a head at start
1151 1147 ishead = [0] * (count + 1)
1152 1148 index = self.index
1153 1149 for r in self:
1154 1150 ishead[r] = 1 # I may be a head
1155 1151 e = index[r]
1156 1152 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1157 1153 return [r for r, val in enumerate(ishead) if val]
1158 1154
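The same marking scheme on a toy parent table (the nullrev bookkeeping of the real code is simplified away):

    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}

    ishead = [1] * len(parents)        # everyone may be a head at first
    for r, (p1, p2) in parents.items():
        for p in (p1, p2):
            if p >= 0:
                ishead[p] = 0          # a revision with a child is not a head
    print([r for r, v in enumerate(ishead) if v])   # [2, 3]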
1159 1155 def heads(self, start=None, stop=None):
1160 1156 """return the list of all nodes that have no children
1161 1157
1162 1158 if start is specified, only heads that are descendants of
1163 1159 start will be returned
1164 1160 if stop is specified, it will consider all the revs from stop
1165 1161 as if they had no children
1166 1162 """
1167 1163 if start is None and stop is None:
1168 1164 if not len(self):
1169 1165 return [nullid]
1170 1166 return [self.node(r) for r in self.headrevs()]
1171 1167
1172 1168 if start is None:
1173 1169 start = nullrev
1174 1170 else:
1175 1171 start = self.rev(start)
1176 1172
1177 1173 stoprevs = set(self.rev(n) for n in stop or [])
1178 1174
1179 1175 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1180 1176 stoprevs=stoprevs)
1181 1177
1182 1178 return [self.node(rev) for rev in revs]
1183 1179
1184 1180 def children(self, node):
1185 1181 """find the children of a given node"""
1186 1182 c = []
1187 1183 p = self.rev(node)
1188 1184 for r in self.revs(start=p + 1):
1189 1185 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1190 1186 if prevs:
1191 1187 for pr in prevs:
1192 1188 if pr == p:
1193 1189 c.append(self.node(r))
1194 1190 elif p == nullrev:
1195 1191 c.append(self.node(r))
1196 1192 return c
1197 1193
1198 1194 def commonancestorsheads(self, a, b):
1199 1195 """calculate all the heads of the common ancestors of nodes a and b"""
1200 1196 a, b = self.rev(a), self.rev(b)
1201 1197 ancs = self._commonancestorsheads(a, b)
1202 1198 return pycompat.maplist(self.node, ancs)
1203 1199
1204 1200 def _commonancestorsheads(self, *revs):
1205 1201 """calculate all the heads of the common ancestors of revs"""
1206 1202 try:
1207 1203 ancs = self.index.commonancestorsheads(*revs)
1208 1204 except (AttributeError, OverflowError): # C implementation failed
1209 1205 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1210 1206 return ancs
1211 1207
1212 1208 def isancestor(self, a, b):
1213 1209 """return True if node a is an ancestor of node b
1214 1210
1215 1211 A revision is considered an ancestor of itself."""
1216 1212 a, b = self.rev(a), self.rev(b)
1217 1213 return self.isancestorrev(a, b)
1218 1214
1219 1215 def isancestorrev(self, a, b):
1220 1216 """return True if revision a is an ancestor of revision b
1221 1217
1222 1218 A revision is considered an ancestor of itself.
1223 1219
1224 1220 The implementation of this is trivial but the use of
1225 1221 reachableroots is not."""
1226 1222 if a == nullrev:
1227 1223 return True
1228 1224 elif a == b:
1229 1225 return True
1230 1226 elif a > b:
1231 1227 return False
1232 1228 return bool(self.reachableroots(a, [b], [a], includepath=False))
1233 1229
1234 1230 def reachableroots(self, minroot, heads, roots, includepath=False):
1235 1231 """return (heads(::<roots> and <roots>::<heads>))
1236 1232
1237 1233 If includepath is True, return (<roots>::<heads>)."""
1238 1234 try:
1239 1235 return self.index.reachableroots2(minroot, heads, roots,
1240 1236 includepath)
1241 1237 except AttributeError:
1242 1238 return dagop._reachablerootspure(self.parentrevs,
1243 1239 minroot, roots, heads, includepath)
1244 1240
1245 1241 def ancestor(self, a, b):
1246 1242 """calculate the "best" common ancestor of nodes a and b"""
1247 1243
1248 1244 a, b = self.rev(a), self.rev(b)
1249 1245 try:
1250 1246 ancs = self.index.ancestors(a, b)
1251 1247 except (AttributeError, OverflowError):
1252 1248 ancs = ancestor.ancestors(self.parentrevs, a, b)
1253 1249 if ancs:
1254 1250 # choose a consistent winner when there's a tie
1255 1251 return min(map(self.node, ancs))
1256 1252 return nullid
1257 1253
1258 1254 def _match(self, id):
1259 1255 if isinstance(id, int):
1260 1256 # rev
1261 1257 return self.node(id)
1262 1258 if len(id) == 20:
1263 1259 # possibly a binary node
1264 1260 # odds of a binary node being all hex in ASCII are 1 in 10**25
1265 1261 try:
1266 1262 node = id
1267 1263 self.rev(node) # quick search the index
1268 1264 return node
1269 1265 except error.LookupError:
1270 1266 pass # may be partial hex id
1271 1267 try:
1272 1268 # str(rev)
1273 1269 rev = int(id)
1274 1270 if "%d" % rev != id:
1275 1271 raise ValueError
1276 1272 if rev < 0:
1277 1273 rev = len(self) + rev
1278 1274 if rev < 0 or rev >= len(self):
1279 1275 raise ValueError
1280 1276 return self.node(rev)
1281 1277 except (ValueError, OverflowError):
1282 1278 pass
1283 1279 if len(id) == 40:
1284 1280 try:
1285 1281 # a full hex nodeid?
1286 1282 node = bin(id)
1287 1283 self.rev(node)
1288 1284 return node
1289 1285 except (TypeError, error.LookupError):
1290 1286 pass
1291 1287
1292 1288 def _partialmatch(self, id):
1293 1289 # we don't care about wdirfilenodeids as they should always be full hashes
1294 1290 maybewdir = wdirhex.startswith(id)
1295 1291 try:
1296 1292 partial = self.index.partialmatch(id)
1297 1293 if partial and self.hasnode(partial):
1298 1294 if maybewdir:
1299 1295 # single 'ff...' match in radix tree, ambiguous with wdir
1300 1296 raise error.RevlogError
1301 1297 return partial
1302 1298 if maybewdir:
1303 1299 # no 'ff...' match in radix tree, wdir identified
1304 1300 raise error.WdirUnsupported
1305 1301 return None
1306 1302 except error.RevlogError:
1307 1303 # parsers.c radix tree lookup gave multiple matches
1308 1304 # fast path: for unfiltered changelog, radix tree is accurate
1309 1305 if not getattr(self, 'filteredrevs', None):
1310 1306 raise error.AmbiguousPrefixLookupError(
1311 1307 id, self.indexfile, _('ambiguous identifier'))
1312 1308 # fall through to slow path that filters hidden revisions
1313 1309 except (AttributeError, ValueError):
1314 1310 # we are pure python, or key was too short to search radix tree
1315 1311 pass
1316 1312
1317 1313 if id in self._pcache:
1318 1314 return self._pcache[id]
1319 1315
1320 1316 if len(id) <= 40:
1321 1317 try:
1322 1318 # hex(node)[:...]
1323 1319 l = len(id) // 2 # grab an even number of digits
1324 1320 prefix = bin(id[:l * 2])
1325 1321 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1326 1322 nl = [n for n in nl if hex(n).startswith(id) and
1327 1323 self.hasnode(n)]
1328 1324 if nullhex.startswith(id):
1329 1325 nl.append(nullid)
1330 1326 if len(nl) > 0:
1331 1327 if len(nl) == 1 and not maybewdir:
1332 1328 self._pcache[id] = nl[0]
1333 1329 return nl[0]
1334 1330 raise error.AmbiguousPrefixLookupError(
1335 1331 id, self.indexfile, _('ambiguous identifier'))
1336 1332 if maybewdir:
1337 1333 raise error.WdirUnsupported
1338 1334 return None
1339 1335 except TypeError:
1340 1336 pass
1341 1337
1342 1338 def lookup(self, id):
1343 1339 """locate a node based on:
1344 1340 - revision number or str(revision number)
1345 1341 - nodeid or subset of hex nodeid
1346 1342 """
1347 1343 n = self._match(id)
1348 1344 if n is not None:
1349 1345 return n
1350 1346 n = self._partialmatch(id)
1351 1347 if n:
1352 1348 return n
1353 1349
1354 1350 raise error.LookupError(id, self.indexfile, _('no match found'))
1355 1351
1356 1352 def shortest(self, node, minlength=1):
1357 1353 """Find the shortest unambiguous prefix that matches node."""
1358 1354 def isvalid(prefix):
1359 1355 try:
1360 1356 matchednode = self._partialmatch(prefix)
1361 1357 except error.AmbiguousPrefixLookupError:
1362 1358 return False
1363 1359 except error.WdirUnsupported:
1364 1360 # single 'ff...' match
1365 1361 return True
1366 1362 if matchednode is None:
1367 1363 raise error.LookupError(node, self.indexfile, _('no node'))
1368 1364 return True
1369 1365
1370 1366 def maybewdir(prefix):
1371 1367 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1372 1368
1373 1369 hexnode = hex(node)
1374 1370
1375 1371 def disambiguate(hexnode, minlength):
1376 1372 """Disambiguate against wdirid."""
1377 1373 for length in range(minlength, 41):
1378 1374 prefix = hexnode[:length]
1379 1375 if not maybewdir(prefix):
1380 1376 return prefix
1381 1377
1382 1378 if not getattr(self, 'filteredrevs', None):
1383 1379 try:
1384 1380 length = max(self.index.shortest(node), minlength)
1385 1381 return disambiguate(hexnode, length)
1386 1382 except error.RevlogError:
1387 1383 if node != wdirid:
1388 1384 raise error.LookupError(node, self.indexfile, _('no node'))
1389 1385 except AttributeError:
1390 1386 # Fall through to pure code
1391 1387 pass
1392 1388
1393 1389 if node == wdirid:
1394 1390 for length in range(minlength, 41):
1395 1391 prefix = hexnode[:length]
1396 1392 if isvalid(prefix):
1397 1393 return prefix
1398 1394
1399 1395 for length in range(minlength, 41):
1400 1396 prefix = hexnode[:length]
1401 1397 if isvalid(prefix):
1402 1398 return disambiguate(hexnode, length)
1403 1399
1404 1400 def cmp(self, node, text):
1405 1401 """compare text with a given file revision
1406 1402
1407 1403 returns True if text is different from what is stored.
1408 1404 """
1409 1405 p1, p2 = self.parents(node)
1410 1406 return storageutil.hashrevisionsha1(text, p1, p2) != node
1411 1407
1412 1408 def _cachesegment(self, offset, data):
1413 1409 """Add a segment to the revlog cache.
1414 1410
1415 1411 Accepts an absolute offset and the data that is at that location.
1416 1412 """
1417 1413 o, d = self._chunkcache
1418 1414 # try to add to existing cache
1419 1415 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1420 1416 self._chunkcache = o, d + data
1421 1417 else:
1422 1418 self._chunkcache = offset, data
1423 1419
1424 1420 def _readsegment(self, offset, length, df=None):
1425 1421 """Load a segment of raw data from the revlog.
1426 1422
1427 1423 Accepts an absolute offset, length to read, and an optional existing
1428 1424 file handle to read from.
1429 1425
1430 1426 If an existing file handle is passed, it will be seeked and the
1431 1427 original seek position will NOT be restored.
1432 1428
1433 1429 Returns a str or buffer of raw byte data.
1434 1430
1435 1431 Raises if the requested number of bytes could not be read.
1436 1432 """
1437 1433 # Cache data both forward and backward around the requested
1438 1434 # data, in a fixed size window. This helps speed up operations
1439 1435 # involving reading the revlog backwards.
1440 1436 cachesize = self._chunkcachesize
1441 1437 realoffset = offset & ~(cachesize - 1)
1442 1438 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1443 1439 - realoffset)
1444 1440 with self._datareadfp(df) as df:
1445 1441 df.seek(realoffset)
1446 1442 d = df.read(reallength)
1447 1443
1448 1444 self._cachesegment(realoffset, d)
1449 1445 if offset != realoffset or reallength != length:
1450 1446 startoffset = offset - realoffset
1451 1447 if len(d) - startoffset < length:
1452 1448 raise error.RevlogError(
1453 1449 _('partial read of revlog %s; expected %d bytes from '
1454 1450 'offset %d, got %d') %
1455 1451 (self.indexfile if self._inline else self.datafile,
1456 1452 length, realoffset, len(d) - startoffset))
1457 1453
1458 1454 return util.buffer(d, startoffset, length)
1459 1455
1460 1456 if len(d) < length:
1461 1457 raise error.RevlogError(
1462 1458 _('partial read of revlog %s; expected %d bytes from offset '
1463 1459 '%d, got %d') %
1464 1460 (self.indexfile if self._inline else self.datafile,
1465 1461 length, offset, len(d)))
1466 1462
1467 1463 return d
1468 1464
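The window arithmetic above rounds the requested range out to _chunkcachesize boundaries, which works because the size is forced to a power of two in _loadindex(). Worked numbers for a 64 KiB cache:

    cachesize = 65536
    offset, length = 70000, 1000

    realoffset = offset & ~(cachesize - 1)
    reallength = ((offset + length + cachesize) & ~(cachesize - 1)) - realoffset
    print(realoffset, reallength)   # 65536 65536
    assert realoffset <= offset
    assert realoffset + reallength >= offset + length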
1469 1465 def _getsegment(self, offset, length, df=None):
1470 1466 """Obtain a segment of raw data from the revlog.
1471 1467
1472 1468 Accepts an absolute offset, length of bytes to obtain, and an
1473 1469 optional file handle to the already-opened revlog. If the file
1474 1470 handle is used, its original seek position will not be preserved.
1475 1471
1476 1472 Requests for data may be returned from a cache.
1477 1473
1478 1474 Returns a str or a buffer instance of raw byte data.
1479 1475 """
1480 1476 o, d = self._chunkcache
1481 1477 l = len(d)
1482 1478
1483 1479 # is it in the cache?
1484 1480 cachestart = offset - o
1485 1481 cacheend = cachestart + length
1486 1482 if cachestart >= 0 and cacheend <= l:
1487 1483 if cachestart == 0 and cacheend == l:
1488 1484 return d # avoid a copy
1489 1485 return util.buffer(d, cachestart, cacheend - cachestart)
1490 1486
1491 1487 return self._readsegment(offset, length, df=df)
1492 1488
1493 1489 def _getsegmentforrevs(self, startrev, endrev, df=None):
1494 1490 """Obtain a segment of raw data corresponding to a range of revisions.
1495 1491
1496 1492 Accepts the start and end revisions and an optional already-open
1497 1493 file handle to be used for reading. If the file handle is used, its
1498 1494 seek position will not be preserved.
1499 1495
1500 1496 Requests for data may be satisfied by a cache.
1501 1497
1502 1498 Returns a 2-tuple of (offset, data) for the requested range of
1503 1499 revisions. Offset is the integer offset from the beginning of the
1504 1500 revlog and data is a str or buffer of the raw byte data.
1505 1501
1506 1502 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1507 1503 to determine where each revision's data begins and ends.
1508 1504 """
1509 1505 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1510 1506 # (functions are expensive).
1511 1507 index = self.index
1512 1508 istart = index[startrev]
1513 1509 start = int(istart[0] >> 16)
1514 1510 if startrev == endrev:
1515 1511 end = start + istart[1]
1516 1512 else:
1517 1513 iend = index[endrev]
1518 1514 end = int(iend[0] >> 16) + iend[1]
1519 1515
1520 1516 if self._inline:
1521 1517 start += (startrev + 1) * self._io.size
1522 1518 end += (endrev + 1) * self._io.size
1523 1519 length = end - start
1524 1520
1525 1521 return start, self._getsegment(start, length, df=df)
1526 1522
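The inline adjustment above accounts for the interleaved layout of inline revlogs: entry 0, data 0, entry 1, data 1, and so on, so rev N's data sits (N + 1) index records past its logical start. With assumed toy numbers:

    iosize = 64          # indexformatng.size: one fixed-size record per rev
    rev = 2
    logicalstart = 500   # start(rev) as recorded in the index

    # records for revs 0..rev (rev + 1 of them) precede this rev's data
    physical = logicalstart + (rev + 1) * iosize
    print(physical)      # 692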
1527 1523 def _chunk(self, rev, df=None):
1528 1524 """Obtain a single decompressed chunk for a revision.
1529 1525
1530 1526 Accepts an integer revision and an optional already-open file handle
1531 1527 to be used for reading. If used, the seek position of the file will not
1532 1528 be preserved.
1533 1529
1534 1530 Returns a str holding uncompressed data for the requested revision.
1535 1531 """
1536 1532 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1537 1533
1538 1534 def _chunks(self, revs, df=None, targetsize=None):
1539 1535 """Obtain decompressed chunks for the specified revisions.
1540 1536
1541 1537 Accepts an iterable of numeric revisions that are assumed to be in
1542 1538 ascending order. Also accepts an optional already-open file handle
1543 1539 to be used for reading. If used, the seek position of the file will
1544 1540 not be preserved.
1545 1541
1546 1542 This function is similar to calling ``self._chunk()`` multiple times,
1547 1543 but is faster.
1548 1544
1549 1545 Returns a list with decompressed data for each requested revision.
1550 1546 """
1551 1547 if not revs:
1552 1548 return []
1553 1549 start = self.start
1554 1550 length = self.length
1555 1551 inline = self._inline
1556 1552 iosize = self._io.size
1557 1553 buffer = util.buffer
1558 1554
1559 1555 l = []
1560 1556 ladd = l.append
1561 1557
1562 1558 if not self._withsparseread:
1563 1559 slicedchunks = (revs,)
1564 1560 else:
1565 1561 slicedchunks = deltautil.slicechunk(self, revs,
1566 1562 targetsize=targetsize)
1567 1563
1568 1564 for revschunk in slicedchunks:
1569 1565 firstrev = revschunk[0]
1570 1566 # Skip trailing revisions with empty diff
1571 1567 for lastrev in revschunk[::-1]:
1572 1568 if length(lastrev) != 0:
1573 1569 break
1574 1570
1575 1571 try:
1576 1572 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1577 1573 except OverflowError:
1578 1574 # issue4215 - we can't cache a run of chunks greater than
1579 1575 # 2G on Windows
1580 1576 return [self._chunk(rev, df=df) for rev in revschunk]
1581 1577
1582 1578 decomp = self.decompress
1583 1579 for rev in revschunk:
1584 1580 chunkstart = start(rev)
1585 1581 if inline:
1586 1582 chunkstart += (rev + 1) * iosize
1587 1583 chunklength = length(rev)
1588 1584 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1589 1585
1590 1586 return l
1591 1587
1592 1588 def _chunkclear(self):
1593 1589 """Clear the raw chunk cache."""
1594 1590 self._chunkcache = (0, '')
1595 1591
1596 1592 def deltaparent(self, rev):
1597 1593 """return deltaparent of the given revision"""
1598 1594 base = self.index[rev][3]
1599 1595 if base == rev:
1600 1596 return nullrev
1601 1597 elif self._generaldelta:
1602 1598 return base
1603 1599 else:
1604 1600 return rev - 1
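Reconstructing a full text means walking ``deltaparent()`` back to a snapshot and patching forward; the real code path is ``_deltachain()`` plus ``_chunks()`` in ``_revisiondata()``, but a hedged sketch of the idea (``nullrev`` and ``mdiff`` are this module's imports):

    def reconstruct(rl, rev):
        # Illustrative only: collect the chain of deltas down to a snapshot...
        chain = []
        r = rev
        while r != nullrev:
            chain.append(r)
            r = rl.deltaparent(r)  # nullrev terminates the chain
        chain.reverse()
        # ...then apply each delta on top of the base text, oldest first.
        text = rl._chunk(chain[0])
        for r in chain[1:]:
            text = mdiff.patch(text, rl._chunk(r))
        return text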
1605 1601
1606 1602 def issnapshot(self, rev):
1607 1603 """tells whether rev is a snapshot
1608 1604 """
1609 1605 if not self._sparserevlog:
1610 1606 return self.deltaparent(rev) == nullrev
1611 1607 elif util.safehasattr(self.index, 'issnapshot'):
1612 1608 # directly assign the method to cache the testing and access
1613 1609 self.issnapshot = self.index.issnapshot
1614 1610 return self.issnapshot(rev)
1615 1611 if rev == nullrev:
1616 1612 return True
1617 1613 entry = self.index[rev]
1618 1614 base = entry[3]
1619 1615 if base == rev:
1620 1616 return True
1621 1617 if base == nullrev:
1622 1618 return True
1623 1619 p1 = entry[5]
1624 1620 p2 = entry[6]
1625 1621 if base == p1 or base == p2:
1626 1622 return False
1627 1623 return self.issnapshot(base)
1628 1624
1629 1625 def snapshotdepth(self, rev):
1630 1626 """number of snapshot in the chain before this one"""
1631 1627 if not self.issnapshot(rev):
1632 1628 raise error.ProgrammingError('revision %d not a snapshot' % rev)
1633 1629 return len(self._deltachain(rev)[0]) - 1
1634 1630
1635 1631 def revdiff(self, rev1, rev2):
1636 1632 """return or calculate a delta between two revisions
1637 1633
1638 1634 The delta calculated is in binary form and is intended to be written to
1639 1635 revlog data directly. So this function needs raw revision data.
1640 1636 """
1641 1637 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1642 1638 return bytes(self._chunk(rev2))
1643 1639
1644 1640 return mdiff.textdiff(self.revision(rev1, raw=True),
1645 1641 self.revision(rev2, raw=True))
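The binary delta returned here is a sequence of hunks, each a ``>lll`` header (start, end, replacement length) followed by the replacement bytes, the same layout ``_peek_iscensored()`` later sizes with ``struct.calcsize(">lll")``. A hedged round-trip, assuming Mercurial is importable:

    from mercurial import mdiff

    old, new = b'line one\n', b'line two\n'
    delta = mdiff.textdiff(old, new)        # binary delta as stored on disk
    assert mdiff.patch(old, delta) == new   # applying the delta restores new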
1646 1642
1647 1643 def revision(self, nodeorrev, _df=None, raw=False):
1648 1644 """return an uncompressed revision of a given node or revision
1649 1645 number.
1650 1646
1651 1647 _df - an existing file handle to read from. (internal-only)
1652 1648 raw - an optional argument specifying if the revision data is to be
1653 1649 treated as raw data when applying flag transforms. 'raw' should be set
1654 1650 to True when generating changegroups or in debug commands.
1655 1651 """
1656 1652 return self._revisiondata(nodeorrev, _df, raw=raw)
1657 1653
1658 1654 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1659 1655 if isinstance(nodeorrev, int):
1660 1656 rev = nodeorrev
1661 1657 node = self.node(rev)
1662 1658 else:
1663 1659 node = nodeorrev
1664 1660 rev = None
1665 1661
1666 1662 cachedrev = None
1667 1663 flags = None
1668 1664 rawtext = None
1669 1665 if node == nullid:
1670 1666 return ""
1671 1667 if self._revisioncache:
1672 1668 if self._revisioncache[0] == node:
1673 1669 # _cache only stores rawtext
1674 1670 if raw:
1675 1671 return self._revisioncache[2]
1676 1672 # duplicated, but good for perf
1677 1673 if rev is None:
1678 1674 rev = self.rev(node)
1679 1675 if flags is None:
1680 1676 flags = self.flags(rev)
1681 1677 # no extra flags set, no flag processor runs, text = rawtext
1682 1678 if flags == REVIDX_DEFAULT_FLAGS:
1683 1679 return self._revisioncache[2]
1684 1680 # rawtext is reusable. need to run flag processor
1685 1681 rawtext = self._revisioncache[2]
1686 1682
1687 1683 cachedrev = self._revisioncache[1]
1688 1684
1689 1685 # look up what we need to read
1690 1686 if rawtext is None:
1691 1687 if rev is None:
1692 1688 rev = self.rev(node)
1693 1689
1694 1690 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1695 1691 if stopped:
1696 1692 rawtext = self._revisioncache[2]
1697 1693
1698 1694 # drop cache to save memory
1699 1695 self._revisioncache = None
1700 1696
1701 1697 targetsize = None
1702 1698 rawsize = self.index[rev][2]
1703 1699 if 0 <= rawsize:
1704 1700 targetsize = 4 * rawsize
1705 1701
1706 1702 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1707 1703 if rawtext is None:
1708 1704 rawtext = bytes(bins[0])
1709 1705 bins = bins[1:]
1710 1706
1711 1707 rawtext = mdiff.patches(rawtext, bins)
1712 1708 self._revisioncache = (node, rev, rawtext)
1713 1709
1714 1710 if flags is None:
1715 1711 if rev is None:
1716 1712 rev = self.rev(node)
1717 1713 flags = self.flags(rev)
1718 1714
1719 1715 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1720 1716 if validatehash:
1721 1717 self.checkhash(text, node, rev=rev)
1722 1718
1723 1719 return text
1724 1720
1725 1721 def rawdata(self, nodeorrev, _df=None, raw=False):
1726 1722 """return an uncompressed raw data of a given node or revision number.
1727 1723
1728 1724 _df - an existing file handle to read from. (internal-only)
1729 1725 """
1730 1726 return self._revisiondata(nodeorrev, _df, raw=True)
1731 1727
1732 1728 def hash(self, text, p1, p2):
1733 1729 """Compute a node hash.
1734 1730
1735 1731 Available as a function so that subclasses can replace the hash
1736 1732 as needed.
1737 1733 """
1738 1734 return storageutil.hashrevisionsha1(text, p1, p2)
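The default scheme hashes the two parent nodes in ascending order, then the text, so swapping parents yields the same node. A minimal restatement of ``storageutil.hashrevisionsha1`` (my paraphrase, not the actual implementation):

    import hashlib

    def node_hash(text, p1, p2):
        # SHA-1 over min(p1, p2) + max(p1, p2) + text
        a, b = sorted([p1, p2])
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()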
1739 1735
1740 1736 def _processflags(self, text, flags, operation, raw=False):
1741 1737 """Inspect revision data flags and applies transforms defined by
1742 1738 registered flag processors.
1743 1739
1744 1740 ``text`` - the revision data to process
1745 1741 ``flags`` - the revision flags
1746 1742 ``operation`` - the operation being performed (read or write)
1747 1743 ``raw`` - an optional argument describing if the raw transform should be
1748 1744 applied.
1749 1745
1750 1746 This method processes the flags in the order (or reverse order if
1751 1747 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1752 1748 flag processors registered for present flags. The order of flags defined
1753 1749 in REVIDX_FLAGS_ORDER needs to be stable since the transforms are non-commutative.
1754 1750
1755 1751 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1756 1752 processed text and ``validatehash`` is a bool indicating whether the
1757 1753 returned text should be checked for hash integrity.
1758 1754
1759 1755 Note: If the ``raw`` argument is set, it has precedence over the
1760 1756 operation and will only update the value of ``validatehash``.
1761 1757 """
1762 1758 # fast path: no flag processors will run
1763 1759 if flags == 0:
1764 1760 return text, True
1765 1761 if operation not in ('read', 'write'):
1766 1762 raise error.ProgrammingError(_("invalid '%s' operation") %
1767 1763 operation)
1768 1764 # Check all flags are known.
1769 1765 if flags & ~REVIDX_KNOWN_FLAGS:
1770 1766 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1771 1767 (flags & ~REVIDX_KNOWN_FLAGS))
1772 1768 validatehash = True
1773 1769 # Depending on the operation (read or write), the order might be
1774 1770 # reversed due to non-commutative transforms.
1775 1771 orderedflags = REVIDX_FLAGS_ORDER
1776 1772 if operation == 'write':
1777 1773 orderedflags = reversed(orderedflags)
1778 1774
1779 1775 for flag in orderedflags:
1780 1776 # If a flagprocessor has been registered for a known flag, apply the
1781 1777 # related operation transform and update result tuple.
1782 1778 if flag & flags:
1783 1779 vhash = True
1784 1780
1785 1781 if flag not in self._flagprocessors:
1786 1782 message = _("missing processor for flag '%#x'") % (flag)
1787 1783 raise error.RevlogError(message)
1788 1784
1789 1785 processor = self._flagprocessors[flag]
1790 1786 if processor is not None:
1791 1787 readtransform, writetransform, rawtransform = processor
1792 1788
1793 1789 if raw:
1794 1790 vhash = rawtransform(self, text)
1795 1791 elif operation == 'read':
1796 1792 text, vhash = readtransform(self, text)
1797 1793 else: # write operation
1798 1794 text, vhash = writetransform(self, text)
1799 1795 validatehash = validatehash and vhash
1800 1796
1801 1797 return text, validatehash
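A processor is the 3-tuple unpacked above: ``(readtransform, writetransform, rawtransform)``. A hedged sketch with a deliberately trivial transform, assuming (as ``addrevision()`` below does) that the node hash is computed over the stored rawtext; the names and the flag are illustrative only:

    def _read(rl, text):
        return text[::-1], False   # text no longer matches the node hash

    def _write(rl, text):
        return text[::-1], True    # the node is hashed over this rawtext

    def _raw(rl, text):
        return True                # rawtext is exactly what was hashed

    exampleprocessor = (_read, _write, _raw)
    # registration would go through addflagprocessor(), e.g.:
    # addflagprocessor(REVIDX_EXTSTORED, exampleprocessor)  # flag illustrative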
1802 1798
1803 1799 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1804 1800 """Check node hash integrity.
1805 1801
1806 1802 Available as a function so that subclasses can extend hash mismatch
1807 1803 behaviors as needed.
1808 1804 """
1809 1805 try:
1810 1806 if p1 is None and p2 is None:
1811 1807 p1, p2 = self.parents(node)
1812 1808 if node != self.hash(text, p1, p2):
1813 1809 # Clear the revision cache on hash failure. The revision cache
1814 1810 # only stores the raw revision and clearing the cache does have
1815 1811 # the side-effect that we won't have a cache hit when the raw
1816 1812 # revision data is accessed. But this case should be rare and
1817 1813 # it is extra work to teach the cache about the hash
1818 1814 # verification state.
1819 1815 if self._revisioncache and self._revisioncache[0] == node:
1820 1816 self._revisioncache = None
1821 1817
1822 1818 revornode = rev
1823 1819 if revornode is None:
1824 1820 revornode = templatefilters.short(hex(node))
1825 1821 raise error.RevlogError(_("integrity check failed on %s:%s")
1826 1822 % (self.indexfile, pycompat.bytestr(revornode)))
1827 1823 except error.RevlogError:
1828 1824 if self._censorable and storageutil.iscensoredtext(text):
1829 1825 raise error.CensoredNodeError(self.indexfile, node, text)
1830 1826 raise
1831 1827
1832 1828 def _enforceinlinesize(self, tr, fp=None):
1833 1829 """Check if the revlog is too big for inline and convert if so.
1834 1830
1835 1831 This should be called after revisions are added to the revlog. If the
1836 1832 revlog has grown too large to be an inline revlog, it will convert it
1837 1833 to use multiple index and data files.
1838 1834 """
1839 1835 tiprev = len(self) - 1
1840 1836 if (not self._inline or
1841 1837 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1842 1838 return
1843 1839
1844 1840 trinfo = tr.find(self.indexfile)
1845 1841 if trinfo is None:
1846 1842 raise error.RevlogError(_("%s not found in the transaction")
1847 1843 % self.indexfile)
1848 1844
1849 1845 trindex = trinfo[2]
1850 1846 if trindex is not None:
1851 1847 dataoff = self.start(trindex)
1852 1848 else:
1853 1849 # revlog was stripped at start of transaction, use all leftover data
1854 1850 trindex = len(self) - 1
1855 1851 dataoff = self.end(tiprev)
1856 1852
1857 1853 tr.add(self.datafile, dataoff)
1858 1854
1859 1855 if fp:
1860 1856 fp.flush()
1861 1857 fp.close()
1862 1858 # We can't use the cached file handle after close(). So prevent
1863 1859 # its usage.
1864 1860 self._writinghandles = None
1865 1861
1866 1862 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1867 1863 for r in self:
1868 1864 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1869 1865
1870 1866 with self._indexfp('w') as fp:
1871 1867 self.version &= ~FLAG_INLINE_DATA
1872 1868 self._inline = False
1873 1869 io = self._io
1874 1870 for i in self:
1875 1871 e = io.packentry(self.index[i], self.node, self.version, i)
1876 1872 fp.write(e)
1877 1873
1878 1874 # the temp file replaces the real index when we exit the context
1879 1875 # manager
1880 1876
1881 1877 tr.replace(self.indexfile, trindex * self._io.size)
1882 1878 self._chunkclear()
1883 1879
1884 1880 def _nodeduplicatecallback(self, transaction, node):
1885 1881 """called when trying to add a node already stored.
1886 1882 """
1887 1883
1888 1884 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1889 1885 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1890 1886 """add a revision to the log
1891 1887
1892 1888 text - the revision data to add
1893 1889 transaction - the transaction object used for rollback
1894 1890 link - the linkrev data to add
1895 1891 p1, p2 - the parent nodeids of the revision
1896 1892 cachedelta - an optional precomputed delta
1897 1893 node - nodeid of revision; typically node is not specified, and it is
1898 1894 computed by default as hash(text, p1, p2); however, subclasses might
1899 1895 use a different hashing method (and override checkhash() in that case)
1900 1896 flags - the known flags to set on the revision
1901 1897 deltacomputer - an optional deltacomputer instance shared between
1902 1898 multiple calls
1903 1899 """
1904 1900 if link == nullrev:
1905 1901 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1906 1902 % self.indexfile)
1907 1903
1908 1904 if flags:
1909 1905 node = node or self.hash(text, p1, p2)
1910 1906
1911 1907 rawtext, validatehash = self._processflags(text, flags, 'write')
1912 1908
1913 1909 # If the flag processor modifies the revision data, ignore any provided
1914 1910 # cachedelta.
1915 1911 if rawtext != text:
1916 1912 cachedelta = None
1917 1913
1918 1914 if len(rawtext) > _maxentrysize:
1919 1915 raise error.RevlogError(
1920 1916 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1921 1917 % (self.indexfile, len(rawtext)))
1922 1918
1923 1919 node = node or self.hash(rawtext, p1, p2)
1924 1920 if node in self.nodemap:
1925 1921 return node
1926 1922
1927 1923 if validatehash:
1928 1924 self.checkhash(rawtext, node, p1=p1, p2=p2)
1929 1925
1930 1926 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1931 1927 flags, cachedelta=cachedelta,
1932 1928 deltacomputer=deltacomputer)
1933 1929
1934 1930 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1935 1931 cachedelta=None, deltacomputer=None):
1936 1932 """add a raw revision with known flags, node and parents
1937 1933 useful when reusing a revision not stored in this revlog (ex: received
1938 1934 over wire, or read from an external bundle).
1939 1935 """
1940 1936 dfh = None
1941 1937 if not self._inline:
1942 1938 dfh = self._datafp("a+")
1943 1939 ifh = self._indexfp("a+")
1944 1940 try:
1945 1941 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1946 1942 flags, cachedelta, ifh, dfh,
1947 1943 deltacomputer=deltacomputer)
1948 1944 finally:
1949 1945 if dfh:
1950 1946 dfh.close()
1951 1947 ifh.close()
1952 1948
1953 1949 def compress(self, data):
1954 1950 """Generate a possibly-compressed representation of data."""
1955 1951 if not data:
1956 1952 return '', data
1957 1953
1958 1954 compressed = self._compressor.compress(data)
1959 1955
1960 1956 if compressed:
1961 1957 # The revlog compressor added the header in the returned data.
1962 1958 return '', compressed
1963 1959
1964 1960 if data[0:1] == '\0':
1965 1961 return '', data
1966 1962 return 'u', data
1967 1963
1968 1964 def decompress(self, data):
1969 1965 """Decompress a revlog chunk.
1970 1966
1971 1967 The chunk is expected to begin with a header identifying the
1972 1968 format type so it can be routed to an appropriate decompressor.
1973 1969 """
1974 1970 if not data:
1975 1971 return data
1976 1972
1977 1973 # Revlogs are read much more frequently than they are written and many
1978 1974 # chunks only take microseconds to decompress, so performance is
1979 1975 # important here.
1980 1976 #
1981 1977 # We can make a few assumptions about revlogs:
1982 1978 #
1983 1979 # 1) the majority of chunks will be compressed (as opposed to inline
1984 1980 # raw data).
1985 1981 # 2) decompressing *any* data will likely be at least 10x slower than
1986 1982 # returning raw inline data.
1987 1983 # 3) we want to prioritize common and officially supported compression
1988 1984 # engines
1989 1985 #
1990 1986 # It follows that we want to optimize for "decompress compressed data
1991 1987 # when encoded with common and officially supported compression engines"
1992 1988 # case over "raw data" and "data encoded by less common or non-official
1993 1989 # compression engines." That is why we have the inline lookup first
1994 1990 # followed by the compengines lookup.
1995 1991 #
1996 1992 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1997 1993 # compressed chunks. And this matters for changelog and manifest reads.
1998 1994 t = data[0:1]
1999 1995
2000 1996 if t == 'x':
2001 1997 try:
2002 1998 return _zlibdecompress(data)
2003 1999 except zlib.error as e:
2004 2000 raise error.RevlogError(_('revlog decompress error: %s') %
2005 2001 stringutil.forcebytestr(e))
2006 2002 # '\0' is more common than 'u' so it goes first.
2007 2003 elif t == '\0':
2008 2004 return data
2009 2005 elif t == 'u':
2010 2006 return util.buffer(data, 1)
2011 2007
2012 2008 try:
2013 2009 compressor = self._decompressors[t]
2014 2010 except KeyError:
2015 2011 try:
2016 2012 engine = util.compengines.forrevlogheader(t)
2017 2013 compressor = engine.revlogcompressor(self._compengineopts)
2018 2014 self._decompressors[t] = compressor
2019 2015 except KeyError:
2020 2016 raise error.RevlogError(_('unknown compression type %r') % t)
2021 2017
2022 2018 return compressor.decompress(data)
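The header-byte convention tested above ('x' for zlib, '\0' for literal data, 'u' for uncompressed-with-header, anything else an engine header) leans on zlib's own framing: a zlib stream always begins with 0x78, i.e. ASCII 'x'. A small demonstration:

    import zlib

    payload = zlib.compress(b'hello revlog')
    assert payload[0:1] == b'x'                      # hence the t == 'x' test
    assert zlib.decompress(payload) == b'hello revlog'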
2023 2019
2024 2020 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2025 2021 cachedelta, ifh, dfh, alwayscache=False,
2026 2022 deltacomputer=None):
2027 2023 """internal function to add revisions to the log
2028 2024
2029 2025 see addrevision for argument descriptions.
2030 2026
2031 2027 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2032 2028
2033 2029 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2034 2030 be used.
2035 2031
2036 2032 invariants:
2037 2033 - rawtext is optional (can be None); if not set, cachedelta must be set.
2038 2034 If both are set, they must correspond to each other.
2039 2035 """
2040 2036 if node == nullid:
2041 2037 raise error.RevlogError(_("%s: attempt to add null revision") %
2042 2038 self.indexfile)
2043 2039 if node == wdirid or node in wdirfilenodeids:
2044 2040 raise error.RevlogError(_("%s: attempt to add wdir revision") %
2045 2041 self.indexfile)
2046 2042
2047 2043 if self._inline:
2048 2044 fh = ifh
2049 2045 else:
2050 2046 fh = dfh
2051 2047
2052 2048 btext = [rawtext]
2053 2049
2054 2050 curr = len(self)
2055 2051 prev = curr - 1
2056 2052 offset = self.end(prev)
2057 2053 p1r, p2r = self.rev(p1), self.rev(p2)
2058 2054
2059 2055 # full versions are inserted when the needed deltas
2060 2056 # become comparable to the uncompressed text
2061 2057 if rawtext is None:
2062 2058 # need rawtext size, before changed by flag processors, which is
2063 2059 # the non-raw size. use revlog explicitly to avoid filelog's extra
2064 2060 # logic that might remove metadata size.
2065 2061 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2066 2062 cachedelta[1])
2067 2063 else:
2068 2064 textlen = len(rawtext)
2069 2065
2070 2066 if deltacomputer is None:
2071 2067 deltacomputer = deltautil.deltacomputer(self)
2072 2068
2073 2069 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2074 2070
2075 2071 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2076 2072
2077 2073 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2078 2074 deltainfo.base, link, p1r, p2r, node)
2079 2075 self.index.append(e)
2080 2076 self.nodemap[node] = curr
2081 2077
2082 2078 # Reset the pure node cache start lookup offset to account for new
2083 2079 # revision.
2084 2080 if self._nodepos is not None:
2085 2081 self._nodepos = curr
2086 2082
2087 2083 entry = self._io.packentry(e, self.node, self.version, curr)
2088 2084 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2089 2085 link, offset)
2090 2086
2091 2087 rawtext = btext[0]
2092 2088
2093 2089 if alwayscache and rawtext is None:
2094 2090 rawtext = deltacomputer.buildtext(revinfo, fh)
2095 2091
2096 2092 if type(rawtext) == bytes: # only accept immutable objects
2097 2093 self._revisioncache = (node, curr, rawtext)
2098 2094 self._chainbasecache[curr] = deltainfo.chainbase
2099 2095 return node
2100 2096
2101 2097 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2102 2098 # Files opened in a+ mode have inconsistent behavior on various
2103 2099 # platforms. Windows requires that a file positioning call be made
2104 2100 # when the file handle transitions between reads and writes. See
2105 2101 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2106 2102 # platforms, Python or the platform itself can be buggy. Some versions
2107 2103 # of Solaris have been observed to not append at the end of the file
2108 2104 # if the file was seeked to before the end. See issue4943 for more.
2109 2105 #
2110 2106 # We work around this issue by inserting a seek() before writing.
2111 2107 # Note: This is likely not necessary on Python 3. However, because
2112 2108 # the file handle is reused for reads and may be seeked there, we need
2113 2109 # to be careful before changing this.
2114 2110 ifh.seek(0, os.SEEK_END)
2115 2111 if dfh:
2116 2112 dfh.seek(0, os.SEEK_END)
2117 2113
2118 2114 curr = len(self) - 1
2119 2115 if not self._inline:
2120 2116 transaction.add(self.datafile, offset)
2121 2117 transaction.add(self.indexfile, curr * len(entry))
2122 2118 if data[0]:
2123 2119 dfh.write(data[0])
2124 2120 dfh.write(data[1])
2125 2121 ifh.write(entry)
2126 2122 else:
2127 2123 offset += curr * self._io.size
2128 2124 transaction.add(self.indexfile, offset, curr)
2129 2125 ifh.write(entry)
2130 2126 ifh.write(data[0])
2131 2127 ifh.write(data[1])
2132 2128 self._enforceinlinesize(transaction, ifh)
2133 2129
2134 2130 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2135 2131 """
2136 2132 add a delta group
2137 2133
2138 2134 given a set of deltas, add them to the revision log. The
2139 2135 first delta is against its parent, which should be in our
2140 2136 log; the rest are against the previous delta.
2141 2137
2142 2138 If ``addrevisioncb`` is defined, it will be called with arguments of
2143 2139 this revlog and the node that was added.
2144 2140 """
2145 2141
2146 2142 if self._writinghandles:
2147 2143 raise error.ProgrammingError('cannot nest addgroup() calls')
2148 2144
2149 2145 nodes = []
2150 2146
2151 2147 r = len(self)
2152 2148 end = 0
2153 2149 if r:
2154 2150 end = self.end(r - 1)
2155 2151 ifh = self._indexfp("a+")
2156 2152 isize = r * self._io.size
2157 2153 if self._inline:
2158 2154 transaction.add(self.indexfile, end + isize, r)
2159 2155 dfh = None
2160 2156 else:
2161 2157 transaction.add(self.indexfile, isize, r)
2162 2158 transaction.add(self.datafile, end)
2163 2159 dfh = self._datafp("a+")
2164 2160 def flush():
2165 2161 if dfh:
2166 2162 dfh.flush()
2167 2163 ifh.flush()
2168 2164
2169 2165 self._writinghandles = (ifh, dfh)
2170 2166
2171 2167 try:
2172 2168 deltacomputer = deltautil.deltacomputer(self)
2173 2169 # loop through our set of deltas
2174 2170 for data in deltas:
2175 2171 node, p1, p2, linknode, deltabase, delta, flags = data
2176 2172 link = linkmapper(linknode)
2177 2173 flags = flags or REVIDX_DEFAULT_FLAGS
2178 2174
2179 2175 nodes.append(node)
2180 2176
2181 2177 if node in self.nodemap:
2182 2178 self._nodeduplicatecallback(transaction, node)
2183 2179 # this can happen if two branches make the same change
2184 2180 continue
2185 2181
2186 2182 for p in (p1, p2):
2187 2183 if p not in self.nodemap:
2188 2184 raise error.LookupError(p, self.indexfile,
2189 2185 _('unknown parent'))
2190 2186
2191 2187 if deltabase not in self.nodemap:
2192 2188 raise error.LookupError(deltabase, self.indexfile,
2193 2189 _('unknown delta base'))
2194 2190
2195 2191 baserev = self.rev(deltabase)
2196 2192
2197 2193 if baserev != nullrev and self.iscensored(baserev):
2198 2194 # if base is censored, delta must be full replacement in a
2199 2195 # single patch operation
2200 2196 hlen = struct.calcsize(">lll")
2201 2197 oldlen = self.rawsize(baserev)
2202 2198 newlen = len(delta) - hlen
2203 2199 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2204 2200 raise error.CensoredBaseError(self.indexfile,
2205 2201 self.node(baserev))
2206 2202
2207 2203 if not flags and self._peek_iscensored(baserev, delta, flush):
2208 2204 flags |= REVIDX_ISCENSORED
2209 2205
2210 2206 # We assume consumers of addrevisioncb will want to retrieve
2211 2207 # the added revision, which will require a call to
2212 2208 # revision(). revision() will fast path if there is a cache
2213 2209 # hit. So, we tell _addrevision() to always cache in this case.
2214 2210 # We're only using addgroup() in the context of changegroup
2215 2211 # generation so the revision data can always be handled as raw
2216 2212 # by the flagprocessor.
2217 2213 self._addrevision(node, None, transaction, link,
2218 2214 p1, p2, flags, (baserev, delta),
2219 2215 ifh, dfh,
2220 2216 alwayscache=bool(addrevisioncb),
2221 2217 deltacomputer=deltacomputer)
2222 2218
2223 2219 if addrevisioncb:
2224 2220 addrevisioncb(self, node)
2225 2221
2226 2222 if not dfh and not self._inline:
2227 2223 # addrevision switched from inline to conventional
2228 2224 # reopen the index
2229 2225 ifh.close()
2230 2226 dfh = self._datafp("a+")
2231 2227 ifh = self._indexfp("a+")
2232 2228 self._writinghandles = (ifh, dfh)
2233 2229 finally:
2234 2230 self._writinghandles = None
2235 2231
2236 2232 if dfh:
2237 2233 dfh.close()
2238 2234 ifh.close()
2239 2235
2240 2236 return nodes
2241 2237
2242 2238 def iscensored(self, rev):
2243 2239 """Check if a file revision is censored."""
2244 2240 if not self._censorable:
2245 2241 return False
2246 2242
2247 2243 return self.flags(rev) & REVIDX_ISCENSORED
2248 2244
2249 2245 def _peek_iscensored(self, baserev, delta, flush):
2250 2246 """Quickly check if a delta produces a censored revision."""
2251 2247 if not self._censorable:
2252 2248 return False
2253 2249
2254 2250 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2255 2251
2256 2252 def getstrippoint(self, minlink):
2257 2253 """find the minimum rev that must be stripped to strip the linkrev
2258 2254
2259 2255 Returns a tuple containing the minimum rev and a set of all revs that
2260 2256 have linkrevs that will be broken by this strip.
2261 2257 """
2262 2258 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2263 2259 self.headrevs(),
2264 2260 self.linkrev, self.parentrevs)
2265 2261
2266 2262 def strip(self, minlink, transaction):
2267 2263 """truncate the revlog on the first revision with a linkrev >= minlink
2268 2264
2269 2265 This function is called when we're stripping revision minlink and
2270 2266 its descendants from the repository.
2271 2267
2272 2268 We have to remove all revisions with linkrev >= minlink, because
2273 2269 the equivalent changelog revisions will be renumbered after the
2274 2270 strip.
2275 2271
2276 2272 So we truncate the revlog on the first of these revisions, and
2277 2273 trust that the caller has saved the revisions that shouldn't be
2278 2274 removed and that it'll re-add them after this truncation.
2279 2275 """
2280 2276 if len(self) == 0:
2281 2277 return
2282 2278
2283 2279 rev, _ = self.getstrippoint(minlink)
2284 2280 if rev == len(self):
2285 2281 return
2286 2282
2287 2283 # first truncate the files on disk
2288 2284 end = self.start(rev)
2289 2285 if not self._inline:
2290 2286 transaction.add(self.datafile, end)
2291 2287 end = rev * self._io.size
2292 2288 else:
2293 2289 end += rev * self._io.size
2294 2290
2295 2291 transaction.add(self.indexfile, end)
2296 2292
2297 2293 # then reset internal state in memory to forget those revisions
2298 2294 self._revisioncache = None
2299 2295 self._chaininfocache = {}
2300 2296 self._chunkclear()
2301 2297 for x in pycompat.xrange(rev, len(self)):
2302 2298 del self.nodemap[self.node(x)]
2303 2299
2304 2300 del self.index[rev:-1]
2305 2301 self._nodepos = None
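The truncation offsets computed above differ for inline and split revlogs; a hedged restatement (``data_start`` standing in for ``self.start(rev)`` and ``entry_size`` for ``self._io.size``):

    def truncation_points(rev, data_start, entry_size, inline):
        # Returns (data file end, index file end) when stripping at ``rev``.
        if inline:
            # one interleaved file: data of revs < rev plus rev index entries
            return (None, data_start + rev * entry_size)
        return (data_start, rev * entry_size)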
2306 2302
2307 2303 def checksize(self):
2308 2304 """Check size of index and data files
2309 2305
2310 2306 return a (dd, di) tuple.
2311 2307 - dd: extra bytes for the "data" file
2312 2308 - di: extra bytes for the "index" file
2313 2309
2314 2310 A healthy revlog will return (0, 0).
2315 2311 """
2316 2312 expected = 0
2317 2313 if len(self):
2318 2314 expected = max(0, self.end(len(self) - 1))
2319 2315
2320 2316 try:
2321 2317 with self._datafp() as f:
2322 2318 f.seek(0, io.SEEK_END)
2323 2319 actual = f.tell()
2324 2320 dd = actual - expected
2325 2321 except IOError as inst:
2326 2322 if inst.errno != errno.ENOENT:
2327 2323 raise
2328 2324 dd = 0
2329 2325
2330 2326 try:
2331 2327 f = self.opener(self.indexfile)
2332 2328 f.seek(0, io.SEEK_END)
2333 2329 actual = f.tell()
2334 2330 f.close()
2335 2331 s = self._io.size
2336 2332 i = max(0, actual // s)
2337 2333 di = actual - (i * s)
2338 2334 if self._inline:
2339 2335 databytes = 0
2340 2336 for r in self:
2341 2337 databytes += max(0, self.length(r))
2342 2338 dd = 0
2343 2339 di = actual - len(self) * s - databytes
2344 2340 except IOError as inst:
2345 2341 if inst.errno != errno.ENOENT:
2346 2342 raise
2347 2343 di = 0
2348 2344
2349 2345 return (dd, di)
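A hedged usage sketch, with ``rl`` standing in for an open revlog instance:

    dd, di = rl.checksize()
    if dd or di:
        print('revlog has %d stray data bytes, %d stray index bytes' % (dd, di))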
2350 2346
2351 2347 def files(self):
2352 2348 res = [self.indexfile]
2353 2349 if not self._inline:
2354 2350 res.append(self.datafile)
2355 2351 return res
2356 2352
2357 2353 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2358 2354 assumehaveparentrevisions=False,
2359 2355 deltamode=repository.CG_DELTAMODE_STD):
2360 2356 if nodesorder not in ('nodes', 'storage', 'linear', None):
2361 2357 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2362 2358 nodesorder)
2363 2359
2364 2360 if nodesorder is None and not self._generaldelta:
2365 2361 nodesorder = 'storage'
2366 2362
2367 2363 if (not self._storedeltachains and
2368 2364 deltamode != repository.CG_DELTAMODE_PREV):
2369 2365 deltamode = repository.CG_DELTAMODE_FULL
2370 2366
2371 2367 return storageutil.emitrevisions(
2372 2368 self, nodes, nodesorder, revlogrevisiondelta,
2373 2369 deltaparentfn=self.deltaparent,
2374 2370 candeltafn=self.candelta,
2375 2371 rawsizefn=self.rawsize,
2376 2372 revdifffn=self.revdiff,
2377 2373 flagsfn=self.flags,
2378 2374 deltamode=deltamode,
2379 2375 revisiondata=revisiondata,
2380 2376 assumehaveparentrevisions=assumehaveparentrevisions)
2381 2377
2382 2378 DELTAREUSEALWAYS = 'always'
2383 2379 DELTAREUSESAMEREVS = 'samerevs'
2384 2380 DELTAREUSENEVER = 'never'
2385 2381
2386 2382 DELTAREUSEFULLADD = 'fulladd'
2387 2383
2388 2384 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2389 2385
2390 2386 def clone(self, tr, destrevlog, addrevisioncb=None,
2391 2387 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2392 2388 """Copy this revlog to another, possibly with format changes.
2393 2389
2394 2390 The destination revlog will contain the same revisions and nodes.
2395 2391 However, it may not be bit-for-bit identical due to e.g. delta encoding
2396 2392 differences.
2397 2393
2398 2394 The ``deltareuse`` argument controls how deltas from the existing revlog
2399 2395 are preserved in the destination revlog. The argument can have the
2400 2396 following values:
2401 2397
2402 2398 DELTAREUSEALWAYS
2403 2399 Deltas will always be reused (if possible), even if the destination
2404 2400 revlog would not select the same revisions for the delta. This is the
2405 2401 fastest mode of operation.
2406 2402 DELTAREUSESAMEREVS
2407 2403 Deltas will be reused if the destination revlog would pick the same
2408 2404 revisions for the delta. This mode strikes a balance between speed
2409 2405 and optimization.
2410 2406 DELTAREUSENEVER
2411 2407 Deltas will never be reused. This is the slowest mode of execution.
2412 2408 This mode can be used to recompute deltas (e.g. if the diff/delta
2413 2409 algorithm changes).
2414 2410
2415 2411 Delta computation can be slow, so the choice of delta reuse policy can
2416 2412 significantly affect run time.
2417 2413
2418 2414 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2419 2415 two extremes. Deltas will be reused if they are appropriate. But if the
2420 2416 delta could choose a better revision, it will do so. This means if you
2421 2417 are converting a non-generaldelta revlog to a generaldelta revlog,
2422 2418 deltas will be recomputed if the delta's parent isn't a parent of the
2423 2419 revision.
2424 2420
2425 2421 In addition to the delta policy, the ``forcedeltabothparents``
2426 2422 argument controls whether to force computing deltas against both
2427 2423 parents for merges. When unset, the destination's current setting is kept.
2428 2424 """
2429 2425 if deltareuse not in self.DELTAREUSEALL:
2430 2426 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2431 2427
2432 2428 if len(destrevlog):
2433 2429 raise ValueError(_('destination revlog is not empty'))
2434 2430
2435 2431 if getattr(self, 'filteredrevs', None):
2436 2432 raise ValueError(_('source revlog has filtered revisions'))
2437 2433 if getattr(destrevlog, 'filteredrevs', None):
2438 2434 raise ValueError(_('destination revlog has filtered revisions'))
2439 2435
2440 2436 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2441 2437 # if possible.
2442 2438 oldlazydelta = destrevlog._lazydelta
2443 2439 oldlazydeltabase = destrevlog._lazydeltabase
2444 2440 oldamd = destrevlog._deltabothparents
2445 2441
2446 2442 try:
2447 2443 if deltareuse == self.DELTAREUSEALWAYS:
2448 2444 destrevlog._lazydeltabase = True
2449 2445 destrevlog._lazydelta = True
2450 2446 elif deltareuse == self.DELTAREUSESAMEREVS:
2451 2447 destrevlog._lazydeltabase = False
2452 2448 destrevlog._lazydelta = True
2453 2449 elif deltareuse == self.DELTAREUSENEVER:
2454 2450 destrevlog._lazydeltabase = False
2455 2451 destrevlog._lazydelta = False
2456 2452
2457 2453 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2458 2454
2459 2455 deltacomputer = deltautil.deltacomputer(destrevlog)
2460 2456 index = self.index
2461 2457 for rev in self:
2462 2458 entry = index[rev]
2463 2459
2464 2460 # Some classes override linkrev to take filtered revs into
2465 2461 # account. Use raw entry from index.
2466 2462 flags = entry[0] & 0xffff
2467 2463 linkrev = entry[4]
2468 2464 p1 = index[entry[5]][7]
2469 2465 p2 = index[entry[6]][7]
2470 2466 node = entry[7]
2471 2467
2472 2468 # (Possibly) reuse the delta from the revlog if allowed and
2473 2469 # the revlog chunk is a delta.
2474 2470 cachedelta = None
2475 2471 rawtext = None
2476 2472 if (deltareuse != self.DELTAREUSEFULLADD
2477 2473 and destrevlog._lazydelta):
2478 2474 dp = self.deltaparent(rev)
2479 2475 if dp != nullrev:
2480 2476 cachedelta = (dp, bytes(self._chunk(rev)))
2481 2477
2482 2478 if not cachedelta:
2483 2479 rawtext = self.revision(rev, raw=True)
2484 2480
2485 2481
2486 2482 if deltareuse == self.DELTAREUSEFULLADD:
2487 2483 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2488 2484 cachedelta=cachedelta,
2489 2485 node=node, flags=flags,
2490 2486 deltacomputer=deltacomputer)
2491 2487 else:
2492 2488 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2493 2489 checkambig=False)
2494 2490 dfh = None
2495 2491 if not destrevlog._inline:
2496 2492 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2497 2493 try:
2498 2494 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2499 2495 p2, flags, cachedelta, ifh, dfh,
2500 2496 deltacomputer=deltacomputer)
2501 2497 finally:
2502 2498 if dfh:
2503 2499 dfh.close()
2504 2500 ifh.close()
2505 2501
2506 2502 if addrevisioncb:
2507 2503 addrevisioncb(self, rev, node)
2508 2504 finally:
2509 2505 destrevlog._lazydelta = oldlazydelta
2510 2506 destrevlog._lazydeltabase = oldlazydeltabase
2511 2507 destrevlog._deltabothparents = oldamd
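A hedged usage sketch: forcing every delta to be recomputed while copying (``src``, ``dst`` and ``tr`` stand in for a source revlog, an empty destination revlog and an open transaction):

    src.clone(tr, dst, deltareuse=src.DELTAREUSENEVER)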
2512 2508
2513 2509 def censorrevision(self, tr, censornode, tombstone=b''):
2514 2510 if (self.version & 0xFFFF) == REVLOGV0:
2515 2511 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2516 2512 self.version)
2517 2513
2518 2514 censorrev = self.rev(censornode)
2519 2515 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2520 2516
2521 2517 if len(tombstone) > self.rawsize(censorrev):
2522 2518 raise error.Abort(_('censor tombstone must be no longer than '
2523 2519 'censored data'))
2524 2520
2525 2521 # Rewriting the revlog in place is hard. Our strategy for censoring is
2526 2522 # to create a new revlog, copy all revisions to it, then replace the
2527 2523 # revlogs on transaction close.
2528 2524
2529 2525 newindexfile = self.indexfile + b'.tmpcensored'
2530 2526 newdatafile = self.datafile + b'.tmpcensored'
2531 2527
2532 2528 # This is a bit dangerous. We could easily have a mismatch of state.
2533 2529 newrl = revlog(self.opener, newindexfile, newdatafile,
2534 2530 censorable=True)
2535 2531 newrl.version = self.version
2536 2532 newrl._generaldelta = self._generaldelta
2537 2533 newrl._io = self._io
2538 2534
2539 2535 for rev in self.revs():
2540 2536 node = self.node(rev)
2541 2537 p1, p2 = self.parents(node)
2542 2538
2543 2539 if rev == censorrev:
2544 2540 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2545 2541 p1, p2, censornode, REVIDX_ISCENSORED)
2546 2542
2547 2543 if newrl.deltaparent(rev) != nullrev:
2548 2544 raise error.Abort(_('censored revision stored as delta; '
2549 2545 'cannot censor'),
2550 2546 hint=_('censoring of revlogs is not '
2551 2547 'fully implemented; please report '
2552 2548 'this bug'))
2553 2549 continue
2554 2550
2555 2551 if self.iscensored(rev):
2556 2552 if self.deltaparent(rev) != nullrev:
2557 2553 raise error.Abort(_('cannot censor due to censored '
2558 2554 'revision having delta stored'))
2559 2555 rawtext = self._chunk(rev)
2560 2556 else:
2561 2557 rawtext = self.revision(rev, raw=True)
2562 2558
2563 2559 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2564 2560 self.flags(rev))
2565 2561
2566 2562 tr.addbackup(self.indexfile, location='store')
2567 2563 if not self._inline:
2568 2564 tr.addbackup(self.datafile, location='store')
2569 2565
2570 2566 self.opener.rename(newrl.indexfile, self.indexfile)
2571 2567 if not self._inline:
2572 2568 self.opener.rename(newrl.datafile, self.datafile)
2573 2569
2574 2570 self.clearcaches()
2575 2571 self._loadindex()
2576 2572
2577 2573 def verifyintegrity(self, state):
2578 2574 """Verifies the integrity of the revlog.
2579 2575
2580 2576 Yields ``revlogproblem`` instances describing problems that are
2581 2577 found.
2582 2578 """
2583 2579 dd, di = self.checksize()
2584 2580 if dd:
2585 2581 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2586 2582 if di:
2587 2583 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2588 2584
2589 2585 version = self.version & 0xFFFF
2590 2586
2591 2587 # The verifier tells us what version revlog we should be.
2592 2588 if version != state['expectedversion']:
2593 2589 yield revlogproblem(
2594 2590 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2595 2591 (self.indexfile, version, state['expectedversion']))
2596 2592
2597 2593 state['skipread'] = set()
2598 2594
2599 2595 for rev in self:
2600 2596 node = self.node(rev)
2601 2597
2602 2598 # Verify contents. 4 cases to care about:
2603 2599 #
2604 2600 # common: the most common case
2605 2601 # rename: with a rename
2606 2602 # meta: file content starts with b'\1\n', the metadata
2607 2603 # header defined in filelog.py, but without a rename
2608 2604 # ext: content stored externally
2609 2605 #
2610 2606 # More formally, their differences are shown below:
2611 2607 #
2612 2608 # | common | rename | meta | ext
2613 2609 # -------------------------------------------------------
2614 2610 # flags() | 0 | 0 | 0 | not 0
2615 2611 # renamed() | False | True | False | ?
2616 2612 # rawtext[0:2]=='\1\n'| False | True | True | ?
2617 2613 #
2618 2614 # "rawtext" means the raw text stored in revlog data, which
2619 2615 # could be retrieved by "revision(rev, raw=True)". "text"
2620 2616 # mentioned below is "revision(rev, raw=False)".
2621 2617 #
2622 2618 # There are 3 different lengths stored physically:
2623 2619 # 1. L1: rawsize, stored in revlog index
2624 2620 # 2. L2: len(rawtext), stored in revlog data
2625 2621 # 3. L3: len(text), stored in revlog data if flags==0, or
2626 2622 # possibly somewhere else if flags!=0
2627 2623 #
2628 2624 # L1 should be equal to L2. L3 could be different from them.
2629 2625 # "text" may or may not affect commit hash depending on flag
2630 2626 # processors (see revlog.addflagprocessor).
2631 2627 #
2632 2628 # | common | rename | meta | ext
2633 2629 # -------------------------------------------------
2634 2630 # rawsize() | L1 | L1 | L1 | L1
2635 2631 # size() | L1 | L2-LM | L1(*) | L1 (?)
2636 2632 # len(rawtext) | L2 | L2 | L2 | L2
2637 2633 # len(text) | L2 | L2 | L2 | L3
2638 2634 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2639 2635 #
2640 2636 # LM: length of metadata, depending on rawtext
2641 2637 # (*): not ideal, see comment in filelog.size
2642 2638 # (?): could be "- len(meta)" if the resolved content has
2643 2639 # rename metadata
2644 2640 #
2645 2641 # Checks needed to be done:
2646 2642 # 1. length check: L1 == L2, in all cases.
2647 2643 # 2. hash check: depending on flag processor, we may need to
2648 2644 # use either "text" (external), or "rawtext" (in revlog).
2649 2645
2650 2646 try:
2651 2647 skipflags = state.get('skipflags', 0)
2652 2648 if skipflags:
2653 2649 skipflags &= self.flags(rev)
2654 2650
2655 2651 if skipflags:
2656 2652 state['skipread'].add(node)
2657 2653 else:
2658 2654 # Side-effect: read content and verify hash.
2659 2655 self.revision(node)
2660 2656
2661 2657 l1 = self.rawsize(rev)
2662 2658 l2 = len(self.revision(node, raw=True))
2663 2659
2664 2660 if l1 != l2:
2665 2661 yield revlogproblem(
2666 2662 error=_('unpacked size is %d, %d expected') % (l2, l1),
2667 2663 node=node)
2668 2664
2669 2665 except error.CensoredNodeError:
2670 2666 if state['erroroncensored']:
2671 2667 yield revlogproblem(error=_('censored file data'),
2672 2668 node=node)
2673 2669 state['skipread'].add(node)
2674 2670 except Exception as e:
2675 2671 yield revlogproblem(
2676 2672 error=_('unpacking %s: %s') % (short(node),
2677 2673 stringutil.forcebytestr(e)),
2678 2674 node=node)
2679 2675 state['skipread'].add(node)
2680 2676
2681 2677 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2682 2678 revisionscount=False, trackedsize=False,
2683 2679 storedsize=False):
2684 2680 d = {}
2685 2681
2686 2682 if exclusivefiles:
2687 2683 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2688 2684 if not self._inline:
2689 2685 d['exclusivefiles'].append((self.opener, self.datafile))
2690 2686
2691 2687 if sharedfiles:
2692 2688 d['sharedfiles'] = []
2693 2689
2694 2690 if revisionscount:
2695 2691 d['revisionscount'] = len(self)
2696 2692
2697 2693 if trackedsize:
2698 2694 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2699 2695
2700 2696 if storedsize:
2701 2697 d['storedsize'] = sum(self.opener.stat(path).st_size
2702 2698 for path in self.files())
2703 2699
2704 2700 return d
@@ -1,31 +1,35 b''
1 1 # flagutil.py - code to deal with revlog flags and their processors
2 2 #
3 3 # Copyright 2016 Remi Chaintron <remi@fb.com>
4 4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 from .constants import (
12 12 REVIDX_DEFAULT_FLAGS,
13 13 REVIDX_ELLIPSIS,
14 14 REVIDX_EXTSTORED,
15 15 REVIDX_FLAGS_ORDER,
16 16 REVIDX_ISCENSORED,
17 17 REVIDX_KNOWN_FLAGS,
18 18 REVIDX_RAWTEXT_CHANGING_FLAGS,
19 19 )
20 20
21 21 # blanked usage of all the names to prevent pyflakes constraints
22 22 # We need these names available in the module for extensions.
23 23 REVIDX_ISCENSORED
24 24 REVIDX_ELLIPSIS
25 25 REVIDX_EXTSTORED
26 26 REVIDX_DEFAULT_FLAGS
27 27 REVIDX_FLAGS_ORDER
28 28 REVIDX_KNOWN_FLAGS
29 29 REVIDX_RAWTEXT_CHANGING_FLAGS
30 30
31 # Store flag processors (cf. 'addflagprocessor()' to register)
32 flagprocessors = {
33 REVIDX_ISCENSORED: None,
34 }
31 35
@@ -1,304 +1,304 b''
1 1 # Create server
2 2 $ hg init server
3 3 $ cd server
4 4 $ cat >> .hg/hgrc << EOF
5 5 > [extensions]
6 6 > extension=$TESTDIR/flagprocessorext.py
7 7 > EOF
8 8 $ cd ../
9 9
10 10 # Clone server and enable extensions
11 11 $ hg clone -q server client
12 12 $ cd client
13 13 $ cat >> .hg/hgrc << EOF
14 14 > [extensions]
15 15 > extension=$TESTDIR/flagprocessorext.py
16 16 > EOF
17 17
18 18 # Commit file that will trigger the noop extension
19 19 $ echo '[NOOP]' > noop
20 20 $ hg commit -Aqm "noop"
21 21
22 22 # Commit file that will trigger the base64 extension
23 23 $ echo '[BASE64]' > base64
24 24 $ hg commit -Aqm 'base64'
25 25
26 26 # Commit file that will trigger the gzip extension
27 27 $ echo '[GZIP]' > gzip
28 28 $ hg commit -Aqm 'gzip'
29 29
30 30 # Commit file that will trigger noop and base64
31 31 $ echo '[NOOP][BASE64]' > noop-base64
32 32 $ hg commit -Aqm 'noop+base64'
33 33
34 34 # Commit file that will trigger noop and gzip
35 35 $ echo '[NOOP][GZIP]' > noop-gzip
36 36 $ hg commit -Aqm 'noop+gzip'
37 37
38 38 # Commit file that will trigger base64 and gzip
39 39 $ echo '[BASE64][GZIP]' > base64-gzip
40 40 $ hg commit -Aqm 'base64+gzip'
41 41
42 42 # Commit file that will trigger base64, gzip and noop
43 43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 44 $ hg commit -Aqm 'base64+gzip+noop'
45 45
46 46 # TEST: ensure the revision data is consistent
47 47 $ hg cat noop
48 48 [NOOP]
49 49 $ hg debugdata noop 0
50 50 [NOOP]
51 51
52 52 $ hg cat -r . base64
53 53 [BASE64]
54 54 $ hg debugdata base64 0
55 55 W0JBU0U2NF0K (no-eol)
56 56
57 57 $ hg cat -r . gzip
58 58 [GZIP]
59 59 $ hg debugdata gzip 0
60 60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61 61
62 62 $ hg cat -r . noop-base64
63 63 [NOOP][BASE64]
64 64 $ hg debugdata noop-base64 0
65 65 W05PT1BdW0JBU0U2NF0K (no-eol)
66 66
67 67 $ hg cat -r . noop-gzip
68 68 [NOOP][GZIP]
69 69 $ hg debugdata noop-gzip 0
70 70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71 71
72 72 $ hg cat -r . base64-gzip
73 73 [BASE64][GZIP]
74 74 $ hg debugdata base64-gzip 0
75 75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76 76
77 77 $ hg cat -r . base64-gzip-noop
78 78 [BASE64][GZIP][NOOP]
79 79 $ hg debugdata base64-gzip-noop 0
80 80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81 81
82 82 # Push to the server
83 83 $ hg push
84 84 pushing to $TESTTMP/server
85 85 searching for changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 7 changesets with 7 changes to 7 files
90 90
91 91 Ensure the data got to the server OK
92 92
93 93 $ cd ../server
94 94 $ hg cat -r 6e48f4215d24 noop
95 95 [NOOP]
96 96 $ hg debugdata noop 0
97 97 [NOOP]
98 98
99 99 $ hg cat -r 6e48f4215d24 base64
100 100 [BASE64]
101 101 $ hg debugdata base64 0
102 102 W0JBU0U2NF0K (no-eol)
103 103
104 104 $ hg cat -r 6e48f4215d24 gzip
105 105 [GZIP]
106 106 $ hg debugdata gzip 0
107 107 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
108 108
109 109 $ hg cat -r 6e48f4215d24 noop-base64
110 110 [NOOP][BASE64]
111 111 $ hg debugdata noop-base64 0
112 112 W05PT1BdW0JBU0U2NF0K (no-eol)
113 113
114 114 $ hg cat -r 6e48f4215d24 noop-gzip
115 115 [NOOP][GZIP]
116 116 $ hg debugdata noop-gzip 0
117 117 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
118 118
119 119 $ hg cat -r 6e48f4215d24 base64-gzip
120 120 [BASE64][GZIP]
121 121 $ hg debugdata base64-gzip 0
122 122 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
123 123
124 124 $ hg cat -r 6e48f4215d24 base64-gzip-noop
125 125 [BASE64][GZIP][NOOP]
126 126 $ hg debugdata base64-gzip-noop 0
127 127 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
128 128
129 129 # Initialize new client (not cloning) and setup extension
130 130 $ cd ..
131 131 $ hg init client2
132 132 $ cd client2
133 133 $ cat >> .hg/hgrc << EOF
134 134 > [paths]
135 135 > default = $TESTTMP/server
136 136 > [extensions]
137 137 > extension=$TESTDIR/flagprocessorext.py
138 138 > EOF
139 139
140 140 # Pull from server and update to latest revision
141 141 $ hg pull default
142 142 pulling from $TESTTMP/server
143 143 requesting all changes
144 144 adding changesets
145 145 adding manifests
146 146 adding file changes
147 147 added 7 changesets with 7 changes to 7 files
148 148 new changesets 07b1b9442c5b:6e48f4215d24
149 149 (run 'hg update' to get a working copy)
150 150 $ hg update
151 151 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 152
153 153 # TEST: ensure the revision data is consistent
154 154 $ hg cat noop
155 155 [NOOP]
156 156 $ hg debugdata noop 0
157 157 [NOOP]
158 158
159 159 $ hg cat -r . base64
160 160 [BASE64]
161 161 $ hg debugdata base64 0
162 162 W0JBU0U2NF0K (no-eol)
163 163
164 164 $ hg cat -r . gzip
165 165 [GZIP]
166 166 $ hg debugdata gzip 0
167 167 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
168 168
169 169 $ hg cat -r . noop-base64
170 170 [NOOP][BASE64]
171 171 $ hg debugdata noop-base64 0
172 172 W05PT1BdW0JBU0U2NF0K (no-eol)
173 173
174 174 $ hg cat -r . noop-gzip
175 175 [NOOP][GZIP]
176 176 $ hg debugdata noop-gzip 0
177 177 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
178 178
179 179 $ hg cat -r . base64-gzip
180 180 [BASE64][GZIP]
181 181 $ hg debugdata base64-gzip 0
182 182 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
183 183
184 184 $ hg cat -r . base64-gzip-noop
185 185 [BASE64][GZIP][NOOP]
186 186 $ hg debugdata base64-gzip-noop 0
187 187 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
188 188
189 189 # TEST: ensure a missing processor is handled
190 190 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
191 191 $ hg commit -Aqm 'fail+base64+gzip+noop'
192 192 abort: missing processor for flag '0x1'!
193 193 [255]
194 194 $ rm fail-base64-gzip-noop
195 195
196 196 # TEST: ensure we cannot register several flag processors on the same flag
197 197 $ cat >> .hg/hgrc << EOF
198 198 > [extensions]
199 199 > extension=$TESTDIR/flagprocessorext.py
200 200 > duplicate=$TESTDIR/flagprocessorext.py
201 201 > EOF
202 202 $ hg debugrebuilddirstate
203 203 Traceback (most recent call last):
204 204 File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
205 205 extsetup(ui)
206 206 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
207 207 validatehash,
208 208 File "*/mercurial/revlog.py", line *, in addflagprocessor (glob)
209 _insertflagprocessor(flag, processor, _flagprocessors)
209 _insertflagprocessor(flag, processor, flagutil.flagprocessors)
210 210 File "*/mercurial/revlog.py", line *, in _insertflagprocessor (glob)
211 211 raise error.Abort(msg)
212 212 mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
213 213 Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
214 214 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
215 215 $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
216 216 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
217 217 mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
218 218 Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
219 219 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
220 220 File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
221 221
222 222 $ cd ..
223 223
224 224 # TEST: bundle repo
225 225 $ hg init bundletest
226 226 $ cd bundletest
227 227
228 228 $ cat >> .hg/hgrc << EOF
229 229 > [extensions]
230 230 > flagprocessor=$TESTDIR/flagprocessorext.py
231 231 > EOF
232 232
233 233 $ for i in 0 single two three 4; do
234 234 > echo '[BASE64]a-bit-longer-'$i > base64
235 235 > hg commit -m base64-$i -A base64
236 236 > done
237 237
238 238 $ hg update 2 -q
239 239 $ echo '[BASE64]a-bit-longer-branching' > base64
240 240 $ hg commit -q -m branching
241 241
242 242 #if repobundlerepo
243 243 $ hg bundle --base 1 bundle.hg
244 244 4 changesets found
245 245 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
246 246 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64
247 247 5 branching
248 248 base64 | 2 +-
249 249 1 files changed, 1 insertions(+), 1 deletions(-)
250 250
251 251 4 base64-4
252 252 base64 | 2 +-
253 253 1 files changed, 1 insertions(+), 1 deletions(-)
254 254
255 255 3 base64-three
256 256 base64 | 2 +-
257 257 1 files changed, 1 insertions(+), 1 deletions(-)
258 258
259 259 2 base64-two
260 260 base64 | 2 +-
261 261 1 files changed, 1 insertions(+), 1 deletions(-)
262 262
263 263 1 base64-single
264 264 base64 | 2 +-
265 265 1 files changed, 1 insertions(+), 1 deletions(-)
266 266
267 267 0 base64-0
268 268 base64 | 1 +
269 269 1 files changed, 1 insertions(+), 0 deletions(-)
270 270
271 271
272 272 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
273 273 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64
274 274 5 branching
275 275 base64 | 2 +-
276 276 1 files changed, 1 insertions(+), 1 deletions(-)
277 277
278 278 4 base64-4
279 279 base64 | 2 +-
280 280 1 files changed, 1 insertions(+), 1 deletions(-)
281 281
282 282 3 base64-three
283 283 base64 | 2 +-
284 284 1 files changed, 1 insertions(+), 1 deletions(-)
285 285
286 286 2 base64-two
287 287 base64 | 2 +-
288 288 1 files changed, 1 insertions(+), 1 deletions(-)
289 289
290 290 1 base64-single
291 291 base64 | 2 +-
292 292 1 files changed, 1 insertions(+), 1 deletions(-)
293 293
294 294 0 base64-0
295 295 base64 | 1 +
296 296 1 files changed, 1 insertions(+), 0 deletions(-)
297 297
298 298 $ rm bundle.hg bundle-again.hg
299 299 #endif
300 300
301 301 # TEST: hg status
302 302
303 303 $ hg status
304 304 $ hg diff