revlog: drop more file description passing between private function...
marmoute
r51919:8520db30 default
@@ -1,751 +1,752 @@
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14
15 15 import contextlib
16 16 import os
17 17 import shutil
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullrev,
23 23 )
24 24
25 25 from . import (
26 26 bundle2,
27 27 changegroup,
28 28 changelog,
29 29 cmdutil,
30 30 discovery,
31 31 encoding,
32 32 error,
33 33 exchange,
34 34 filelog,
35 35 localrepo,
36 36 manifest,
37 37 mdiff,
38 38 pathutil,
39 39 phases,
40 40 pycompat,
41 41 revlog,
42 42 revlogutils,
43 43 util,
44 44 vfs as vfsmod,
45 45 )
46 46 from .utils import (
47 47 urlutil,
48 48 )
49 49
50 50 from .revlogutils import (
51 51 constants as revlog_constants,
52 52 )
53 53
54 54
55 55 class bundlerevlog(revlog.revlog):
56 56 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
57 57 # How it works:
58 58 # To retrieve a revision, we need to know the offset of the revision in
59 59 # the bundle (an unbundle object). We store this offset in the index
60 60 # (start). The base of the delta is stored in the base field.
61 61 #
62 62 # To differentiate a rev in the bundle from a rev in the revlog, we
63 63 # check revision against repotiprev.
64 64 opener = vfsmod.readonlyvfs(opener)
65 65 revlog.revlog.__init__(self, opener, target=target, radix=radix)
66 66 self.bundle = cgunpacker
67 67 n = len(self)
68 68 self.repotiprev = n - 1
69 69 self.bundlerevs = set() # used by 'bundle()' revset expression
70 70 for deltadata in cgunpacker.deltaiter():
71 71 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
72 72
73 73 size = len(delta)
74 74 start = cgunpacker.tell() - size
75 75
76 76 if self.index.has_node(node):
77 77 # this can happen if two branches make the same change
78 78 self.bundlerevs.add(self.index.rev(node))
79 79 continue
80 80 if cs == node:
81 81 linkrev = nullrev
82 82 else:
83 83 linkrev = linkmapper(cs)
84 84
85 85 for p in (p1, p2):
86 86 if not self.index.has_node(p):
87 87 raise error.LookupError(
88 88 p, self.display_id, _(b"unknown parent")
89 89 )
90 90
91 91 if not self.index.has_node(deltabase):
92 92 raise error.LookupError(
93 93 deltabase, self.display_id, _(b'unknown delta base')
94 94 )
95 95
96 96 baserev = self.rev(deltabase)
97 97 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
98 98 e = revlogutils.entry(
99 99 flags=flags,
100 100 data_offset=start,
101 101 data_compressed_length=size,
102 102 data_delta_base=baserev,
103 103 link_rev=linkrev,
104 104 parent_rev_1=self.rev(p1),
105 105 parent_rev_2=self.rev(p2),
106 106 node_id=node,
107 107 )
108 108 self.index.append(e)
109 109 self.bundlerevs.add(n)
110 110 n += 1
111 111
112 112 @contextlib.contextmanager
113 113 def reading(self):
114 114 if self.repotiprev < 0:
115 115 yield
116 116 else:
117 117 with super().reading() as x:
118 118 yield x
119 119
120 def _chunk(self, rev, df=None):
120 def _chunk(self, rev):
121 121 # Warning: in case of bundle, the diff is against what we stored as
122 122 # delta base, not against rev - 1
123 123 # XXX: could use some caching
124 124 if rev <= self.repotiprev:
125 125 return revlog.revlog._chunk(self, rev)
126 126 self.bundle.seek(self.start(rev))
127 127 return self.bundle.read(self.length(rev))
128 128
129 129 def revdiff(self, rev1, rev2):
130 130 """return or calculate a delta between two revisions"""
131 131 if rev1 > self.repotiprev and rev2 > self.repotiprev:
132 132 # hot path for bundle
133 133 revb = self.index[rev2][3]
134 134 if revb == rev1:
135 135 return self._chunk(rev2)
136 136 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
137 137 return revlog.revlog.revdiff(self, rev1, rev2)
138 138
139 139 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
140 140
141 def _rawtext(self, node, rev, _df=None):
141 def _rawtext(self, node, rev):
142 142 if rev is None:
143 143 rev = self.rev(node)
144 144 validated = False
145 145 rawtext = None
146 146 chain = []
147 147 iterrev = rev
148 148 # reconstruct the revision if it is from a changegroup
149 149 while iterrev > self.repotiprev:
150 150 if self._revisioncache and self._revisioncache[1] == iterrev:
151 151 rawtext = self._revisioncache[2]
152 152 break
153 153 chain.append(iterrev)
154 154 iterrev = self.index[iterrev][3]
155 155 if iterrev == nullrev:
156 156 rawtext = b''
157 157 elif rawtext is None:
158 158 r = super(bundlerevlog, self)._rawtext(
159 self.node(iterrev), iterrev, _df=_df
159 self.node(iterrev),
160 iterrev,
160 161 )
161 162 __, rawtext, validated = r
162 163 if chain:
163 164 validated = False
164 165 while chain:
165 166 delta = self._chunk(chain.pop())
166 167 rawtext = mdiff.patches(rawtext, [delta])
167 168 return rev, rawtext, validated
168 169
169 170 def addrevision(self, *args, **kwargs):
170 171 raise NotImplementedError
171 172
172 173 def addgroup(self, *args, **kwargs):
173 174 raise NotImplementedError
174 175
175 176 def strip(self, *args, **kwargs):
176 177 raise NotImplementedError
177 178
178 179 def checksize(self):
179 180 raise NotImplementedError
180 181
181 182
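For readers following the `_rawtext` logic above: the chunks read from the bundle are plain bdiff deltas, and folding a chain of them back into a full text takes one `mdiff.patches` call per link. A minimal, self-contained sketch (not part of this change; it only assumes the `mercurial` package is importable):

from mercurial import mdiff

base = b'line 1\nline 2\n'
rev1 = b'line 1\nline 2\nline 3\n'
rev2 = b'line 0\nline 1\nline 2\nline 3\n'

# textdiff() produces the same binary delta format a changegroup carries.
chain = [mdiff.textdiff(base, rev1), mdiff.textdiff(rev1, rev2)]

# Applying the deltas in order rebuilds the newest revision, mirroring the
# `while chain:` loop in bundlerevlog._rawtext().
rawtext = base
for delta in chain:
    rawtext = mdiff.patches(rawtext, [delta])
assert rawtext == rev2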
182 183 class bundlechangelog(bundlerevlog, changelog.changelog):
183 184 def __init__(self, opener, cgunpacker):
184 185 changelog.changelog.__init__(self, opener)
185 186 linkmapper = lambda x: x
186 187 bundlerevlog.__init__(
187 188 self,
188 189 opener,
189 190 (revlog_constants.KIND_CHANGELOG, None),
190 191 self.radix,
191 192 cgunpacker,
192 193 linkmapper,
193 194 )
194 195
195 196
196 197 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
197 198 def __init__(
198 199 self,
199 200 nodeconstants,
200 201 opener,
201 202 cgunpacker,
202 203 linkmapper,
203 204 dirlogstarts=None,
204 205 dir=b'',
205 206 ):
206 207 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
207 208 bundlerevlog.__init__(
208 209 self,
209 210 opener,
210 211 (revlog_constants.KIND_MANIFESTLOG, dir),
211 212 self._revlog.radix,
212 213 cgunpacker,
213 214 linkmapper,
214 215 )
215 216 if dirlogstarts is None:
216 217 dirlogstarts = {}
217 218 if self.bundle.version == b"03":
218 219 dirlogstarts = _getfilestarts(self.bundle)
219 220 self._dirlogstarts = dirlogstarts
220 221 self._linkmapper = linkmapper
221 222
222 223 def dirlog(self, d):
223 224 if d in self._dirlogstarts:
224 225 self.bundle.seek(self._dirlogstarts[d])
225 226 return bundlemanifest(
226 227 self.nodeconstants,
227 228 self.opener,
228 229 self.bundle,
229 230 self._linkmapper,
230 231 self._dirlogstarts,
231 232 dir=d,
232 233 )
233 234 return super(bundlemanifest, self).dirlog(d)
234 235
235 236
236 237 class bundlefilelog(filelog.filelog):
237 238 def __init__(self, opener, path, cgunpacker, linkmapper):
238 239 filelog.filelog.__init__(self, opener, path)
239 240 self._revlog = bundlerevlog(
240 241 opener,
241 242 # XXX should use the unencoded path
242 243 target=(revlog_constants.KIND_FILELOG, path),
243 244 radix=self._revlog.radix,
244 245 cgunpacker=cgunpacker,
245 246 linkmapper=linkmapper,
246 247 )
247 248
248 249
249 250 class bundlepeer(localrepo.localpeer):
250 251 def canpush(self):
251 252 return False
252 253
253 254
254 255 class bundlephasecache(phases.phasecache):
255 256 def __init__(self, *args, **kwargs):
256 257 super(bundlephasecache, self).__init__(*args, **kwargs)
257 258 if hasattr(self, 'opener'):
258 259 self.opener = vfsmod.readonlyvfs(self.opener)
259 260
260 261 def write(self):
261 262 raise NotImplementedError
262 263
263 264 def _write(self, fp):
264 265 raise NotImplementedError
265 266
266 267 def _updateroots(self, phase, newroots, tr):
267 268 self.phaseroots[phase] = newroots
268 269 self.invalidate()
269 270 self.dirty = True
270 271
271 272
272 273 def _getfilestarts(cgunpacker):
273 274 filespos = {}
274 275 for chunkdata in iter(cgunpacker.filelogheader, {}):
275 276 fname = chunkdata[b'filename']
276 277 filespos[fname] = cgunpacker.tell()
277 278 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
278 279 pass
279 280 return filespos
280 281
281 282
282 283 class bundlerepository:
283 284 """A repository instance that is a union of a local repo and a bundle.
284 285
285 286 Instances represent a read-only repository composed of a local repository
286 287 with the contents of a bundle file applied. The repository instance is
287 288 conceptually similar to the state of a repository after an
288 289 ``hg unbundle`` operation. However, the contents of the bundle are never
289 290 applied to the actual base repository.
290 291
291 292 Instances constructed directly are not usable as repository objects.
292 293 Use instance() or makebundlerepository() to create instances.
293 294 """
294 295
295 296 def __init__(self, bundlepath, url, tempparent):
296 297 self._tempparent = tempparent
297 298 self._url = url
298 299
299 300 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
300 301
301 302 # dict with the mapping 'filename' -> position in the changegroup.
302 303 self._cgfilespos = {}
303 304 self._bundlefile = None
304 305 self._cgunpacker = None
305 306 self.tempfile = None
306 307 f = util.posixfile(bundlepath, b"rb")
307 308 bundle = exchange.readbundle(self.ui, f, bundlepath)
308 309
309 310 if isinstance(bundle, bundle2.unbundle20):
310 311 self._bundlefile = bundle
311 312
312 313 cgpart = None
313 314 for part in bundle.iterparts(seekable=True):
314 315 if part.type == b'phase-heads':
315 316 self._handle_bundle2_phase_part(bundle, part)
316 317 elif part.type == b'changegroup':
317 318 if cgpart:
318 319 raise NotImplementedError(
319 320 b"can't process multiple changegroups"
320 321 )
321 322 cgpart = part
322 323 self._handle_bundle2_cg_part(bundle, part)
323 324
324 325 if not cgpart:
325 326 raise error.Abort(_(b"No changegroups found"))
326 327
327 328 # This is required to placate a later consumer, which expects
328 329 # the payload offset to be at the beginning of the changegroup.
329 330 # We need to do this after the iterparts() generator advances
330 331 # because iterparts() will seek to end of payload after the
331 332 # generator returns control to iterparts().
332 333 cgpart.seek(0, os.SEEK_SET)
333 334
334 335 elif isinstance(bundle, changegroup.cg1unpacker):
335 336 self._handle_bundle1(bundle, bundlepath)
336 337 else:
337 338 raise error.Abort(
338 339 _(b'bundle type %r cannot be read') % type(bundle)
339 340 )
340 341
341 342 def _handle_bundle1(self, bundle, bundlepath):
342 343 if bundle.compressed():
343 344 f = self._writetempbundle(bundle.read, b'.hg10un', header=b'HG10UN')
344 345 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
345 346
346 347 self._bundlefile = bundle
347 348 self._cgunpacker = bundle
348 349
349 350 self.firstnewrev = self.changelog.repotiprev + 1
350 351 phases.retractboundary(
351 352 self,
352 353 None,
353 354 phases.draft,
354 355 [ctx.node() for ctx in self[self.firstnewrev :]],
355 356 )
356 357
357 358 def _handle_bundle2_cg_part(self, bundle, part):
358 359 assert part.type == b'changegroup'
359 360 cgstream = part
360 361 targetphase = part.params.get(b'targetphase')
361 362 try:
362 363 targetphase = int(targetphase)
363 364 except TypeError:
364 365 pass
365 366 if targetphase is None:
366 367 targetphase = phases.draft
367 368 if targetphase not in phases.allphases:
368 369 m = _(b'unsupported targetphase: %d')
369 370 m %= targetphase
370 371 raise error.Abort(m)
371 372 version = part.params.get(b'version', b'01')
372 373 legalcgvers = changegroup.supportedincomingversions(self)
373 374 if version not in legalcgvers:
374 375 msg = _(b'Unsupported changegroup version: %s')
375 376 raise error.Abort(msg % version)
376 377 if bundle.compressed():
377 378 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
378 379
379 380 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
380 381
381 382 self.firstnewrev = self.changelog.repotiprev + 1
382 383 phases.retractboundary(
383 384 self,
384 385 None,
385 386 targetphase,
386 387 [ctx.node() for ctx in self[self.firstnewrev :]],
387 388 )
388 389
389 390 def _handle_bundle2_phase_part(self, bundle, part):
390 391 assert part.type == b'phase-heads'
391 392
392 393 unfi = self.unfiltered()
393 394 headsbyphase = phases.binarydecode(part)
394 395 phases.updatephases(unfi, lambda: None, headsbyphase)
395 396
396 397 def _writetempbundle(self, readfn, suffix, header=b''):
397 398 """Write a temporary file to disk"""
398 399 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
399 400 self.tempfile = temp
400 401
401 402 with os.fdopen(fdtemp, 'wb') as fptemp:
402 403 fptemp.write(header)
403 404 while True:
404 405 chunk = readfn(2 ** 18)
405 406 if not chunk:
406 407 break
407 408 fptemp.write(chunk)
408 409
409 410 return self.vfs.open(self.tempfile, mode=b"rb")
410 411
411 412 @localrepo.unfilteredpropertycache
412 413 def _phasecache(self):
413 414 return bundlephasecache(self, self._phasedefaults)
414 415
415 416 @localrepo.unfilteredpropertycache
416 417 def changelog(self):
417 418 # consume the header if it exists
418 419 self._cgunpacker.changelogheader()
419 420 c = bundlechangelog(self.svfs, self._cgunpacker)
420 421 self.manstart = self._cgunpacker.tell()
421 422 return c
422 423
423 424 def _refreshchangelog(self):
424 425 # the changelog of a bundle repo is not a filecache, so this method is
425 426 # not applicable.
426 427 pass
427 428
428 429 @localrepo.unfilteredpropertycache
429 430 def manifestlog(self):
430 431 self._cgunpacker.seek(self.manstart)
431 432 # consume the header if it exists
432 433 self._cgunpacker.manifestheader()
433 434 linkmapper = self.unfiltered().changelog.rev
434 435 rootstore = bundlemanifest(
435 436 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
436 437 )
437 438 self.filestart = self._cgunpacker.tell()
438 439
439 440 return manifest.manifestlog(
440 441 self.svfs, self, rootstore, self.narrowmatch()
441 442 )
442 443
443 444 def _consumemanifest(self):
444 445 """Consumes the manifest portion of the bundle, setting filestart so the
445 446 file portion can be read."""
446 447 self._cgunpacker.seek(self.manstart)
447 448 self._cgunpacker.manifestheader()
448 449 for delta in self._cgunpacker.deltaiter():
449 450 pass
450 451 self.filestart = self._cgunpacker.tell()
451 452
452 453 @localrepo.unfilteredpropertycache
453 454 def manstart(self):
454 455 self.changelog
455 456 return self.manstart
456 457
457 458 @localrepo.unfilteredpropertycache
458 459 def filestart(self):
459 460 self.manifestlog
460 461
461 462 # If filestart was not set by self.manifestlog, that means the
462 463 # manifestlog implementation did not consume the manifests from the
463 464 # changegroup (ex: it might be consuming trees from a separate bundle2
464 465 # part instead). So we need to manually consume it.
465 466 if 'filestart' not in self.__dict__:
466 467 self._consumemanifest()
467 468
468 469 return self.filestart
469 470
470 471 def url(self):
471 472 return self._url
472 473
473 474 def file(self, f):
474 475 if not self._cgfilespos:
475 476 self._cgunpacker.seek(self.filestart)
476 477 self._cgfilespos = _getfilestarts(self._cgunpacker)
477 478
478 479 if f in self._cgfilespos:
479 480 self._cgunpacker.seek(self._cgfilespos[f])
480 481 linkmapper = self.unfiltered().changelog.rev
481 482 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
482 483 else:
483 484 return super(bundlerepository, self).file(f)
484 485
485 486 def close(self):
486 487 """Close assigned bundle file immediately."""
487 488 self._bundlefile.close()
488 489 if self.tempfile is not None:
489 490 self.vfs.unlink(self.tempfile)
490 491 if self._tempparent:
491 492 shutil.rmtree(self._tempparent, True)
492 493
493 494 def cancopy(self):
494 495 return False
495 496
496 497 def peer(self, path=None, remotehidden=False):
497 498 return bundlepeer(self, path=path, remotehidden=remotehidden)
498 499
499 500 def getcwd(self):
500 501 return encoding.getcwd() # always outside the repo
501 502
502 503 # Check if parents exist in localrepo before setting
503 504 def setparents(self, p1, p2=None):
504 505 if p2 is None:
505 506 p2 = self.nullid
506 507 p1rev = self.changelog.rev(p1)
507 508 p2rev = self.changelog.rev(p2)
508 509 msg = _(b"setting parent to node %s that only exists in the bundle\n")
509 510 if self.changelog.repotiprev < p1rev:
510 511 self.ui.warn(msg % hex(p1))
511 512 if self.changelog.repotiprev < p2rev:
512 513 self.ui.warn(msg % hex(p2))
513 514 return super(bundlerepository, self).setparents(p1, p2)
514 515
515 516
516 517 def instance(ui, path, create, intents=None, createopts=None):
517 518 if create:
518 519 raise error.Abort(_(b'cannot create new bundle repository'))
519 520 # internal config: bundle.mainreporoot
520 521 parentpath = ui.config(b"bundle", b"mainreporoot")
521 522 if not parentpath:
522 523 # try to find the correct path to the working directory repo
523 524 parentpath = cmdutil.findrepo(encoding.getcwd())
524 525 if parentpath is None:
525 526 parentpath = b''
526 527 if parentpath:
527 528 # Try to make the full path relative so we get a nice, short URL.
528 529 # In particular, we don't want temp dir names in test outputs.
529 530 cwd = encoding.getcwd()
530 531 if parentpath == cwd:
531 532 parentpath = b''
532 533 else:
533 534 cwd = pathutil.normasprefix(cwd)
534 535 if parentpath.startswith(cwd):
535 536 parentpath = parentpath[len(cwd) :]
536 537 u = urlutil.url(path)
537 538 path = u.localpath()
538 539 if u.scheme == b'bundle':
539 540 s = path.split(b"+", 1)
540 541 if len(s) == 1:
541 542 repopath, bundlename = parentpath, s[0]
542 543 else:
543 544 repopath, bundlename = s
544 545 else:
545 546 repopath, bundlename = parentpath, path
546 547
547 548 return makebundlerepository(ui, repopath, bundlename)
548 549
549 550
550 551 def makebundlerepository(ui, repopath, bundlepath):
551 552 """Make a bundle repository object based on repo and bundle paths."""
552 553 if repopath:
553 554 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
554 555 else:
555 556 url = b'bundle:%s' % bundlepath
556 557
557 558 # Because we can't make any guarantees about the type of the base
558 559 # repository, we can't have a static class representing the bundle
559 560 # repository. We also can't make any guarantees about how to even
560 561 # call the base repository's constructor!
561 562 #
562 563 # So, our strategy is to go through ``localrepo.instance()`` to construct
563 564 # a repo instance. Then, we dynamically create a new type derived from
564 565 # both it and our ``bundlerepository`` class which overrides some
565 566 # functionality. We then change the type of the constructed repository
566 567 # to this new type and initialize the bundle-specific bits of it.
567 568
568 569 try:
569 570 repo = localrepo.instance(ui, repopath, create=False)
570 571 tempparent = None
571 572 except error.RequirementError:
572 573 raise # no fallback if the backing repo is unsupported
573 574 except error.RepoError:
574 575 tempparent = pycompat.mkdtemp()
575 576 try:
576 577 repo = localrepo.instance(ui, tempparent, create=True)
577 578 except Exception:
578 579 shutil.rmtree(tempparent)
579 580 raise
580 581
581 582 class derivedbundlerepository(bundlerepository, repo.__class__):
582 583 pass
583 584
584 585 repo.__class__ = derivedbundlerepository
585 586 bundlerepository.__init__(repo, bundlepath, url, tempparent)
586 587
587 588 return repo
588 589
589 590
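A hedged usage sketch for the factory above; the paths are hypothetical and only names defined in this module (plus `mercurial.ui`) are used:

from mercurial import ui as uimod
from mercurial.bundlerepo import makebundlerepository

ui = uimod.ui.load()
# Overlay bundle.hg on top of the repository at /path/to/repo, read-only.
repo = makebundlerepository(ui, b'/path/to/repo', b'/path/to/bundle.hg')
try:
    # Revisions at or past repo.firstnewrev exist only in the bundle.
    for rev in repo.revs(b'%d:', repo.firstnewrev):
        ui.write(b'%d:%s\n' % (rev, repo[rev].hex()[:12]))
finally:
    repo.close()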
590 591 class bundletransactionmanager:
591 592 def transaction(self):
592 593 return None
593 594
594 595 def close(self):
595 596 raise NotImplementedError
596 597
597 598 def release(self):
598 599 raise NotImplementedError
599 600
600 601
601 602 def getremotechanges(
602 603 ui, repo, peer, onlyheads=None, bundlename=None, force=False
603 604 ):
604 605 """obtains a bundle of changes incoming from peer
605 606
606 607 "onlyheads" restricts the returned changes to those reachable from the
607 608 specified heads.
608 609 "bundlename", if given, stores the bundle to this file path permanently;
609 610 otherwise it's stored to a temp file and gets deleted again when you call
610 611 the returned "cleanupfn".
611 612 "force" indicates whether to proceed on unrelated repos.
612 613
613 614 Returns a tuple (local, csets, cleanupfn):
614 615
615 616 "local" is a local repo from which to obtain the actual incoming
616 617 changesets; it is a bundlerepo for the obtained bundle when the
617 618 original "peer" is remote.
618 619 "csets" lists the incoming changeset node ids.
619 620 "cleanupfn" must be called without arguments when you're done processing
620 621 the changes; it closes both the original "peer" and the one returned
621 622 here.
622 623 """
623 624 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
624 625 common, incoming, rheads = tmp
625 626 if not incoming:
626 627 try:
627 628 if bundlename:
628 629 os.unlink(bundlename)
629 630 except OSError:
630 631 pass
631 632 return repo, [], peer.close
632 633
633 634 commonset = set(common)
634 635 rheads = [x for x in rheads if x not in commonset]
635 636
636 637 bundle = None
637 638 bundlerepo = None
638 639 localrepo = peer.local()
639 640 if bundlename or not localrepo:
640 641 # create a bundle (uncompressed if peer repo is not local)
641 642
642 643 # developer config: devel.legacy.exchange
643 644 legexc = ui.configlist(b'devel', b'legacy.exchange')
644 645 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
645 646 canbundle2 = (
646 647 not forcebundle1
647 648 and peer.capable(b'getbundle')
648 649 and peer.capable(b'bundle2')
649 650 )
650 651 if canbundle2:
651 652 with peer.commandexecutor() as e:
652 653 b2 = e.callcommand(
653 654 b'getbundle',
654 655 {
655 656 b'source': b'incoming',
656 657 b'common': common,
657 658 b'heads': rheads,
658 659 b'bundlecaps': exchange.caps20to10(
659 660 repo, role=b'client'
660 661 ),
661 662 b'cg': True,
662 663 },
663 664 ).result()
664 665
665 666 fname = bundle = changegroup.writechunks(
666 667 ui, b2._forwardchunks(), bundlename
667 668 )
668 669 else:
669 670 if peer.capable(b'getbundle'):
670 671 with peer.commandexecutor() as e:
671 672 cg = e.callcommand(
672 673 b'getbundle',
673 674 {
674 675 b'source': b'incoming',
675 676 b'common': common,
676 677 b'heads': rheads,
677 678 },
678 679 ).result()
679 680 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
680 681 # compat with older servers when pulling all remote heads
681 682
682 683 with peer.commandexecutor() as e:
683 684 cg = e.callcommand(
684 685 b'changegroup',
685 686 {
686 687 b'nodes': incoming,
687 688 b'source': b'incoming',
688 689 },
689 690 ).result()
690 691
691 692 rheads = None
692 693 else:
693 694 with peer.commandexecutor() as e:
694 695 cg = e.callcommand(
695 696 b'changegroupsubset',
696 697 {
697 698 b'bases': incoming,
698 699 b'heads': rheads,
699 700 b'source': b'incoming',
700 701 },
701 702 ).result()
702 703
703 704 if localrepo:
704 705 bundletype = b"HG10BZ"
705 706 else:
706 707 bundletype = b"HG10UN"
707 708 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
708 709 # keep written bundle?
709 710 if bundlename:
710 711 bundle = None
711 712 if not localrepo:
712 713 # use the created uncompressed bundlerepo
713 714 localrepo = bundlerepo = makebundlerepository(
714 715 repo.baseui, repo.root, fname
715 716 )
716 717
717 718 # this repo contains local and peer now, so filter out local again
718 719 common = repo.heads()
719 720 if localrepo:
720 721 # Part of common may be remotely filtered
721 722 # So use an unfiltered version
722 723 # The discovery process probably needs cleanup to avoid that
723 724 localrepo = localrepo.unfiltered()
724 725
725 726 csets = localrepo.changelog.findmissing(common, rheads)
726 727
727 728 if bundlerepo:
728 729 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
729 730
730 731 with peer.commandexecutor() as e:
731 732 remotephases = e.callcommand(
732 733 b'listkeys',
733 734 {
734 735 b'namespace': b'phases',
735 736 },
736 737 ).result()
737 738
738 739 pullop = exchange.pulloperation(
739 740 bundlerepo, peer, path=None, heads=reponodes
740 741 )
741 742 pullop.trmanager = bundletransactionmanager()
742 743 exchange._pullapplyphases(pullop, remotephases)
743 744
744 745 def cleanup():
745 746 if bundlerepo:
746 747 bundlerepo.close()
747 748 if bundle:
748 749 os.unlink(bundle)
749 750 peer.close()
750 751
751 752 return (localrepo, csets, cleanup)
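The triple documented in the docstring above is typically consumed like this (a sketch; `ui`, `repo`, and a connected `peer` are assumed to exist, e.g. from `mercurial.hg.peer`):

from mercurial import bundlerepo

local, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer)
try:
    for node in csets:
        ui.write(b'incoming: %s\n' % local[node].hex()[:12])
finally:
    # cleanupfn() closes the original peer and deletes any temporary bundle.
    cleanupfn()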
@@ -1,3546 +1,3546 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 # coding: utf8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Storage back-end for Mercurial.
10 10
11 11 This provides efficient delta storage with O(1) retrieve and append
12 12 and O(changes) merge between branches.
13 13 """
14 14
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import io
20 20 import os
21 21 import struct
22 22 import weakref
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .revlogutils.constants import (
36 36 ALL_KINDS,
37 37 CHANGELOGV2,
38 38 COMP_MODE_DEFAULT,
39 39 COMP_MODE_INLINE,
40 40 COMP_MODE_PLAIN,
41 41 DELTA_BASE_REUSE_NO,
42 42 DELTA_BASE_REUSE_TRY,
43 43 ENTRY_RANK,
44 44 FEATURES_BY_VERSION,
45 45 FLAG_GENERALDELTA,
46 46 FLAG_INLINE_DATA,
47 47 INDEX_HEADER,
48 48 KIND_CHANGELOG,
49 49 KIND_FILELOG,
50 50 RANK_UNKNOWN,
51 51 REVLOGV0,
52 52 REVLOGV1,
53 53 REVLOGV1_FLAGS,
54 54 REVLOGV2,
55 55 REVLOGV2_FLAGS,
56 56 REVLOG_DEFAULT_FLAGS,
57 57 REVLOG_DEFAULT_FORMAT,
58 58 REVLOG_DEFAULT_VERSION,
59 59 SUPPORTED_FLAGS,
60 60 )
61 61 from .revlogutils.flagutil import (
62 62 REVIDX_DEFAULT_FLAGS,
63 63 REVIDX_ELLIPSIS,
64 64 REVIDX_EXTSTORED,
65 65 REVIDX_FLAGS_ORDER,
66 66 REVIDX_HASCOPIESINFO,
67 67 REVIDX_ISCENSORED,
68 68 REVIDX_RAWTEXT_CHANGING_FLAGS,
69 69 )
70 70 from .thirdparty import attr
71 71 from . import (
72 72 ancestor,
73 73 dagop,
74 74 error,
75 75 mdiff,
76 76 policy,
77 77 pycompat,
78 78 revlogutils,
79 79 templatefilters,
80 80 util,
81 81 )
82 82 from .interfaces import (
83 83 repository,
84 84 util as interfaceutil,
85 85 )
86 86 from .revlogutils import (
87 87 deltas as deltautil,
88 88 docket as docketutil,
89 89 flagutil,
90 90 nodemap as nodemaputil,
91 91 randomaccessfile,
92 92 revlogv0,
93 93 rewrite,
94 94 sidedata as sidedatautil,
95 95 )
96 96 from .utils import (
97 97 storageutil,
98 98 stringutil,
99 99 )
100 100
101 101 # blanked usage of all the names to prevent pyflakes constraints
102 102 # We need these names available in the module for extensions.
103 103
104 104 REVLOGV0
105 105 REVLOGV1
106 106 REVLOGV2
107 107 CHANGELOGV2
108 108 FLAG_INLINE_DATA
109 109 FLAG_GENERALDELTA
110 110 REVLOG_DEFAULT_FLAGS
111 111 REVLOG_DEFAULT_FORMAT
112 112 REVLOG_DEFAULT_VERSION
113 113 REVLOGV1_FLAGS
114 114 REVLOGV2_FLAGS
115 115 REVIDX_ISCENSORED
116 116 REVIDX_ELLIPSIS
117 117 REVIDX_HASCOPIESINFO
118 118 REVIDX_EXTSTORED
119 119 REVIDX_DEFAULT_FLAGS
120 120 REVIDX_FLAGS_ORDER
121 121 REVIDX_RAWTEXT_CHANGING_FLAGS
122 122
123 123 parsers = policy.importmod('parsers')
124 124 rustancestor = policy.importrust('ancestor')
125 125 rustdagop = policy.importrust('dagop')
126 126 rustrevlog = policy.importrust('revlog')
127 127
128 128 # Aliased for performance.
129 129 _zlibdecompress = zlib.decompress
130 130
131 131 # max size of inline data embedded into a revlog
132 132 _maxinline = 131072
133 133
134 134 # Flag processors for REVIDX_ELLIPSIS.
135 135 def ellipsisreadprocessor(rl, text):
136 136 return text, False
137 137
138 138
139 139 def ellipsiswriteprocessor(rl, text):
140 140 return text, False
141 141
142 142
143 143 def ellipsisrawprocessor(rl, text):
144 144 return False
145 145
146 146
147 147 ellipsisprocessor = (
148 148 ellipsisreadprocessor,
149 149 ellipsiswriteprocessor,
150 150 ellipsisrawprocessor,
151 151 )
152 152
153 153
154 154 def _verify_revision(rl, skipflags, state, node):
155 155 """Verify the integrity of the given revlog ``node`` while providing a hook
156 156 point for extensions to influence the operation."""
157 157 if skipflags:
158 158 state[b'skipread'].add(node)
159 159 else:
160 160 # Side-effect: read content and verify hash.
161 161 rl.revision(node)
162 162
163 163
164 164 # True if a fast implementation for persistent-nodemap is available
165 165 #
166 166 # We also consider we have a "fast" implementation in "pure" python because
167 167 # people using pure don't really have performance consideration (and a
168 168 # wheelbarrow of other slowness source)
169 169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or hasattr(
170 170 parsers, 'BaseIndexObject'
171 171 )
172 172
173 173
174 174 @interfaceutil.implementer(repository.irevisiondelta)
175 175 @attr.s(slots=True)
176 176 class revlogrevisiondelta:
177 177 node = attr.ib()
178 178 p1node = attr.ib()
179 179 p2node = attr.ib()
180 180 basenode = attr.ib()
181 181 flags = attr.ib()
182 182 baserevisionsize = attr.ib()
183 183 revision = attr.ib()
184 184 delta = attr.ib()
185 185 sidedata = attr.ib()
186 186 protocol_flags = attr.ib()
187 187 linknode = attr.ib(default=None)
188 188
189 189
190 190 @interfaceutil.implementer(repository.iverifyproblem)
191 191 @attr.s(frozen=True)
192 192 class revlogproblem:
193 193 warning = attr.ib(default=None)
194 194 error = attr.ib(default=None)
195 195 node = attr.ib(default=None)
196 196
197 197
198 198 def parse_index_v1(data, inline):
199 199 # call the C implementation to parse the index data
200 200 index, cache = parsers.parse_index2(data, inline)
201 201 return index, cache
202 202
203 203
204 204 def parse_index_v2(data, inline):
205 205 # call the C implementation to parse the index data
206 206 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
207 207 return index, cache
208 208
209 209
210 210 def parse_index_cl_v2(data, inline):
211 211 # call the C implementation to parse the index data
212 212 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
213 213 return index, cache
214 214
215 215
216 216 if hasattr(parsers, 'parse_index_devel_nodemap'):
217 217
218 218 def parse_index_v1_nodemap(data, inline):
219 219 index, cache = parsers.parse_index_devel_nodemap(data, inline)
220 220 return index, cache
221 221
222 222
223 223 else:
224 224 parse_index_v1_nodemap = None
225 225
226 226
227 227 def parse_index_v1_mixed(data, inline):
228 228 index, cache = parse_index_v1(data, inline)
229 229 return rustrevlog.MixedIndex(index), cache
230 230
231 231
232 232 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
233 233 # signed integer)
234 234 _maxentrysize = 0x7FFFFFFF
235 235
236 236 FILE_TOO_SHORT_MSG = _(
237 237 b'cannot read from revlog %s;'
238 238 b' expected %d bytes from offset %d, data size is %d'
239 239 )
240 240
241 241 hexdigits = b'0123456789abcdefABCDEF'
242 242
243 243
244 244 class revlog:
245 245 """
246 246 the underlying revision storage object
247 247
248 248 A revlog consists of two parts, an index and the revision data.
249 249
250 250 The index is a file with a fixed record size containing
251 251 information on each revision, including its nodeid (hash), the
252 252 nodeids of its parents, the position and offset of its data within
253 253 the data file, and the revision it's based on. Finally, each entry
254 254 contains a linkrev entry that can serve as a pointer to external
255 255 data.
256 256
257 257 The revision data itself is a linear collection of data chunks.
258 258 Each chunk represents a revision and is usually represented as a
259 259 delta against the previous chunk. To bound lookup time, runs of
260 260 deltas are limited to about 2 times the length of the original
261 261 version data. This makes retrieval of a version proportional to
262 262 its size, or O(1) relative to the number of revisions.
263 263
264 264 Both pieces of the revlog are written to in an append-only
265 265 fashion, which means we never need to rewrite a file to insert or
266 266 remove data, and can use some simple techniques to avoid the need
267 267 for locking while reading.
268 268
269 269 If checkambig, indexfile is opened with checkambig=True at
270 270 writing, to avoid file stat ambiguity.
271 271
272 272 If mmaplargeindex is True, and an mmapindexthreshold is set, the
273 273 index will be mmapped rather than read if it is larger than the
274 274 configured threshold.
275 275
276 276 If censorable is True, the revlog can have censored revisions.
277 277
278 278 If `upperboundcomp` is not None, this is the expected maximal gain from
279 279 compression for the data content.
280 280
281 281 `concurrencychecker` is an optional function that receives 3 arguments: a
282 282 file handle, a filename, and an expected position. It should check whether
283 283 the current position in the file handle is valid, and log/warn/fail (by
284 284 raising).
285 285
286 286 See mercurial/revlogutils/constants.py for details about the content of an
287 287 index entry.
288 288 """
289 289
290 290 _flagserrorclass = error.RevlogError
291 291
292 292 @staticmethod
293 293 def is_inline_index(header_bytes):
294 294 """Determine if a revlog is inline from the initial bytes of the index"""
295 295 header = INDEX_HEADER.unpack(header_bytes)[0]
296 296
297 297 _format_flags = header & ~0xFFFF
298 298 _format_version = header & 0xFFFF
299 299
300 300 features = FEATURES_BY_VERSION[_format_version]
301 301 return features[b'inline'](_format_flags)
302 302
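A small usage sketch for the static helper above (the index path is hypothetical; only the first four bytes of the index are needed):

from mercurial import revlog as revlogmod

with open('/path/to/repo/.hg/store/00changelog.i', 'rb') as fh:
    header_bytes = fh.read(4)
print(revlogmod.revlog.is_inline_index(header_bytes))  # True or False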
303 303 def __init__(
304 304 self,
305 305 opener,
306 306 target,
307 307 radix,
308 308 postfix=None, # only exist for `tmpcensored` now
309 309 checkambig=False,
310 310 mmaplargeindex=False,
311 311 censorable=False,
312 312 upperboundcomp=None,
313 313 persistentnodemap=False,
314 314 concurrencychecker=None,
315 315 trypending=False,
316 316 try_split=False,
317 317 canonical_parent_order=True,
318 318 ):
319 319 """
320 320 create a revlog object
321 321
322 322 opener is a function that abstracts the file opening operation
323 323 and can be used to implement COW semantics or the like.
324 324
325 325 `target`: a (KIND, ID) tuple that identifies the content stored in
326 326 this revlog. It helps the rest of the code understand what the revlog
327 327 is about without having to resort to heuristics and index filename
328 328 analysis. Note that this must reliably be set by normal code, but
329 329 test, debug, or performance measurement code might not set this to an
330 330 accurate value.
331 331 """
332 332 self.upperboundcomp = upperboundcomp
333 333
334 334 self.radix = radix
335 335
336 336 self._docket_file = None
337 337 self._indexfile = None
338 338 self._datafile = None
339 339 self._sidedatafile = None
340 340 self._nodemap_file = None
341 341 self.postfix = postfix
342 342 self._trypending = trypending
343 343 self._try_split = try_split
344 344 self.opener = opener
345 345 if persistentnodemap:
346 346 self._nodemap_file = nodemaputil.get_nodemap_file(self)
347 347
348 348 assert target[0] in ALL_KINDS
349 349 assert len(target) == 2
350 350 self.target = target
351 351 # When True, indexfile is opened with checkambig=True at writing, to
352 352 # avoid file stat ambiguity.
353 353 self._checkambig = checkambig
354 354 self._mmaplargeindex = mmaplargeindex
355 355 self._censorable = censorable
356 356 # 3-tuple of (node, rev, text) for a raw revision.
357 357 self._revisioncache = None
358 358 # Maps rev to chain base rev.
359 359 self._chainbasecache = util.lrucachedict(100)
360 360 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
361 361 self._chunkcache = (0, b'')
362 362 # How much data to read and cache into the raw revlog data cache.
363 363 self._chunkcachesize = 65536
364 364 self._maxchainlen = None
365 365 self._deltabothparents = True
366 366 self._candidate_group_chunk_size = 0
367 367 self._debug_delta = False
368 368 self.index = None
369 369 self._docket = None
370 370 self._nodemap_docket = None
371 371 # Mapping of partial identifiers to full nodes.
372 372 self._pcache = {}
373 373 # Mapping of revision integer to full node.
374 374 self._compengine = b'zlib'
375 375 self._compengineopts = {}
376 376 self._maxdeltachainspan = -1
377 377 self._withsparseread = False
378 378 self._sparserevlog = False
379 379 self.hassidedata = False
380 380 self._srdensitythreshold = 0.50
381 381 self._srmingapsize = 262144
382 382
383 383 # other optional features
384 384
385 385 # might remove rank configuration once the computation has no impact
386 386 self._compute_rank = False
387 387
388 388 # Make copy of flag processors so each revlog instance can support
389 389 # custom flags.
390 390 self._flagprocessors = dict(flagutil.flagprocessors)
391 391
392 392 # 3-tuple of file handles being used for active writing.
393 393 self._writinghandles = None
394 394 # prevent nesting of addgroup
395 395 self._adding_group = None
396 396
397 397 self._loadindex()
398 398
399 399 self._concurrencychecker = concurrencychecker
400 400
401 401 # parent order is supposed to be semantically irrelevant, so we
402 402 # normally resort parents to ensure that the first parent is non-null,
403 403 # if there is a non-null parent at all.
404 404 # filelog abuses the parent order as flag to mark some instances of
405 405 # meta-encoded files, so allow it to disable this behavior.
406 406 self.canonical_parent_order = canonical_parent_order
407 407
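To make the `target`/`radix` parameters documented above concrete, here is a hedged construction of a filelog-style revlog against a throwaway store directory (normally `opener` would be `repo.svfs` and the revlog would be created through `filelog.filelog`):

from mercurial import revlog as revlogmod, vfs as vfsmod
from mercurial.revlogutils.constants import KIND_FILELOG

opener = vfsmod.vfs(b'/tmp/revlog-demo', audit=False)
opener.options = {}  # a real repository sets rich options on its store vfs
rl = revlogmod.revlog(
    opener,
    target=(KIND_FILELOG, b'README'),  # what this revlog stores
    radix=b'data/README',              # index/data live at data/README.i / .d
)
print(len(rl), rl.display_id)  # 0 revisions, displayed as b'README'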
408 408 def _init_opts(self):
409 409 """process options (from above/config) to setup associated default revlog mode
410 410
411 411 These values might be affected when actually reading on disk information.
412 412
413 413 The relevant values are returned for use in _loadindex().
414 414
415 415 * newversionflags:
416 416 version header to use if we need to create a new revlog
417 417
418 418 * mmapindexthreshold:
419 419 minimal index size for start to use mmap
420 420
421 421 * force_nodemap:
422 422 force the usage of a "development" version of the nodemap code
423 423 """
424 424 mmapindexthreshold = None
425 425 opts = self.opener.options
426 426
427 427 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
428 428 new_header = CHANGELOGV2
429 429 self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
430 430 elif b'revlogv2' in opts:
431 431 new_header = REVLOGV2
432 432 elif b'revlogv1' in opts:
433 433 new_header = REVLOGV1 | FLAG_INLINE_DATA
434 434 if b'generaldelta' in opts:
435 435 new_header |= FLAG_GENERALDELTA
436 436 elif b'revlogv0' in self.opener.options:
437 437 new_header = REVLOGV0
438 438 else:
439 439 new_header = REVLOG_DEFAULT_VERSION
440 440
441 441 if b'chunkcachesize' in opts:
442 442 self._chunkcachesize = opts[b'chunkcachesize']
443 443 if b'maxchainlen' in opts:
444 444 self._maxchainlen = opts[b'maxchainlen']
445 445 if b'deltabothparents' in opts:
446 446 self._deltabothparents = opts[b'deltabothparents']
447 447 dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
448 448 if dps_cgds:
449 449 self._candidate_group_chunk_size = dps_cgds
450 450 self._lazydelta = bool(opts.get(b'lazydelta', True))
451 451 self._lazydeltabase = False
452 452 if self._lazydelta:
453 453 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
454 454 if b'debug-delta' in opts:
455 455 self._debug_delta = opts[b'debug-delta']
456 456 if b'compengine' in opts:
457 457 self._compengine = opts[b'compengine']
458 458 if b'zlib.level' in opts:
459 459 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
460 460 if b'zstd.level' in opts:
461 461 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
462 462 if b'maxdeltachainspan' in opts:
463 463 self._maxdeltachainspan = opts[b'maxdeltachainspan']
464 464 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
465 465 mmapindexthreshold = opts[b'mmapindexthreshold']
466 466 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
467 467 withsparseread = bool(opts.get(b'with-sparse-read', False))
468 468 # sparse-revlog forces sparse-read
469 469 self._withsparseread = self._sparserevlog or withsparseread
470 470 if b'sparse-read-density-threshold' in opts:
471 471 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
472 472 if b'sparse-read-min-gap-size' in opts:
473 473 self._srmingapsize = opts[b'sparse-read-min-gap-size']
474 474 if opts.get(b'enableellipsis'):
475 475 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
476 476
477 477 # revlog v0 doesn't have flag processors
478 478 for flag, processor in opts.get(b'flagprocessors', {}).items():
479 479 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
480 480
481 481 if self._chunkcachesize <= 0:
482 482 raise error.RevlogError(
483 483 _(b'revlog chunk cache size %r is not greater than 0')
484 484 % self._chunkcachesize
485 485 )
486 486 elif self._chunkcachesize & (self._chunkcachesize - 1):
487 487 raise error.RevlogError(
488 488 _(b'revlog chunk cache size %r is not a power of 2')
489 489 % self._chunkcachesize
490 490 )
491 491 force_nodemap = opts.get(b'devel-force-nodemap', False)
492 492 return new_header, mmapindexthreshold, force_nodemap
493 493
494 494 def _get_data(self, filepath, mmap_threshold, size=None):
495 495 """return a file content with or without mmap
496 496
497 497 If the file is missing return the empty string"""
498 498 try:
499 499 with self.opener(filepath) as fp:
500 500 if mmap_threshold is not None:
501 501 file_size = self.opener.fstat(fp).st_size
502 502 if file_size >= mmap_threshold:
503 503 if size is not None:
504 504 # avoid a potential mmap crash
505 505 size = min(file_size, size)
506 506 # TODO: should .close() to release resources without
507 507 # relying on Python GC
508 508 if size is None:
509 509 return util.buffer(util.mmapread(fp))
510 510 else:
511 511 return util.buffer(util.mmapread(fp, size))
512 512 if size is None:
513 513 return fp.read()
514 514 else:
515 515 return fp.read(size)
516 516 except FileNotFoundError:
517 517 return b''
518 518
519 519 def get_streams(self, max_linkrev, force_inline=False):
520 520 """return a list of streams that represent this revlog
521 521
522 522 This is used by stream-clone to do bytes to bytes copies of a repository.
523 523
524 524 This streams data for all revisions that refer to a changelog revision up
525 525 to `max_linkrev`.
526 526
527 527 If `force_inline` is set, it enforces that the stream will represent an inline revlog.
528 528
529 529 It returns a list of three-tuples:
530 530
531 531 [
532 532 (filename, bytes_stream, stream_size),
533 533 …
534 534 ]
535 535 """
536 536 n = len(self)
537 537 index = self.index
538 538 while n > 0:
539 539 linkrev = index[n - 1][4]
540 540 if linkrev < max_linkrev:
541 541 break
542 542 # note: this loop will rarely go through multiple iterations, since
543 543 # it only traverses commits created during the current streaming
544 544 # pull operation.
545 545 #
546 546 # If this becomes a problem, using a binary search should cap the
547 547 # runtime of this.
548 548 n = n - 1
549 549 if n == 0:
550 550 # no data to send
551 551 return []
552 552 index_size = n * index.entry_size
553 553 data_size = self.end(n - 1)
554 554
555 555 # XXX we might have been split (or stripped) since the object was
556 556 # initialized. We need to close this race too, probably by pre-opening
557 557 # the files we feed to the revlog and never closing them before we are
558 558 # done streaming.
559 559
560 560 if self._inline:
561 561
562 562 def get_stream():
563 563 with self._indexfp() as fp:
564 564 yield None
565 565 size = index_size + data_size
566 566 if size <= 65536:
567 567 yield fp.read(size)
568 568 else:
569 569 yield from util.filechunkiter(fp, limit=size)
570 570
571 571 inline_stream = get_stream()
572 572 next(inline_stream)
573 573 return [
574 574 (self._indexfile, inline_stream, index_size + data_size),
575 575 ]
576 576 elif force_inline:
577 577
578 578 def get_stream():
579 579 with self.reading():
580 580 yield None
581 581
582 582 for rev in range(n):
583 583 idx = self.index.entry_binary(rev)
584 584 if rev == 0 and self._docket is None:
585 585 # re-inject the inline flag
586 586 header = self._format_flags
587 587 header |= self._format_version
588 588 header |= FLAG_INLINE_DATA
589 589 header = self.index.pack_header(header)
590 590 idx = header + idx
591 591 yield idx
592 592 yield self._getsegmentforrevs(rev, rev)[1]
593 593
594 594 inline_stream = get_stream()
595 595 next(inline_stream)
596 596 return [
597 597 (self._indexfile, inline_stream, index_size + data_size),
598 598 ]
599 599 else:
600 600
601 601 def get_index_stream():
602 602 with self._indexfp() as fp:
603 603 yield None
604 604 if index_size <= 65536:
605 605 yield fp.read(index_size)
606 606 else:
607 607 yield from util.filechunkiter(fp, limit=index_size)
608 608
609 609 def get_data_stream():
610 610 with self._datafp() as fp:
611 611 yield None
612 612 if data_size <= 65536:
613 613 yield fp.read(data_size)
614 614 else:
615 615 yield from util.filechunkiter(fp, limit=data_size)
616 616
617 617 index_stream = get_index_stream()
618 618 next(index_stream)
619 619 data_stream = get_data_stream()
620 620 next(data_stream)
621 621 return [
622 622 (self._datafile, data_stream, data_size),
623 623 (self._indexfile, index_stream, index_size),
624 624 ]
625 625
626 626 def _loadindex(self, docket=None):
627 627
628 628 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
629 629
630 630 if self.postfix is not None:
631 631 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
632 632 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
633 633 entry_point = b'%s.i.a' % self.radix
634 634 elif self._try_split and self.opener.exists(self._split_index_file):
635 635 entry_point = self._split_index_file
636 636 else:
637 637 entry_point = b'%s.i' % self.radix
638 638
639 639 if docket is not None:
640 640 self._docket = docket
641 641 self._docket_file = entry_point
642 642 else:
643 643 self._initempty = True
644 644 entry_data = self._get_data(entry_point, mmapindexthreshold)
645 645 if len(entry_data) > 0:
646 646 header = INDEX_HEADER.unpack(entry_data[:4])[0]
647 647 self._initempty = False
648 648 else:
649 649 header = new_header
650 650
651 651 self._format_flags = header & ~0xFFFF
652 652 self._format_version = header & 0xFFFF
653 653
654 654 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
655 655 if supported_flags is None:
656 656 msg = _(b'unknown version (%d) in revlog %s')
657 657 msg %= (self._format_version, self.display_id)
658 658 raise error.RevlogError(msg)
659 659 elif self._format_flags & ~supported_flags:
660 660 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
661 661 display_flag = self._format_flags >> 16
662 662 msg %= (display_flag, self._format_version, self.display_id)
663 663 raise error.RevlogError(msg)
664 664
665 665 features = FEATURES_BY_VERSION[self._format_version]
666 666 self._inline = features[b'inline'](self._format_flags)
667 667 self._generaldelta = features[b'generaldelta'](self._format_flags)
668 668 self.hassidedata = features[b'sidedata']
669 669
670 670 if not features[b'docket']:
671 671 self._indexfile = entry_point
672 672 index_data = entry_data
673 673 else:
674 674 self._docket_file = entry_point
675 675 if self._initempty:
676 676 self._docket = docketutil.default_docket(self, header)
677 677 else:
678 678 self._docket = docketutil.parse_docket(
679 679 self, entry_data, use_pending=self._trypending
680 680 )
681 681
682 682 if self._docket is not None:
683 683 self._indexfile = self._docket.index_filepath()
684 684 index_data = b''
685 685 index_size = self._docket.index_end
686 686 if index_size > 0:
687 687 index_data = self._get_data(
688 688 self._indexfile, mmapindexthreshold, size=index_size
689 689 )
690 690 if len(index_data) < index_size:
691 691 msg = _(b'too few index data for %s: got %d, expected %d')
692 692 msg %= (self.display_id, len(index_data), index_size)
693 693 raise error.RevlogError(msg)
694 694
695 695 self._inline = False
696 696 # generaldelta implied by version 2 revlogs.
697 697 self._generaldelta = True
698 698 # the logic for persistent nodemap will be dealt with within the
699 699 # main docket, so disable it for now.
700 700 self._nodemap_file = None
701 701
702 702 if self._docket is not None:
703 703 self._datafile = self._docket.data_filepath()
704 704 self._sidedatafile = self._docket.sidedata_filepath()
705 705 elif self.postfix is None:
706 706 self._datafile = b'%s.d' % self.radix
707 707 else:
708 708 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
709 709
710 710 self.nodeconstants = sha1nodeconstants
711 711 self.nullid = self.nodeconstants.nullid
712 712
713 713 # sparse-revlog can't be on without general-delta (issue6056)
714 714 if not self._generaldelta:
715 715 self._sparserevlog = False
716 716
717 717 self._storedeltachains = True
718 718
719 719 devel_nodemap = (
720 720 self._nodemap_file
721 721 and force_nodemap
722 722 and parse_index_v1_nodemap is not None
723 723 )
724 724
725 725 use_rust_index = False
726 726 if rustrevlog is not None:
727 727 if self._nodemap_file is not None:
728 728 use_rust_index = True
729 729 else:
730 730 use_rust_index = self.opener.options.get(b'rust.index')
731 731
732 732 self._parse_index = parse_index_v1
733 733 if self._format_version == REVLOGV0:
734 734 self._parse_index = revlogv0.parse_index_v0
735 735 elif self._format_version == REVLOGV2:
736 736 self._parse_index = parse_index_v2
737 737 elif self._format_version == CHANGELOGV2:
738 738 self._parse_index = parse_index_cl_v2
739 739 elif devel_nodemap:
740 740 self._parse_index = parse_index_v1_nodemap
741 741 elif use_rust_index:
742 742 self._parse_index = parse_index_v1_mixed
743 743 try:
744 744 d = self._parse_index(index_data, self._inline)
745 745 index, chunkcache = d
746 746 use_nodemap = (
747 747 not self._inline
748 748 and self._nodemap_file is not None
749 749 and hasattr(index, 'update_nodemap_data')
750 750 )
751 751 if use_nodemap:
752 752 nodemap_data = nodemaputil.persisted_data(self)
753 753 if nodemap_data is not None:
754 754 docket = nodemap_data[0]
755 755 if (
756 756 len(d[0]) > docket.tip_rev
757 757 and d[0][docket.tip_rev][7] == docket.tip_node
758 758 ):
759 759 # no changelog tampering
760 760 self._nodemap_docket = docket
761 761 index.update_nodemap_data(*nodemap_data)
762 762 except (ValueError, IndexError):
763 763 raise error.RevlogError(
764 764 _(b"index %s is corrupted") % self.display_id
765 765 )
766 766 self.index = index
767 767 self._segmentfile = randomaccessfile.randomaccessfile(
768 768 self.opener,
769 769 (self._indexfile if self._inline else self._datafile),
770 770 self._chunkcachesize,
771 771 chunkcache,
772 772 )
773 773 self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
774 774 self.opener,
775 775 self._sidedatafile,
776 776 self._chunkcachesize,
777 777 )
778 778 # revnum -> (chain-length, sum-delta-length)
779 779 self._chaininfocache = util.lrucachedict(500)
780 780 # revlog header -> revlog compressor
781 781 self._decompressors = {}
782 782
783 783 def get_revlog(self):
784 784 """simple function to mirror API of other not-really-revlog API"""
785 785 return self
786 786
787 787 @util.propertycache
788 788 def revlog_kind(self):
789 789 return self.target[0]
790 790
791 791 @util.propertycache
792 792 def display_id(self):
793 793 """The public facing "ID" of the revlog that we use in message"""
794 794 if self.revlog_kind == KIND_FILELOG:
795 795 # Reference the file without the "data/" prefix, so it is familiar
796 796 # to the user.
797 797 return self.target[1]
798 798 else:
799 799 return self.radix
800 800
801 801 def _get_decompressor(self, t):
802 802 try:
803 803 compressor = self._decompressors[t]
804 804 except KeyError:
805 805 try:
806 806 engine = util.compengines.forrevlogheader(t)
807 807 compressor = engine.revlogcompressor(self._compengineopts)
808 808 self._decompressors[t] = compressor
809 809 except KeyError:
810 810 raise error.RevlogError(
811 811 _(b'unknown compression type %s') % binascii.hexlify(t)
812 812 )
813 813 return compressor
814 814
815 815 @util.propertycache
816 816 def _compressor(self):
817 817 engine = util.compengines[self._compengine]
818 818 return engine.revlogcompressor(self._compengineopts)
819 819
820 820 @util.propertycache
821 821 def _decompressor(self):
822 822 """the default decompressor"""
823 823 if self._docket is None:
824 824 return None
825 825 t = self._docket.default_compression_header
826 826 c = self._get_decompressor(t)
827 827 return c.decompress
828 828
829 829 def _indexfp(self):
830 830 """file object for the revlog's index file"""
831 831 return self.opener(self._indexfile, mode=b"r")
832 832
833 833 def __index_write_fp(self):
834 834 # You should not use this directly and use `_writing` instead
835 835 try:
836 836 f = self.opener(
837 837 self._indexfile, mode=b"r+", checkambig=self._checkambig
838 838 )
839 839 if self._docket is None:
840 840 f.seek(0, os.SEEK_END)
841 841 else:
842 842 f.seek(self._docket.index_end, os.SEEK_SET)
843 843 return f
844 844 except FileNotFoundError:
845 845 return self.opener(
846 846 self._indexfile, mode=b"w+", checkambig=self._checkambig
847 847 )
848 848
849 849 def __index_new_fp(self):
850 850 # You should not use this unless you are upgrading from inline revlog
851 851 return self.opener(
852 852 self._indexfile,
853 853 mode=b"w",
854 854 checkambig=self._checkambig,
855 855 atomictemp=True,
856 856 )
857 857
858 858 def _datafp(self, mode=b'r'):
859 859 """file object for the revlog's data file"""
860 860 return self.opener(self._datafile, mode=mode)
861 861
862 862 @contextlib.contextmanager
863 863 def _sidedatareadfp(self):
864 864 """file object suitable to read sidedata"""
865 865 if self._writinghandles:
866 866 yield self._writinghandles[2]
867 867 else:
868 868 with self.opener(self._sidedatafile) as fp:
869 869 yield fp
870 870
871 871 def tiprev(self):
872 872 return len(self.index) - 1
873 873
874 874 def tip(self):
875 875 return self.node(self.tiprev())
876 876
877 877 def __contains__(self, rev):
878 878 return 0 <= rev < len(self)
879 879
880 880 def __len__(self):
881 881 return len(self.index)
882 882
883 883 def __iter__(self):
884 884 return iter(range(len(self)))
885 885
886 886 def revs(self, start=0, stop=None):
887 887 """iterate over all rev in this revlog (from start to stop)"""
888 888 return storageutil.iterrevs(len(self), start=start, stop=stop)
889 889
890 890 def hasnode(self, node):
891 891 try:
892 892 self.rev(node)
893 893 return True
894 894 except KeyError:
895 895 return False
896 896
897 897 def _candelta(self, baserev, rev):
898 898 """whether two revisions (baserev, rev) can be delta-ed or not"""
899 899 # Disable delta if either rev requires a content-changing flag
900 900 # processor (ex. LFS). This is because such flag processor can alter
901 901 # the rawtext content that the delta will be based on, and two clients
902 902 # could have a same revlog node with different flags (i.e. different
903 903 # rawtext contents) and the delta could be incompatible.
904 904 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
905 905 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
906 906 ):
907 907 return False
908 908 return True
909 909
910 910 def update_caches(self, transaction):
911 911 """update on disk cache
912 912
913 913 If a transaction is passed, the update may be delayed to transaction
914 914 commit."""
915 915 if self._nodemap_file is not None:
916 916 if transaction is None:
917 917 nodemaputil.update_persistent_nodemap(self)
918 918 else:
919 919 nodemaputil.setup_persistent_nodemap(transaction, self)
920 920
921 921 def clearcaches(self):
922 922 """Clear in-memory caches"""
923 923 self._revisioncache = None
924 924 self._chainbasecache.clear()
925 925 self._segmentfile.clear_cache()
926 926 self._segmentfile_sidedata.clear_cache()
927 927 self._pcache = {}
928 928 self._nodemap_docket = None
929 929 self.index.clearcaches()
930 930 # The Python code is responsible for validating the docket, so we
931 931 # end up having to refresh it here.
932 932 use_nodemap = (
933 933 not self._inline
934 934 and self._nodemap_file is not None
935 935 and hasattr(self.index, 'update_nodemap_data')
936 936 )
937 937 if use_nodemap:
938 938 nodemap_data = nodemaputil.persisted_data(self)
939 939 if nodemap_data is not None:
940 940 self._nodemap_docket = nodemap_data[0]
941 941 self.index.update_nodemap_data(*nodemap_data)
942 942
943 943 def rev(self, node):
944 944 """return the revision number associated with a <nodeid>"""
945 945 try:
946 946 return self.index.rev(node)
947 947 except TypeError:
948 948 raise
949 949 except error.RevlogError:
950 950 # parsers.c radix tree lookup failed
951 951 if (
952 952 node == self.nodeconstants.wdirid
953 953 or node in self.nodeconstants.wdirfilenodeids
954 954 ):
955 955 raise error.WdirUnsupported
956 956 raise error.LookupError(node, self.display_id, _(b'no node'))
957 957
958 958 # Accessors for index entries.
959 959
960 960 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
961 961 # are flags.
962 962 def start(self, rev):
963 963 return int(self.index[rev][0] >> 16)
964 964
965 965 def sidedata_cut_off(self, rev):
966 966 sd_cut_off = self.index[rev][8]
967 967 if sd_cut_off != 0:
968 968 return sd_cut_off
969 969 # This is some annoying dance, because entries without sidedata
970 970 # currently use 0 as their offset. (instead of previous-offset +
971 971 # previous-size)
972 972 #
973 973 # We should reconsider this sidedata → 0 sidedata_offset policy.
974 974 # In the meantime, we need this.
975 975 while 0 <= rev:
976 976 e = self.index[rev]
977 977 if e[9] != 0:
978 978 return e[8] + e[9]
979 979 rev -= 1
980 980 return 0
981 981
982 982 def flags(self, rev):
983 983 return self.index[rev][0] & 0xFFFF
984 984
985 985 def length(self, rev):
986 986 return self.index[rev][1]
987 987
988 988 def sidedata_length(self, rev):
989 989 if not self.hassidedata:
990 990 return 0
991 991 return self.index[rev][9]
992 992
993 993 def rawsize(self, rev):
994 994 """return the length of the uncompressed text for a given revision"""
995 995 l = self.index[rev][2]
996 996 if l >= 0:
997 997 return l
998 998
999 999 t = self.rawdata(rev)
1000 1000 return len(t)
1001 1001
1002 1002 def size(self, rev):
1003 1003 """length of non-raw text (processed by a "read" flag processor)"""
1004 1004 # fast path: if no "read" flag processor could change the content,
1005 1005 # size is rawsize. note: ELLIPSIS is known to not change the content.
1006 1006 flags = self.flags(rev)
1007 1007 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1008 1008 return self.rawsize(rev)
1009 1009
1010 1010 return len(self.revision(rev))
1011 1011
1012 1012 def fast_rank(self, rev):
1013 1013 """Return the rank of a revision if already known, or None otherwise.
1014 1014
1015 1015 The rank of a revision is the size of the sub-graph it defines as a
1016 1016 head. Equivalently, the rank of a revision `r` is the size of the set
1017 1017 `ancestors(r)`, `r` included.
1018 1018
1019 1019 This method returns the rank retrieved from the revlog in constant
1020 1020 time. It makes no attempt at computing unknown values for versions of
1021 1021 the revlog which do not persist the rank.
1022 1022 """
1023 1023 rank = self.index[rev][ENTRY_RANK]
1024 1024 if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
1025 1025 return None
1026 1026 if rev == nullrev:
1027 1027 return 0 # convention
1028 1028 return rank
1029 1029
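# Editor's illustrative sketch (not part of the original module): for a
# revlog ``rl`` and a revision ``r`` whose rank was persisted (changelog-v2),
# the stored value should equal the size of the inclusive ancestor set:
#
#   depth = sum(1 for _ in rl.ancestors([r], inclusive=True))
#   assert rl.fast_rank(r) in (None, depth)  # None when the rank is unknown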
1030 1030 def chainbase(self, rev):
1031 1031 base = self._chainbasecache.get(rev)
1032 1032 if base is not None:
1033 1033 return base
1034 1034
1035 1035 index = self.index
1036 1036 iterrev = rev
1037 1037 base = index[iterrev][3]
1038 1038 while base != iterrev:
1039 1039 iterrev = base
1040 1040 base = index[iterrev][3]
1041 1041
1042 1042 self._chainbasecache[rev] = base
1043 1043 return base
1044 1044
1045 1045 def linkrev(self, rev):
1046 1046 return self.index[rev][4]
1047 1047
1048 1048 def parentrevs(self, rev):
1049 1049 try:
1050 1050 entry = self.index[rev]
1051 1051 except IndexError:
1052 1052 if rev == wdirrev:
1053 1053 raise error.WdirUnsupported
1054 1054 raise
1055 1055
1056 1056 if self.canonical_parent_order and entry[5] == nullrev:
1057 1057 return entry[6], entry[5]
1058 1058 else:
1059 1059 return entry[5], entry[6]
1060 1060
1061 1061 # fast parentrevs(rev) where rev isn't filtered
1062 1062 _uncheckedparentrevs = parentrevs
1063 1063
1064 1064 def node(self, rev):
1065 1065 try:
1066 1066 return self.index[rev][7]
1067 1067 except IndexError:
1068 1068 if rev == wdirrev:
1069 1069 raise error.WdirUnsupported
1070 1070 raise
1071 1071
1072 1072 # Derived from index values.
1073 1073
1074 1074 def end(self, rev):
1075 1075 return self.start(rev) + self.length(rev)
1076 1076
1077 1077 def parents(self, node):
1078 1078 i = self.index
1079 1079 d = i[self.rev(node)]
1080 1080 # inline node() to avoid function call overhead
1081 1081 if self.canonical_parent_order and d[5] == self.nullid:
1082 1082 return i[d[6]][7], i[d[5]][7]
1083 1083 else:
1084 1084 return i[d[5]][7], i[d[6]][7]
1085 1085
1086 1086 def chainlen(self, rev):
1087 1087 return self._chaininfo(rev)[0]
1088 1088
1089 1089 def _chaininfo(self, rev):
1090 1090 chaininfocache = self._chaininfocache
1091 1091 if rev in chaininfocache:
1092 1092 return chaininfocache[rev]
1093 1093 index = self.index
1094 1094 generaldelta = self._generaldelta
1095 1095 iterrev = rev
1096 1096 e = index[iterrev]
1097 1097 clen = 0
1098 1098 compresseddeltalen = 0
1099 1099 while iterrev != e[3]:
1100 1100 clen += 1
1101 1101 compresseddeltalen += e[1]
1102 1102 if generaldelta:
1103 1103 iterrev = e[3]
1104 1104 else:
1105 1105 iterrev -= 1
1106 1106 if iterrev in chaininfocache:
1107 1107 t = chaininfocache[iterrev]
1108 1108 clen += t[0]
1109 1109 compresseddeltalen += t[1]
1110 1110 break
1111 1111 e = index[iterrev]
1112 1112 else:
1113 1113 # Add text length of base since decompressing that also takes
1114 1114 # work. For cache hits the length is already included.
1115 1115 compresseddeltalen += e[1]
1116 1116 r = (clen, compresseddeltalen)
1117 1117 chaininfocache[rev] = r
1118 1118 return r
1119 1119
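# Editor's note (illustrative, not from the original source): ``chainlen``
# counts the deltas applied on top of the chain base, so it is one less than
# the number of revisions returned by ``_deltachain`` (defined below):
#
#   chain, _stopped = rl._deltachain(r)
#   assert rl.chainlen(r) == len(chain) - 1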
1120 1120 def _deltachain(self, rev, stoprev=None):
1121 1121 """Obtain the delta chain for a revision.
1122 1122
1123 1123 ``stoprev`` specifies a revision to stop at. If not specified, we
1124 1124 stop at the base of the chain.
1125 1125
1126 1126 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1127 1127 revs in ascending order and ``stopped`` is a bool indicating whether
1128 1128 ``stoprev`` was hit.
1129 1129 """
1130 1130 # Try C implementation.
1131 1131 try:
1132 1132 return self.index.deltachain(rev, stoprev, self._generaldelta)
1133 1133 except AttributeError:
1134 1134 pass
1135 1135
1136 1136 chain = []
1137 1137
1138 1138 # Alias to prevent attribute lookup in tight loop.
1139 1139 index = self.index
1140 1140 generaldelta = self._generaldelta
1141 1141
1142 1142 iterrev = rev
1143 1143 e = index[iterrev]
1144 1144 while iterrev != e[3] and iterrev != stoprev:
1145 1145 chain.append(iterrev)
1146 1146 if generaldelta:
1147 1147 iterrev = e[3]
1148 1148 else:
1149 1149 iterrev -= 1
1150 1150 e = index[iterrev]
1151 1151
1152 1152 if iterrev == stoprev:
1153 1153 stopped = True
1154 1154 else:
1155 1155 chain.append(iterrev)
1156 1156 stopped = False
1157 1157
1158 1158 chain.reverse()
1159 1159 return chain, stopped
1160 1160
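# Editor's illustrative sketch (assumes ``rl`` is a revlog instance): the
# returned chain is in ascending order and ends with the requested revision;
# each element is the delta parent of the next one.
#
#   chain, stopped = rl._deltachain(r)
#   assert chain[-1] == r and not stopped
#   for base, child in zip(chain, chain[1:]):
#       assert rl.deltaparent(child) == base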
1161 1161 def ancestors(self, revs, stoprev=0, inclusive=False):
1162 1162 """Generate the ancestors of 'revs' in reverse revision order.
1163 1163 Does not generate revs lower than stoprev.
1164 1164
1165 1165 See the documentation for ancestor.lazyancestors for more details."""
1166 1166
1167 1167 # first, make sure start revisions aren't filtered
1168 1168 revs = list(revs)
1169 1169 checkrev = self.node
1170 1170 for r in revs:
1171 1171 checkrev(r)
1172 1172 # and we're sure ancestors aren't filtered as well
1173 1173
1174 1174 if rustancestor is not None and self.index.rust_ext_compat:
1175 1175 lazyancestors = rustancestor.LazyAncestors
1176 1176 arg = self.index
1177 1177 else:
1178 1178 lazyancestors = ancestor.lazyancestors
1179 1179 arg = self._uncheckedparentrevs
1180 1180 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1181 1181
1182 1182 def descendants(self, revs):
1183 1183 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1184 1184
1185 1185 def findcommonmissing(self, common=None, heads=None):
1186 1186 """Return a tuple of the ancestors of common and the ancestors of heads
1187 1187 that are not ancestors of common. In revset terminology, we return the
1188 1188 tuple:
1189 1189
1190 1190 ::common, (::heads) - (::common)
1191 1191
1192 1192 The list is sorted by revision number, meaning it is
1193 1193 topologically sorted.
1194 1194
1195 1195 'heads' and 'common' are both lists of node IDs. If heads is
1196 1196 not supplied, uses all of the revlog's heads. If common is not
1197 1197 supplied, uses nullid."""
1198 1198 if common is None:
1199 1199 common = [self.nullid]
1200 1200 if heads is None:
1201 1201 heads = self.heads()
1202 1202
1203 1203 common = [self.rev(n) for n in common]
1204 1204 heads = [self.rev(n) for n in heads]
1205 1205
1206 1206 # we want the ancestors, but inclusive
1207 1207 class lazyset:
1208 1208 def __init__(self, lazyvalues):
1209 1209 self.addedvalues = set()
1210 1210 self.lazyvalues = lazyvalues
1211 1211
1212 1212 def __contains__(self, value):
1213 1213 return value in self.addedvalues or value in self.lazyvalues
1214 1214
1215 1215 def __iter__(self):
1216 1216 added = self.addedvalues
1217 1217 for r in added:
1218 1218 yield r
1219 1219 for r in self.lazyvalues:
1220 1220 if r not in added:
1221 1221 yield r
1222 1222
1223 1223 def add(self, value):
1224 1224 self.addedvalues.add(value)
1225 1225
1226 1226 def update(self, values):
1227 1227 self.addedvalues.update(values)
1228 1228
1229 1229 has = lazyset(self.ancestors(common))
1230 1230 has.add(nullrev)
1231 1231 has.update(common)
1232 1232
1233 1233 # take all ancestors from heads that aren't in has
1234 1234 missing = set()
1235 1235 visit = collections.deque(r for r in heads if r not in has)
1236 1236 while visit:
1237 1237 r = visit.popleft()
1238 1238 if r in missing:
1239 1239 continue
1240 1240 else:
1241 1241 missing.add(r)
1242 1242 for p in self.parentrevs(r):
1243 1243 if p not in has:
1244 1244 visit.append(p)
1245 1245 missing = list(missing)
1246 1246 missing.sort()
1247 1247 return has, [self.node(miss) for miss in missing]
1248 1248
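# Editor's illustrative sketch (not part of the original module): in revset
# terms the second element is ``(::heads) - (::common)``, topologically
# sorted; for example against the changelog ``cl``:
#
#   has, missing = cl.findcommonmissing(common=[c_node], heads=[h_node])
#   assert all(cl.rev(n) not in has for n in missing)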
1249 1249 def incrementalmissingrevs(self, common=None):
1250 1250 """Return an object that can be used to incrementally compute the
1251 1251 revision numbers of the ancestors of arbitrary sets that are not
1252 1252 ancestors of common. This is an ancestor.incrementalmissingancestors
1253 1253 object.
1254 1254
1255 1255 'common' is a list of revision numbers. If common is not supplied, uses
1256 1256 nullrev.
1257 1257 """
1258 1258 if common is None:
1259 1259 common = [nullrev]
1260 1260
1261 1261 if rustancestor is not None and self.index.rust_ext_compat:
1262 1262 return rustancestor.MissingAncestors(self.index, common)
1263 1263 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1264 1264
1265 1265 def findmissingrevs(self, common=None, heads=None):
1266 1266 """Return the revision numbers of the ancestors of heads that
1267 1267 are not ancestors of common.
1268 1268
1269 1269 More specifically, return a list of revision numbers corresponding to
1270 1270 nodes N such that every N satisfies the following constraints:
1271 1271
1272 1272 1. N is an ancestor of some node in 'heads'
1273 1273 2. N is not an ancestor of any node in 'common'
1274 1274
1275 1275 The list is sorted by revision number, meaning it is
1276 1276 topologically sorted.
1277 1277
1278 1278 'heads' and 'common' are both lists of revision numbers. If heads is
1279 1279 not supplied, uses all of the revlog's heads. If common is not
1280 1280 supplied, uses nullid."""
1281 1281 if common is None:
1282 1282 common = [nullrev]
1283 1283 if heads is None:
1284 1284 heads = self.headrevs()
1285 1285
1286 1286 inc = self.incrementalmissingrevs(common=common)
1287 1287 return inc.missingancestors(heads)
1288 1288
1289 1289 def findmissing(self, common=None, heads=None):
1290 1290 """Return the ancestors of heads that are not ancestors of common.
1291 1291
1292 1292 More specifically, return a list of nodes N such that every N
1293 1293 satisfies the following constraints:
1294 1294
1295 1295 1. N is an ancestor of some node in 'heads'
1296 1296 2. N is not an ancestor of any node in 'common'
1297 1297
1298 1298 The list is sorted by revision number, meaning it is
1299 1299 topologically sorted.
1300 1300
1301 1301 'heads' and 'common' are both lists of node IDs. If heads is
1302 1302 not supplied, uses all of the revlog's heads. If common is not
1303 1303 supplied, uses nullid."""
1304 1304 if common is None:
1305 1305 common = [self.nullid]
1306 1306 if heads is None:
1307 1307 heads = self.heads()
1308 1308
1309 1309 common = [self.rev(n) for n in common]
1310 1310 heads = [self.rev(n) for n in heads]
1311 1311
1312 1312 inc = self.incrementalmissingrevs(common=common)
1313 1313 return [self.node(r) for r in inc.missingancestors(heads)]
1314 1314
1315 1315 def nodesbetween(self, roots=None, heads=None):
1316 1316 """Return a topological path from 'roots' to 'heads'.
1317 1317
1318 1318 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1319 1319 topologically sorted list of all nodes N that satisfy both of
1320 1320 these constraints:
1321 1321
1322 1322 1. N is a descendant of some node in 'roots'
1323 1323 2. N is an ancestor of some node in 'heads'
1324 1324
1325 1325 Every node is considered to be both a descendant and an ancestor
1326 1326 of itself, so every reachable node in 'roots' and 'heads' will be
1327 1327 included in 'nodes'.
1328 1328
1329 1329 'outroots' is the list of reachable nodes in 'roots', i.e., the
1330 1330 subset of 'roots' that is returned in 'nodes'. Likewise,
1331 1331 'outheads' is the subset of 'heads' that is also in 'nodes'.
1332 1332
1333 1333 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1334 1334 unspecified, uses nullid as the only root. If 'heads' is
1335 1335 unspecified, uses list of all of the revlog's heads."""
1336 1336 nonodes = ([], [], [])
1337 1337 if roots is not None:
1338 1338 roots = list(roots)
1339 1339 if not roots:
1340 1340 return nonodes
1341 1341 lowestrev = min([self.rev(n) for n in roots])
1342 1342 else:
1343 1343 roots = [self.nullid] # Everybody's a descendant of nullid
1344 1344 lowestrev = nullrev
1345 1345 if (lowestrev == nullrev) and (heads is None):
1346 1346 # We want _all_ the nodes!
1347 1347 return (
1348 1348 [self.node(r) for r in self],
1349 1349 [self.nullid],
1350 1350 list(self.heads()),
1351 1351 )
1352 1352 if heads is None:
1353 1353 # All nodes are ancestors, so the latest ancestor is the last
1354 1354 # node.
1355 1355 highestrev = len(self) - 1
1356 1356 # Set ancestors to None to signal that every node is an ancestor.
1357 1357 ancestors = None
1358 1358 # Set heads to an empty dictionary for later discovery of heads
1359 1359 heads = {}
1360 1360 else:
1361 1361 heads = list(heads)
1362 1362 if not heads:
1363 1363 return nonodes
1364 1364 ancestors = set()
1365 1365 # Turn heads into a dictionary so we can remove 'fake' heads.
1366 1366 # Also, later we will be using it to filter out the heads we can't
1367 1367 # find from roots.
1368 1368 heads = dict.fromkeys(heads, False)
1369 1369 # Start at the top and keep marking parents until we're done.
1370 1370 nodestotag = set(heads)
1371 1371 # Remember where the top was so we can use it as a limit later.
1372 1372 highestrev = max([self.rev(n) for n in nodestotag])
1373 1373 while nodestotag:
1374 1374 # grab a node to tag
1375 1375 n = nodestotag.pop()
1376 1376 # Never tag nullid
1377 1377 if n == self.nullid:
1378 1378 continue
1379 1379 # A node's revision number represents its place in a
1380 1380 # topologically sorted list of nodes.
1381 1381 r = self.rev(n)
1382 1382 if r >= lowestrev:
1383 1383 if n not in ancestors:
1384 1384 # If we are possibly a descendant of one of the roots
1385 1385 # and we haven't already been marked as an ancestor
1386 1386 ancestors.add(n) # Mark as ancestor
1387 1387 # Add non-nullid parents to list of nodes to tag.
1388 1388 nodestotag.update(
1389 1389 [p for p in self.parents(n) if p != self.nullid]
1390 1390 )
1391 1391 elif n in heads: # We've seen it before, is it a fake head?
1392 1392 # So it is, real heads should not be the ancestors of
1393 1393 # any other heads.
1394 1394 heads.pop(n)
1395 1395 if not ancestors:
1396 1396 return nonodes
1397 1397 # Now that we have our set of ancestors, we want to remove any
1398 1398 # roots that are not ancestors.
1399 1399
1400 1400 # If one of the roots was nullid, everything is included anyway.
1401 1401 if lowestrev > nullrev:
1402 1402 # But, since we weren't, let's recompute the lowest rev to not
1403 1403 # include roots that aren't ancestors.
1404 1404
1405 1405 # Filter out roots that aren't ancestors of heads
1406 1406 roots = [root for root in roots if root in ancestors]
1407 1407 # Recompute the lowest revision
1408 1408 if roots:
1409 1409 lowestrev = min([self.rev(root) for root in roots])
1410 1410 else:
1411 1411 # No more roots? Return empty list
1412 1412 return nonodes
1413 1413 else:
1414 1414 # We are descending from nullid, and don't need to care about
1415 1415 # any other roots.
1416 1416 lowestrev = nullrev
1417 1417 roots = [self.nullid]
1418 1418 # Transform our roots list into a set.
1419 1419 descendants = set(roots)
1420 1420 # Also, keep the original roots so we can filter out roots that aren't
1421 1421 # 'real' roots (i.e. are descended from other roots).
1422 1422 roots = descendants.copy()
1423 1423 # Our topologically sorted list of output nodes.
1424 1424 orderedout = []
1425 1425 # Don't start at nullid since we don't want nullid in our output list,
1426 1426 # and if nullid shows up in descendants, empty parents will look like
1427 1427 # they're descendants.
1428 1428 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1429 1429 n = self.node(r)
1430 1430 isdescendant = False
1431 1431 if lowestrev == nullrev: # Everybody is a descendant of nullid
1432 1432 isdescendant = True
1433 1433 elif n in descendants:
1434 1434 # n is already a descendant
1435 1435 isdescendant = True
1436 1436 # This check only needs to be done here because all the roots
1437 1437 # will start being marked as descendants before the loop.
1438 1438 if n in roots:
1439 1439 # If n was a root, check if it's a 'real' root.
1440 1440 p = tuple(self.parents(n))
1441 1441 # If any of its parents are descendants, it's not a root.
1442 1442 if (p[0] in descendants) or (p[1] in descendants):
1443 1443 roots.remove(n)
1444 1444 else:
1445 1445 p = tuple(self.parents(n))
1446 1446 # A node is a descendant if either of its parents are
1447 1447 # descendants. (We seeded the descendants set with the roots
1448 1448 # up there, remember?)
1449 1449 if (p[0] in descendants) or (p[1] in descendants):
1450 1450 descendants.add(n)
1451 1451 isdescendant = True
1452 1452 if isdescendant and ((ancestors is None) or (n in ancestors)):
1453 1453 # Only include nodes that are both descendants and ancestors.
1454 1454 orderedout.append(n)
1455 1455 if (ancestors is not None) and (n in heads):
1456 1456 # We're trying to figure out which heads are reachable
1457 1457 # from roots.
1458 1458 # Mark this head as having been reached
1459 1459 heads[n] = True
1460 1460 elif ancestors is None:
1461 1461 # Otherwise, we're trying to discover the heads.
1462 1462 # Assume this is a head because if it isn't, the next step
1463 1463 # will eventually remove it.
1464 1464 heads[n] = True
1465 1465 # But, obviously its parents aren't.
1466 1466 for p in self.parents(n):
1467 1467 heads.pop(p, None)
1468 1468 heads = [head for head, flag in heads.items() if flag]
1469 1469 roots = list(roots)
1470 1470 assert orderedout
1471 1471 assert roots
1472 1472 assert heads
1473 1473 return (orderedout, roots, heads)
1474 1474
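# Editor's illustrative sketch (assumes ``rl`` is a revlog instance): with no
# arguments every node is returned and nullid is reported as the only root.
#
#   nodes, outroots, outheads = rl.nodesbetween()
#   assert outroots == [rl.nullid]
#   assert set(outheads) == set(rl.heads())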
1475 1475 def headrevs(self, revs=None):
1476 1476 if revs is None:
1477 1477 try:
1478 1478 return self.index.headrevs()
1479 1479 except AttributeError:
1480 1480 return self._headrevs()
1481 1481 if rustdagop is not None and self.index.rust_ext_compat:
1482 1482 return rustdagop.headrevs(self.index, revs)
1483 1483 return dagop.headrevs(revs, self._uncheckedparentrevs)
1484 1484
1485 1485 def computephases(self, roots):
1486 1486 return self.index.computephasesmapsets(roots)
1487 1487
1488 1488 def _headrevs(self):
1489 1489 count = len(self)
1490 1490 if not count:
1491 1491 return [nullrev]
1492 1492 # we won't iter over filtered rev so nobody is a head at start
1493 1493 ishead = [0] * (count + 1)
1494 1494 index = self.index
1495 1495 for r in self:
1496 1496 ishead[r] = 1 # I may be a head
1497 1497 e = index[r]
1498 1498 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1499 1499 return [r for r, val in enumerate(ishead) if val]
1500 1500
1501 1501 def heads(self, start=None, stop=None):
1502 1502 """return the list of all nodes that have no children
1503 1503
1504 1504 if start is specified, only heads that are descendants of
1505 1505 start will be returned
1506 1506 if stop is specified, it will consider all the revs from stop
1507 1507 as if they had no children
1508 1508 """
1509 1509 if start is None and stop is None:
1510 1510 if not len(self):
1511 1511 return [self.nullid]
1512 1512 return [self.node(r) for r in self.headrevs()]
1513 1513
1514 1514 if start is None:
1515 1515 start = nullrev
1516 1516 else:
1517 1517 start = self.rev(start)
1518 1518
1519 1519 stoprevs = {self.rev(n) for n in stop or []}
1520 1520
1521 1521 revs = dagop.headrevssubset(
1522 1522 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1523 1523 )
1524 1524
1525 1525 return [self.node(rev) for rev in revs]
1526 1526
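# Editor's illustrative sketch (not from the original source): a head is a
# node without children, so the two queries below should agree.
#
#   for h in rl.heads():
#       assert rl.children(h) == []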
1527 1527 def children(self, node):
1528 1528 """find the children of a given node"""
1529 1529 c = []
1530 1530 p = self.rev(node)
1531 1531 for r in self.revs(start=p + 1):
1532 1532 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1533 1533 if prevs:
1534 1534 for pr in prevs:
1535 1535 if pr == p:
1536 1536 c.append(self.node(r))
1537 1537 elif p == nullrev:
1538 1538 c.append(self.node(r))
1539 1539 return c
1540 1540
1541 1541 def commonancestorsheads(self, a, b):
1542 1542 """calculate all the heads of the common ancestors of nodes a and b"""
1543 1543 a, b = self.rev(a), self.rev(b)
1544 1544 ancs = self._commonancestorsheads(a, b)
1545 1545 return pycompat.maplist(self.node, ancs)
1546 1546
1547 1547 def _commonancestorsheads(self, *revs):
1548 1548 """calculate all the heads of the common ancestors of revs"""
1549 1549 try:
1550 1550 ancs = self.index.commonancestorsheads(*revs)
1551 1551 except (AttributeError, OverflowError): # C implementation failed
1552 1552 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1553 1553 return ancs
1554 1554
1555 1555 def isancestor(self, a, b):
1556 1556 """return True if node a is an ancestor of node b
1557 1557
1558 1558 A revision is considered an ancestor of itself."""
1559 1559 a, b = self.rev(a), self.rev(b)
1560 1560 return self.isancestorrev(a, b)
1561 1561
1562 1562 def isancestorrev(self, a, b):
1563 1563 """return True if revision a is an ancestor of revision b
1564 1564
1565 1565 A revision is considered an ancestor of itself.
1566 1566
1567 1567 The implementation of this is trivial but the use of
1568 1568 reachableroots is not."""
1569 1569 if a == nullrev:
1570 1570 return True
1571 1571 elif a == b:
1572 1572 return True
1573 1573 elif a > b:
1574 1574 return False
1575 1575 return bool(self.reachableroots(a, [b], [a], includepath=False))
1576 1576
1577 1577 def reachableroots(self, minroot, heads, roots, includepath=False):
1578 1578 """return (heads(::(<roots> and <roots>::<heads>)))
1579 1579
1580 1580 If includepath is True, return (<roots>::<heads>)."""
1581 1581 try:
1582 1582 return self.index.reachableroots2(
1583 1583 minroot, heads, roots, includepath
1584 1584 )
1585 1585 except AttributeError:
1586 1586 return dagop._reachablerootspure(
1587 1587 self.parentrevs, minroot, roots, heads, includepath
1588 1588 )
1589 1589
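# Editor's illustrative sketch (assumes revs ``a`` and ``b`` with a <= b):
# ``isancestorrev`` is a thin wrapper around ``reachableroots`` restricted to
# a single root and a single head.
#
#   assert rl.isancestorrev(a, b) == bool(
#       a == b or a == nullrev or rl.reachableroots(a, [b], [a])
#   )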
1590 1590 def ancestor(self, a, b):
1591 1591 """calculate the "best" common ancestor of nodes a and b"""
1592 1592
1593 1593 a, b = self.rev(a), self.rev(b)
1594 1594 try:
1595 1595 ancs = self.index.ancestors(a, b)
1596 1596 except (AttributeError, OverflowError):
1597 1597 ancs = ancestor.ancestors(self.parentrevs, a, b)
1598 1598 if ancs:
1599 1599 # choose a consistent winner when there's a tie
1600 1600 return min(map(self.node, ancs))
1601 1601 return self.nullid
1602 1602
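# Editor's note (illustrative): when several "best" common ancestors exist,
# the smallest node is picked, so repeated calls stay deterministic and the
# operation is symmetric in its arguments.
#
#   assert rl.ancestor(n1, n2) == rl.ancestor(n2, n1)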
1603 1603 def _match(self, id):
1604 1604 if isinstance(id, int):
1605 1605 # rev
1606 1606 return self.node(id)
1607 1607 if len(id) == self.nodeconstants.nodelen:
1608 1608 # possibly a binary node
1609 1609 # odds of a binary node being all hex in ASCII are 1 in 10**25
1610 1610 try:
1611 1611 node = id
1612 1612 self.rev(node) # quick search the index
1613 1613 return node
1614 1614 except error.LookupError:
1615 1615 pass # may be partial hex id
1616 1616 try:
1617 1617 # str(rev)
1618 1618 rev = int(id)
1619 1619 if b"%d" % rev != id:
1620 1620 raise ValueError
1621 1621 if rev < 0:
1622 1622 rev = len(self) + rev
1623 1623 if rev < 0 or rev >= len(self):
1624 1624 raise ValueError
1625 1625 return self.node(rev)
1626 1626 except (ValueError, OverflowError):
1627 1627 pass
1628 1628 if len(id) == 2 * self.nodeconstants.nodelen:
1629 1629 try:
1630 1630 # a full hex nodeid?
1631 1631 node = bin(id)
1632 1632 self.rev(node)
1633 1633 return node
1634 1634 except (binascii.Error, error.LookupError):
1635 1635 pass
1636 1636
1637 1637 def _partialmatch(self, id):
1638 1638 # we don't care about wdirfilenodeids as they should always be full hashes
1639 1639 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1640 1640 ambiguous = False
1641 1641 try:
1642 1642 partial = self.index.partialmatch(id)
1643 1643 if partial and self.hasnode(partial):
1644 1644 if maybewdir:
1645 1645 # single 'ff...' match in radix tree, ambiguous with wdir
1646 1646 ambiguous = True
1647 1647 else:
1648 1648 return partial
1649 1649 elif maybewdir:
1650 1650 # no 'ff...' match in radix tree, wdir identified
1651 1651 raise error.WdirUnsupported
1652 1652 else:
1653 1653 return None
1654 1654 except error.RevlogError:
1655 1655 # parsers.c radix tree lookup gave multiple matches
1656 1656 # fast path: for unfiltered changelog, radix tree is accurate
1657 1657 if not getattr(self, 'filteredrevs', None):
1658 1658 ambiguous = True
1659 1659 # fall through to slow path that filters hidden revisions
1660 1660 except (AttributeError, ValueError):
1661 1661 # we are pure python, or key is not hex
1662 1662 pass
1663 1663 if ambiguous:
1664 1664 raise error.AmbiguousPrefixLookupError(
1665 1665 id, self.display_id, _(b'ambiguous identifier')
1666 1666 )
1667 1667
1668 1668 if id in self._pcache:
1669 1669 return self._pcache[id]
1670 1670
1671 1671 if len(id) <= 40:
1672 1672 # hex(node)[:...]
1673 1673 l = len(id) // 2 * 2 # grab an even number of digits
1674 1674 try:
1675 1675 # we're dropping the last digit, so let's check that it's hex,
1676 1676 # to avoid the expensive computation below if it's not
1677 1677 if len(id) % 2 > 0:
1678 1678 if not (id[-1] in hexdigits):
1679 1679 return None
1680 1680 prefix = bin(id[:l])
1681 1681 except binascii.Error:
1682 1682 pass
1683 1683 else:
1684 1684 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1685 1685 nl = [
1686 1686 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1687 1687 ]
1688 1688 if self.nodeconstants.nullhex.startswith(id):
1689 1689 nl.append(self.nullid)
1690 1690 if len(nl) > 0:
1691 1691 if len(nl) == 1 and not maybewdir:
1692 1692 self._pcache[id] = nl[0]
1693 1693 return nl[0]
1694 1694 raise error.AmbiguousPrefixLookupError(
1695 1695 id, self.display_id, _(b'ambiguous identifier')
1696 1696 )
1697 1697 if maybewdir:
1698 1698 raise error.WdirUnsupported
1699 1699 return None
1700 1700
1701 1701 def lookup(self, id):
1702 1702 """locate a node based on:
1703 1703 - revision number or str(revision number)
1704 1704 - nodeid or subset of hex nodeid
1705 1705 """
1706 1706 n = self._match(id)
1707 1707 if n is not None:
1708 1708 return n
1709 1709 n = self._partialmatch(id)
1710 1710 if n:
1711 1711 return n
1712 1712
1713 1713 raise error.LookupError(id, self.display_id, _(b'no match found'))
1714 1714
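# Editor's illustrative sketch (assumes ``rl`` is a revlog and ``hex`` is
# mercurial.node.hex): the same revision can be looked up by number, by full
# node, or by an unambiguous hex prefix.
#
#   node = rl.node(0)
#   assert rl.lookup(b'0') == node
#   # assuming the prefix is unique and not itself a valid revision number
#   assert rl.lookup(hex(node)[:12]) == node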
1715 1715 def shortest(self, node, minlength=1):
1716 1716 """Find the shortest unambiguous prefix that matches node."""
1717 1717
1718 1718 def isvalid(prefix):
1719 1719 try:
1720 1720 matchednode = self._partialmatch(prefix)
1721 1721 except error.AmbiguousPrefixLookupError:
1722 1722 return False
1723 1723 except error.WdirUnsupported:
1724 1724 # single 'ff...' match
1725 1725 return True
1726 1726 if matchednode is None:
1727 1727 raise error.LookupError(node, self.display_id, _(b'no node'))
1728 1728 return True
1729 1729
1730 1730 def maybewdir(prefix):
1731 1731 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1732 1732
1733 1733 hexnode = hex(node)
1734 1734
1735 1735 def disambiguate(hexnode, minlength):
1736 1736 """Disambiguate against wdirid."""
1737 1737 for length in range(minlength, len(hexnode) + 1):
1738 1738 prefix = hexnode[:length]
1739 1739 if not maybewdir(prefix):
1740 1740 return prefix
1741 1741
1742 1742 if not getattr(self, 'filteredrevs', None):
1743 1743 try:
1744 1744 length = max(self.index.shortest(node), minlength)
1745 1745 return disambiguate(hexnode, length)
1746 1746 except error.RevlogError:
1747 1747 if node != self.nodeconstants.wdirid:
1748 1748 raise error.LookupError(
1749 1749 node, self.display_id, _(b'no node')
1750 1750 )
1751 1751 except AttributeError:
1752 1752 # Fall through to pure code
1753 1753 pass
1754 1754
1755 1755 if node == self.nodeconstants.wdirid:
1756 1756 for length in range(minlength, len(hexnode) + 1):
1757 1757 prefix = hexnode[:length]
1758 1758 if isvalid(prefix):
1759 1759 return prefix
1760 1760
1761 1761 for length in range(minlength, len(hexnode) + 1):
1762 1762 prefix = hexnode[:length]
1763 1763 if isvalid(prefix):
1764 1764 return disambiguate(hexnode, length)
1765 1765
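# Editor's illustrative sketch (not from the original source): for a stored
# node, ``shortest`` returns a leading slice of its hex form that no other
# node shares.
#
#   prefix = rl.shortest(node)
#   assert hex(node).startswith(prefix)
#   assert rl._partialmatch(prefix) == node  # prefix is unambiguous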
1766 1766 def cmp(self, node, text):
1767 1767 """compare text with a given file revision
1768 1768
1769 1769 returns True if text is different than what is stored.
1770 1770 """
1771 1771 p1, p2 = self.parents(node)
1772 1772 return storageutil.hashrevisionsha1(text, p1, p2) != node
1773 1773
1774 def _getsegmentforrevs(self, startrev, endrev, df=None):
1774 def _getsegmentforrevs(self, startrev, endrev):
1775 1775 """Obtain a segment of raw data corresponding to a range of revisions.
1776 1776
1777 1777 Accepts the start and end revisions. Data is read through the
1778 1778 revlog's internal segment file, so callers no longer pass an
1779 1779 already-open file handle.
1780 1780
1781 1781 Requests for data may be satisfied by a cache.
1782 1782
1783 1783 Returns a 2-tuple of (offset, data) for the requested range of
1784 1784 revisions. Offset is the integer offset from the beginning of the
1785 1785 revlog and data is a str or buffer of the raw byte data.
1786 1786
1787 1787 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1788 1788 to determine where each revision's data begins and ends.
1789 1789 """
1790 1790 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1791 1791 # (functions are expensive).
1792 1792 index = self.index
1793 1793 istart = index[startrev]
1794 1794 start = int(istart[0] >> 16)
1795 1795 if startrev == endrev:
1796 1796 end = start + istart[1]
1797 1797 else:
1798 1798 iend = index[endrev]
1799 1799 end = int(iend[0] >> 16) + iend[1]
1800 1800
1801 1801 if self._inline:
1802 1802 start += (startrev + 1) * self.index.entry_size
1803 1803 end += (endrev + 1) * self.index.entry_size
1804 1804 length = end - start
1805 1805
1806 return start, self._segmentfile.read_chunk(start, length, df)
1807
1808 def _chunk(self, rev, df=None):
1806 return start, self._segmentfile.read_chunk(start, length)
1807
1808 def _chunk(self, rev):
1809 1809 """Obtain a single decompressed chunk for a revision.
1810 1810
1811 1811 Accepts an integer revision. Data is read through the revlog's
1812 1812 internal segment file, so callers no longer pass an already-open
1813 1813 file handle.
1814 1814
1815 1815 Returns a str holding uncompressed data for the requested revision.
1816 1816 """
1817 1817 compression_mode = self.index[rev][10]
1818 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1818 data = self._getsegmentforrevs(rev, rev)[1]
1819 1819 if compression_mode == COMP_MODE_PLAIN:
1820 1820 return data
1821 1821 elif compression_mode == COMP_MODE_DEFAULT:
1822 1822 return self._decompressor(data)
1823 1823 elif compression_mode == COMP_MODE_INLINE:
1824 1824 return self.decompress(data)
1825 1825 else:
1826 1826 msg = b'unknown compression mode %d'
1827 1827 msg %= compression_mode
1828 1828 raise error.RevlogError(msg)
1829 1829
1830 def _chunks(self, revs, df=None, targetsize=None):
1830 def _chunks(self, revs, targetsize=None):
1831 1831 """Obtain decompressed chunks for the specified revisions.
1832 1832
1833 1833 Accepts an iterable of numeric revisions that are assumed to be in
1834 1834 ascending order. Data is read through the revlog's internal
1835 1835 segment file, so callers no longer pass an already-open file
1836 1836 handle.
1837 1837
1838 1838 This function is similar to calling ``self._chunk()`` multiple times,
1839 1839 but is faster.
1840 1840
1841 1841 Returns a list with decompressed data for each requested revision.
1842 1842 """
1843 1843 if not revs:
1844 1844 return []
1845 1845 start = self.start
1846 1846 length = self.length
1847 1847 inline = self._inline
1848 1848 iosize = self.index.entry_size
1849 1849 buffer = util.buffer
1850 1850
1851 1851 l = []
1852 1852 ladd = l.append
1853 1853
1854 1854 if not self._withsparseread:
1855 1855 slicedchunks = (revs,)
1856 1856 else:
1857 1857 slicedchunks = deltautil.slicechunk(
1858 1858 self, revs, targetsize=targetsize
1859 1859 )
1860 1860
1861 1861 for revschunk in slicedchunks:
1862 1862 firstrev = revschunk[0]
1863 1863 # Skip trailing revisions with empty diff
1864 1864 for lastrev in revschunk[::-1]:
1865 1865 if length(lastrev) != 0:
1866 1866 break
1867 1867
1868 1868 try:
1869 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1869 offset, data = self._getsegmentforrevs(firstrev, lastrev)
1870 1870 except OverflowError:
1871 1871 # issue4215 - we can't cache a run of chunks greater than
1872 1872 # 2G on Windows
1873 return [self._chunk(rev, df=df) for rev in revschunk]
1873 return [self._chunk(rev) for rev in revschunk]
1874 1874
1875 1875 decomp = self.decompress
1876 1876 # self._decompressor might be None, but will not be used in that case
1877 1877 def_decomp = self._decompressor
1878 1878 for rev in revschunk:
1879 1879 chunkstart = start(rev)
1880 1880 if inline:
1881 1881 chunkstart += (rev + 1) * iosize
1882 1882 chunklength = length(rev)
1883 1883 comp_mode = self.index[rev][10]
1884 1884 c = buffer(data, chunkstart - offset, chunklength)
1885 1885 if comp_mode == COMP_MODE_PLAIN:
1886 1886 ladd(c)
1887 1887 elif comp_mode == COMP_MODE_INLINE:
1888 1888 ladd(decomp(c))
1889 1889 elif comp_mode == COMP_MODE_DEFAULT:
1890 1890 ladd(def_decomp(c))
1891 1891 else:
1892 1892 msg = b'unknown compression mode %d'
1893 1893 msg %= comp_mode
1894 1894 raise error.RevlogError(msg)
1895 1895
1896 1896 return l
1897 1897
1898 1898 def deltaparent(self, rev):
1899 1899 """return deltaparent of the given revision"""
1900 1900 base = self.index[rev][3]
1901 1901 if base == rev:
1902 1902 return nullrev
1903 1903 elif self._generaldelta:
1904 1904 return base
1905 1905 else:
1906 1906 return rev - 1
1907 1907
1908 1908 def issnapshot(self, rev):
1909 1909 """tells whether rev is a snapshot"""
1910 1910 if not self._sparserevlog:
1911 1911 return self.deltaparent(rev) == nullrev
1912 1912 elif hasattr(self.index, 'issnapshot'):
1913 1913 # directly assign the method to cache the testing and access
1914 1914 self.issnapshot = self.index.issnapshot
1915 1915 return self.issnapshot(rev)
1916 1916 if rev == nullrev:
1917 1917 return True
1918 1918 entry = self.index[rev]
1919 1919 base = entry[3]
1920 1920 if base == rev:
1921 1921 return True
1922 1922 if base == nullrev:
1923 1923 return True
1924 1924 p1 = entry[5]
1925 1925 while self.length(p1) == 0:
1926 1926 b = self.deltaparent(p1)
1927 1927 if b == p1:
1928 1928 break
1929 1929 p1 = b
1930 1930 p2 = entry[6]
1931 1931 while self.length(p2) == 0:
1932 1932 b = self.deltaparent(p2)
1933 1933 if b == p2:
1934 1934 break
1935 1935 p2 = b
1936 1936 if base == p1 or base == p2:
1937 1937 return False
1938 1938 return self.issnapshot(base)
1939 1939
1940 1940 def snapshotdepth(self, rev):
1941 1941 """number of snapshot in the chain before this one"""
1942 1942 if not self.issnapshot(rev):
1943 1943 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1944 1944 return len(self._deltachain(rev)[0]) - 1
1945 1945
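# Editor's illustrative sketch (not part of the original module): a revision
# stored without any delta is a full snapshot and sits at depth 0.
#
#   if rl.deltaparent(r) == nullrev:  # stored as full text
#       assert rl.issnapshot(r) and rl.snapshotdepth(r) == 0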
1946 1946 def revdiff(self, rev1, rev2):
1947 1947 """return or calculate a delta between two revisions
1948 1948
1949 1949 The delta calculated is in binary form and is intended to be written to
1950 1950 revlog data directly. So this function needs raw revision data.
1951 1951 """
1952 1952 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1953 1953 return bytes(self._chunk(rev2))
1954 1954
1955 1955 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1956 1956
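# Editor's illustrative sketch (not part of the original module): the delta
# returned by ``revdiff`` patches the raw text of ``rev1`` into the raw text
# of ``rev2``.
#
#   delta = rl.revdiff(rev1, rev2)
#   patched = mdiff.patches(rl.rawdata(rev1), [delta])
#   assert bytes(patched) == bytes(rl.rawdata(rev2))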
1957 1957 def revision(self, nodeorrev):
1958 1958 """return an uncompressed revision of a given node or revision
1959 1959 number.
1960 1960 """
1961 1961 return self._revisiondata(nodeorrev)
1962 1962
1963 1963 def sidedata(self, nodeorrev):
1964 1964 """a map of extra data related to the changeset but not part of the hash
1965 1965
1966 1966 This function currently returns a dictionary. However, a more
1967 1967 advanced mapping object will likely be used in the future for more
1968 1968 efficient/lazy code.
1969 1969 """
1970 1970 # deal with <nodeorrev> argument type
1971 1971 if isinstance(nodeorrev, int):
1972 1972 rev = nodeorrev
1973 1973 else:
1974 1974 rev = self.rev(nodeorrev)
1975 1975 return self._sidedata(rev)
1976 1976
1977 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1977 def _revisiondata(self, nodeorrev, raw=False):
1978 1978 # deal with <nodeorrev> argument type
1979 1979 if isinstance(nodeorrev, int):
1980 1980 rev = nodeorrev
1981 1981 node = self.node(rev)
1982 1982 else:
1983 1983 node = nodeorrev
1984 1984 rev = None
1985 1985
1986 1986 # fast path the special `nullid` rev
1987 1987 if node == self.nullid:
1988 1988 return b""
1989 1989
1990 1990 # ``rawtext`` is the text as stored inside the revlog. Might be the
1991 1991 # revision or might need to be processed to retrieve the revision.
1992 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1992 rev, rawtext, validated = self._rawtext(node, rev)
1993 1993
1994 1994 if raw and validated:
1995 1995 # if we don't want to process the raw text and that raw
1996 1996 # text is cached, we can exit early.
1997 1997 return rawtext
1998 1998 if rev is None:
1999 1999 rev = self.rev(node)
2000 2000 # the revlog's flag for this revision
2001 2001 # (usually alter its state or content)
2002 2002 flags = self.flags(rev)
2003 2003
2004 2004 if validated and flags == REVIDX_DEFAULT_FLAGS:
2005 2005 # no extra flags set, no flag processor runs, text = rawtext
2006 2006 return rawtext
2007 2007
2008 2008 if raw:
2009 2009 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2010 2010 text = rawtext
2011 2011 else:
2012 2012 r = flagutil.processflagsread(self, rawtext, flags)
2013 2013 text, validatehash = r
2014 2014 if validatehash:
2015 2015 self.checkhash(text, node, rev=rev)
2016 2016 if not validated:
2017 2017 self._revisioncache = (node, rev, rawtext)
2018 2018
2019 2019 return text
2020 2020
2021 def _rawtext(self, node, rev, _df=None):
2021 def _rawtext(self, node, rev):
2022 2022 """return the possibly unvalidated rawtext for a revision
2023 2023
2024 2024 returns (rev, rawtext, validated)
2025 2025 """
2026 2026
2027 2027 # revision in the cache (could be useful to apply delta)
2028 2028 cachedrev = None
2029 2029 # An intermediate text to apply deltas to
2030 2030 basetext = None
2031 2031
2032 2032 # Check if we have the entry in cache
2033 2033 # The cache entry looks like (node, rev, rawtext)
2034 2034 if self._revisioncache:
2035 2035 if self._revisioncache[0] == node:
2036 2036 return (rev, self._revisioncache[2], True)
2037 2037 cachedrev = self._revisioncache[1]
2038 2038
2039 2039 if rev is None:
2040 2040 rev = self.rev(node)
2041 2041
2042 2042 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2043 2043 if stopped:
2044 2044 basetext = self._revisioncache[2]
2045 2045
2046 2046 # drop cache to save memory, the caller is expected to
2047 2047 # update self._revisioncache after validating the text
2048 2048 self._revisioncache = None
2049 2049
2050 2050 targetsize = None
2051 2051 rawsize = self.index[rev][2]
2052 2052 if 0 <= rawsize:
2053 2053 targetsize = 4 * rawsize
2054 2054
2055 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2055 bins = self._chunks(chain, targetsize=targetsize)
2056 2056 if basetext is None:
2057 2057 basetext = bytes(bins[0])
2058 2058 bins = bins[1:]
2059 2059
2060 2060 rawtext = mdiff.patches(basetext, bins)
2061 2061 del basetext # let us have a chance to free memory early
2062 2062 return (rev, rawtext, False)
2063 2063
2064 2064 def _sidedata(self, rev):
2065 2065 """Return the sidedata for a given revision number."""
2066 2066 index_entry = self.index[rev]
2067 2067 sidedata_offset = index_entry[8]
2068 2068 sidedata_size = index_entry[9]
2069 2069
2070 2070 if self._inline:
2071 2071 sidedata_offset += self.index.entry_size * (1 + rev)
2072 2072 if sidedata_size == 0:
2073 2073 return {}
2074 2074
2075 2075 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
2076 2076 filename = self._sidedatafile
2077 2077 end = self._docket.sidedata_end
2078 2078 offset = sidedata_offset
2079 2079 length = sidedata_size
2080 2080 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
2081 2081 raise error.RevlogError(m)
2082 2082
2083 2083 comp_segment = self._segmentfile_sidedata.read_chunk(
2084 2084 sidedata_offset, sidedata_size
2085 2085 )
2086 2086
2087 2087 comp = self.index[rev][11]
2088 2088 if comp == COMP_MODE_PLAIN:
2089 2089 segment = comp_segment
2090 2090 elif comp == COMP_MODE_DEFAULT:
2091 2091 segment = self._decompressor(comp_segment)
2092 2092 elif comp == COMP_MODE_INLINE:
2093 2093 segment = self.decompress(comp_segment)
2094 2094 else:
2095 2095 msg = b'unknown compression mode %d'
2096 2096 msg %= comp
2097 2097 raise error.RevlogError(msg)
2098 2098
2099 2099 sidedata = sidedatautil.deserialize_sidedata(segment)
2100 2100 return sidedata
2101 2101
2102 2102 def rawdata(self, nodeorrev):
2103 2103 """return an uncompressed raw data of a given node or revision number."""
2104 2104 return self._revisiondata(nodeorrev, raw=True)
2105 2105
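# Editor's note (illustrative): ``revision`` returns text after any "read"
# flag processors have run, while ``rawdata`` returns the bytes as stored;
# for revisions without content-changing flags the two agree.
#
#   if rl.flags(r) == REVIDX_DEFAULT_FLAGS:
#       assert rl.revision(r) == rl.rawdata(r)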
2106 2106 def hash(self, text, p1, p2):
2107 2107 """Compute a node hash.
2108 2108
2109 2109 Available as a function so that subclasses can replace the hash
2110 2110 as needed.
2111 2111 """
2112 2112 return storageutil.hashrevisionsha1(text, p1, p2)
2113 2113
2114 2114 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2115 2115 """Check node hash integrity.
2116 2116
2117 2117 Available as a function so that subclasses can extend hash mismatch
2118 2118 behaviors as needed.
2119 2119 """
2120 2120 try:
2121 2121 if p1 is None and p2 is None:
2122 2122 p1, p2 = self.parents(node)
2123 2123 if node != self.hash(text, p1, p2):
2124 2124 # Clear the revision cache on hash failure. The revision cache
2125 2125 # only stores the raw revision and clearing the cache does have
2126 2126 # the side-effect that we won't have a cache hit when the raw
2127 2127 # revision data is accessed. But this case should be rare and
2128 2128 # it is extra work to teach the cache about the hash
2129 2129 # verification state.
2130 2130 if self._revisioncache and self._revisioncache[0] == node:
2131 2131 self._revisioncache = None
2132 2132
2133 2133 revornode = rev
2134 2134 if revornode is None:
2135 2135 revornode = templatefilters.short(hex(node))
2136 2136 raise error.RevlogError(
2137 2137 _(b"integrity check failed on %s:%s")
2138 2138 % (self.display_id, pycompat.bytestr(revornode))
2139 2139 )
2140 2140 except error.RevlogError:
2141 2141 if self._censorable and storageutil.iscensoredtext(text):
2142 2142 raise error.CensoredNodeError(self.display_id, node, text)
2143 2143 raise
2144 2144
2145 2145 @property
2146 2146 def _split_index_file(self):
2147 2147 """the path where to expect the index of an ongoing splitting operation
2148 2148
2149 2149 The file will only exist if a splitting operation is in progress, but
2150 2150 it is always expected at the same location."""
2151 2151 parts = self.radix.split(b'/')
2152 2152 if len(parts) > 1:
2153 2153 # adds a '-s' suffix to the ``data/`` or ``meta/`` base
2154 2154 head = parts[0] + b'-s'
2155 2155 mids = parts[1:-1]
2156 2156 tail = parts[-1] + b'.i'
2157 2157 pieces = [head] + mids + [tail]
2158 2158 return b'/'.join(pieces)
2159 2159 else:
2160 2160 # the revlog is stored at the root of the store (changelog or
2161 2161 # manifest), no risk of collision.
2162 2162 return self.radix + b'.i.s'
2163 2163
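# Editor's illustrative sketch (not from the original source): for a filelog
# stored under ``data/`` the temporary split index lives in a parallel
# ``data-s/`` tree, while top-level revlogs get an ``.i.s`` suffix.
#
#   data/some/file.i  ->  data-s/some/file.i   (radix b'data/some/file')
#   00manifest.i      ->  00manifest.i.s       (radix b'00manifest')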
2164 2164 def _enforceinlinesize(self, tr, side_write=True):
2165 2165 """Check if the revlog is too big for inline and convert if so.
2166 2166
2167 2167 This should be called after revisions are added to the revlog. If the
2168 2168 revlog has grown too large to be an inline revlog, it will convert it
2169 2169 to use multiple index and data files.
2170 2170 """
2171 2171 tiprev = len(self) - 1
2172 2172 total_size = self.start(tiprev) + self.length(tiprev)
2173 2173 if not self._inline or total_size < _maxinline:
2174 2174 return
2175 2175
2176 2176 troffset = tr.findoffset(self._indexfile)
2177 2177 if troffset is None:
2178 2178 raise error.RevlogError(
2179 2179 _(b"%s not found in the transaction") % self._indexfile
2180 2180 )
2181 2181 if troffset:
2182 2182 tr.addbackup(self._indexfile, for_offset=True)
2183 2183 tr.add(self._datafile, 0)
2184 2184
2185 2185 existing_handles = False
2186 2186 if self._writinghandles is not None:
2187 2187 existing_handles = True
2188 2188 fp = self._writinghandles[0]
2189 2189 fp.flush()
2190 2190 fp.close()
2191 2191 # We can't use the cached file handle after close(). So prevent
2192 2192 # its usage.
2193 2193 self._writinghandles = None
2194 2194 self._segmentfile.writing_handle = None
2195 2195 # No need to deal with sidedata writing handle as it is only
2196 2196 # relevant with revlog-v2 which is never inline, not reaching
2197 2197 # this code
2198 2198 if side_write:
2199 2199 old_index_file_path = self._indexfile
2200 2200 new_index_file_path = self._split_index_file
2201 2201 opener = self.opener
2202 2202 weak_self = weakref.ref(self)
2203 2203
2204 2204 # the "split" index replace the real index when the transaction is finalized
2205 2205 def finalize_callback(tr):
2206 2206 opener.rename(
2207 2207 new_index_file_path,
2208 2208 old_index_file_path,
2209 2209 checkambig=True,
2210 2210 )
2211 2211 maybe_self = weak_self()
2212 2212 if maybe_self is not None:
2213 2213 maybe_self._indexfile = old_index_file_path
2214 2214
2215 2215 def abort_callback(tr):
2216 2216 maybe_self = weak_self()
2217 2217 if maybe_self is not None:
2218 2218 maybe_self._indexfile = old_index_file_path
2219 2219
2220 2220 tr.registertmp(new_index_file_path)
2221 2221 if self.target[1] is not None:
2222 2222 callback_id = b'000-revlog-split-%d-%s' % self.target
2223 2223 else:
2224 2224 callback_id = b'000-revlog-split-%d' % self.target[0]
2225 2225 tr.addfinalize(callback_id, finalize_callback)
2226 2226 tr.addabort(callback_id, abort_callback)
2227 2227
2228 2228 new_dfh = self._datafp(b'w+')
2229 2229 new_dfh.truncate(0) # drop any potentially existing data
2230 2230 try:
2231 2231 with self.reading():
2232 2232 for r in self:
2233 2233 new_dfh.write(self._getsegmentforrevs(r, r)[1])
2234 2234 new_dfh.flush()
2235 2235
2236 2236 if side_write:
2237 2237 self._indexfile = new_index_file_path
2238 2238 with self.__index_new_fp() as fp:
2239 2239 self._format_flags &= ~FLAG_INLINE_DATA
2240 2240 self._inline = False
2241 2241 for i in self:
2242 2242 e = self.index.entry_binary(i)
2243 2243 if i == 0 and self._docket is None:
2244 2244 header = self._format_flags | self._format_version
2245 2245 header = self.index.pack_header(header)
2246 2246 e = header + e
2247 2247 fp.write(e)
2248 2248 if self._docket is not None:
2249 2249 self._docket.index_end = fp.tell()
2250 2250
2251 2251 # If we don't use side-write, the temp file replaces the real
2252 2252 # index when we exit the context manager
2253 2253
2254 2254 nodemaputil.setup_persistent_nodemap(tr, self)
2255 2255 self._segmentfile = randomaccessfile.randomaccessfile(
2256 2256 self.opener,
2257 2257 self._datafile,
2258 2258 self._chunkcachesize,
2259 2259 )
2260 2260
2261 2261 if existing_handles:
2262 2262 # switched from inline to conventional reopen the index
2263 2263 ifh = self.__index_write_fp()
2264 2264 self._writinghandles = (ifh, new_dfh, None)
2265 2265 self._segmentfile.writing_handle = new_dfh
2266 2266 new_dfh = None
2267 2267 # No need to deal with sidedata writing handle as it is only
2268 2268 # relevant with revlog-v2 which is never inline, not reaching
2269 2269 # this code
2270 2270 finally:
2271 2271 if new_dfh is not None:
2272 2272 new_dfh.close()
2273 2273
2274 2274 def _nodeduplicatecallback(self, transaction, node):
2275 2275 """called when trying to add a node already stored."""
2276 2276
2277 2277 @contextlib.contextmanager
2278 2278 def reading(self):
2279 2279 """Context manager that keeps data and sidedata files open for reading"""
2280 2280 if len(self.index) == 0:
2281 2281 yield # nothing to be read
2282 2282 else:
2283 2283 with self._segmentfile.reading():
2284 2284 with self._segmentfile_sidedata.reading():
2285 2285 yield
2286 2286
2287 2287 @contextlib.contextmanager
2288 2288 def _writing(self, transaction):
2289 2289 if self._trypending:
2290 2290 msg = b'try to write in a `trypending` revlog: %s'
2291 2291 msg %= self.display_id
2292 2292 raise error.ProgrammingError(msg)
2293 2293 if self._writinghandles is not None:
2294 2294 yield
2295 2295 else:
2296 2296 ifh = dfh = sdfh = None
2297 2297 try:
2298 2298 r = len(self)
2299 2299 # opening the data file.
2300 2300 dsize = 0
2301 2301 if r:
2302 2302 dsize = self.end(r - 1)
2303 2303 dfh = None
2304 2304 if not self._inline:
2305 2305 try:
2306 2306 dfh = self._datafp(b"r+")
2307 2307 if self._docket is None:
2308 2308 dfh.seek(0, os.SEEK_END)
2309 2309 else:
2310 2310 dfh.seek(self._docket.data_end, os.SEEK_SET)
2311 2311 except FileNotFoundError:
2312 2312 dfh = self._datafp(b"w+")
2313 2313 transaction.add(self._datafile, dsize)
2314 2314 if self._sidedatafile is not None:
2315 2315 # revlog-v2 does not inline, help Pytype
2316 2316 assert dfh is not None
2317 2317 try:
2318 2318 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2319 2319 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2320 2320 except FileNotFoundError:
2321 2321 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2322 2322 transaction.add(
2323 2323 self._sidedatafile, self._docket.sidedata_end
2324 2324 )
2325 2325
2326 2326 # opening the index file.
2327 2327 isize = r * self.index.entry_size
2328 2328 ifh = self.__index_write_fp()
2329 2329 if self._inline:
2330 2330 transaction.add(self._indexfile, dsize + isize)
2331 2331 else:
2332 2332 transaction.add(self._indexfile, isize)
2333 2333 # exposing all file handle for writing.
2334 2334 self._writinghandles = (ifh, dfh, sdfh)
2335 2335 self._segmentfile.writing_handle = ifh if self._inline else dfh
2336 2336 self._segmentfile_sidedata.writing_handle = sdfh
2337 2337 yield
2338 2338 if self._docket is not None:
2339 2339 self._write_docket(transaction)
2340 2340 finally:
2341 2341 self._writinghandles = None
2342 2342 self._segmentfile.writing_handle = None
2343 2343 self._segmentfile_sidedata.writing_handle = None
2344 2344 if dfh is not None:
2345 2345 dfh.close()
2346 2346 if sdfh is not None:
2347 2347 sdfh.close()
2348 2348 # closing the index file last to avoid exposing references to
2349 2349 # potentially unflushed data content.
2350 2350 if ifh is not None:
2351 2351 ifh.close()
2352 2352
2353 2353 def _write_docket(self, transaction):
2354 2354 """write the current docket on disk
2355 2355
2356 2356 Exists as a method to help the changelog implement transaction logic
2357 2357
2358 2358 We could also imagine using the same transaction logic for all revlogs
2359 2359 since dockets are cheap.
2360 2360 self._docket.write(transaction)
2361 2361
2362 2362 def addrevision(
2363 2363 self,
2364 2364 text,
2365 2365 transaction,
2366 2366 link,
2367 2367 p1,
2368 2368 p2,
2369 2369 cachedelta=None,
2370 2370 node=None,
2371 2371 flags=REVIDX_DEFAULT_FLAGS,
2372 2372 deltacomputer=None,
2373 2373 sidedata=None,
2374 2374 ):
2375 2375 """add a revision to the log
2376 2376
2377 2377 text - the revision data to add
2378 2378 transaction - the transaction object used for rollback
2379 2379 link - the linkrev data to add
2380 2380 p1, p2 - the parent nodeids of the revision
2381 2381 cachedelta - an optional precomputed delta
2382 2382 node - nodeid of revision; typically node is not specified, and it is
2383 2383 computed by default as hash(text, p1, p2), however subclasses might
2384 2384 use different hashing method (and override checkhash() in such case)
2385 2385 flags - the known flags to set on the revision
2386 2386 deltacomputer - an optional deltacomputer instance shared between
2387 2387 multiple calls
2388 2388 """
2389 2389 if link == nullrev:
2390 2390 raise error.RevlogError(
2391 2391 _(b"attempted to add linkrev -1 to %s") % self.display_id
2392 2392 )
2393 2393
2394 2394 if sidedata is None:
2395 2395 sidedata = {}
2396 2396 elif sidedata and not self.hassidedata:
2397 2397 raise error.ProgrammingError(
2398 2398 _(b"trying to add sidedata to a revlog who don't support them")
2399 2399 )
2400 2400
2401 2401 if flags:
2402 2402 node = node or self.hash(text, p1, p2)
2403 2403
2404 2404 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2405 2405
2406 2406 # If the flag processor modifies the revision data, ignore any provided
2407 2407 # cachedelta.
2408 2408 if rawtext != text:
2409 2409 cachedelta = None
2410 2410
2411 2411 if len(rawtext) > _maxentrysize:
2412 2412 raise error.RevlogError(
2413 2413 _(
2414 2414 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2415 2415 )
2416 2416 % (self.display_id, len(rawtext))
2417 2417 )
2418 2418
2419 2419 node = node or self.hash(rawtext, p1, p2)
2420 2420 rev = self.index.get_rev(node)
2421 2421 if rev is not None:
2422 2422 return rev
2423 2423
2424 2424 if validatehash:
2425 2425 self.checkhash(rawtext, node, p1=p1, p2=p2)
2426 2426
2427 2427 return self.addrawrevision(
2428 2428 rawtext,
2429 2429 transaction,
2430 2430 link,
2431 2431 p1,
2432 2432 p2,
2433 2433 node,
2434 2434 flags,
2435 2435 cachedelta=cachedelta,
2436 2436 deltacomputer=deltacomputer,
2437 2437 sidedata=sidedata,
2438 2438 )
2439 2439
2440 2440 def addrawrevision(
2441 2441 self,
2442 2442 rawtext,
2443 2443 transaction,
2444 2444 link,
2445 2445 p1,
2446 2446 p2,
2447 2447 node,
2448 2448 flags,
2449 2449 cachedelta=None,
2450 2450 deltacomputer=None,
2451 2451 sidedata=None,
2452 2452 ):
2453 2453 """add a raw revision with known flags, node and parents
2454 2454 useful when reusing a revision not stored in this revlog (ex: received
2455 2455 over wire, or read from an external bundle).
2456 2456 """
2457 2457 with self._writing(transaction):
2458 2458 return self._addrevision(
2459 2459 node,
2460 2460 rawtext,
2461 2461 transaction,
2462 2462 link,
2463 2463 p1,
2464 2464 p2,
2465 2465 flags,
2466 2466 cachedelta,
2467 2467 deltacomputer=deltacomputer,
2468 2468 sidedata=sidedata,
2469 2469 )
2470 2470
2471 2471 def compress(self, data):
2472 2472 """Generate a possibly-compressed representation of data."""
2473 2473 if not data:
2474 2474 return b'', data
2475 2475
2476 2476 compressed = self._compressor.compress(data)
2477 2477
2478 2478 if compressed:
2479 2479 # The revlog compressor added the header in the returned data.
2480 2480 return b'', compressed
2481 2481
2482 2482 if data[0:1] == b'\0':
2483 2483 return b'', data
2484 2484 return b'u', data
2485 2485
2486 2486 def decompress(self, data):
2487 2487 """Decompress a revlog chunk.
2488 2488
2489 2489 The chunk is expected to begin with a header identifying the
2490 2490 format type so it can be routed to an appropriate decompressor.
2491 2491 """
2492 2492 if not data:
2493 2493 return data
2494 2494
2495 2495 # Revlogs are read much more frequently than they are written and many
2496 2496 # chunks only take microseconds to decompress, so performance is
2497 2497 # important here.
2498 2498 #
2499 2499 # We can make a few assumptions about revlogs:
2500 2500 #
2501 2501 # 1) the majority of chunks will be compressed (as opposed to inline
2502 2502 # raw data).
2503 2503 # 2) decompressing *any* data will likely be at least 10x slower than
2504 2504 # returning raw inline data.
2505 2505 # 3) we want to prioritize common and officially supported compression
2506 2506 # engines
2507 2507 #
2508 2508 # It follows that we want to optimize for "decompress compressed data
2509 2509 # when encoded with common and officially supported compression engines"
2510 2510 # case over "raw data" and "data encoded by less common or non-official
2511 2511 # compression engines." That is why we have the inline lookup first
2512 2512 # followed by the compengines lookup.
2513 2513 #
2514 2514 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2515 2515 # compressed chunks. And this matters for changelog and manifest reads.
2516 2516 t = data[0:1]
2517 2517
2518 2518 if t == b'x':
2519 2519 try:
2520 2520 return _zlibdecompress(data)
2521 2521 except zlib.error as e:
2522 2522 raise error.RevlogError(
2523 2523 _(b'revlog decompress error: %s')
2524 2524 % stringutil.forcebytestr(e)
2525 2525 )
2526 2526 # '\0' is more common than 'u' so it goes first.
2527 2527 elif t == b'\0':
2528 2528 return data
2529 2529 elif t == b'u':
2530 2530 return util.buffer(data, 1)
2531 2531
2532 2532 compressor = self._get_decompressor(t)
2533 2533
2534 2534 return compressor.decompress(data)
2535 2535
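The header convention shared by compress() and decompress() above can be illustrated with a small, hypothetical roundtrip (`rl` stands for an open revlog instance and is an assumption, not part of the diff):

    header, packed = rl.compress(b'chunk contents')
    # `header` is b'' when the compressor emitted its own header (e.g. b'x'
    # for zlib) and b'u' when the chunk is stored uncompressed; either way,
    # the concatenation is what decompress() expects back.
    assert rl.decompress(header + packed) == b'chunk contents'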
2536 2536 def _addrevision(
2537 2537 self,
2538 2538 node,
2539 2539 rawtext,
2540 2540 transaction,
2541 2541 link,
2542 2542 p1,
2543 2543 p2,
2544 2544 flags,
2545 2545 cachedelta,
2546 2546 alwayscache=False,
2547 2547 deltacomputer=None,
2548 2548 sidedata=None,
2549 2549 ):
2550 2550 """internal function to add revisions to the log
2551 2551
2552 2552 see addrevision for argument descriptions.
2553 2553
2554 2554 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2555 2555
2556 2556 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2557 2557 be used.
2558 2558
2559 2559 invariants:
2560 2560 - rawtext is optional (can be None); if not set, cachedelta must be set.
2561 2561 if both are set, they must correspond to each other.
2562 2562 """
2563 2563 if node == self.nullid:
2564 2564 raise error.RevlogError(
2565 2565 _(b"%s: attempt to add null revision") % self.display_id
2566 2566 )
2567 2567 if (
2568 2568 node == self.nodeconstants.wdirid
2569 2569 or node in self.nodeconstants.wdirfilenodeids
2570 2570 ):
2571 2571 raise error.RevlogError(
2572 2572 _(b"%s: attempt to add wdir revision") % self.display_id
2573 2573 )
2574 2574 if self._writinghandles is None:
2575 2575 msg = b'adding revision outside `revlog._writing` context'
2576 2576 raise error.ProgrammingError(msg)
2577 2577
2578 2578 btext = [rawtext]
2579 2579
2580 2580 curr = len(self)
2581 2581 prev = curr - 1
2582 2582
2583 2583 offset = self._get_data_offset(prev)
2584 2584
2585 2585 if self._concurrencychecker:
2586 2586 ifh, dfh, sdfh = self._writinghandles
2587 2587 # XXX no checking for the sidedata file
2588 2588 if self._inline:
2589 2589 # offset is "as if" it were in the .d file, so we need to add on
2590 2590 # the size of the entry metadata.
2591 2591 self._concurrencychecker(
2592 2592 ifh, self._indexfile, offset + curr * self.index.entry_size
2593 2593 )
2594 2594 else:
2595 2595 # Entries in the .i are a consistent size.
2596 2596 self._concurrencychecker(
2597 2597 ifh, self._indexfile, curr * self.index.entry_size
2598 2598 )
2599 2599 self._concurrencychecker(dfh, self._datafile, offset)
2600 2600
2601 2601 p1r, p2r = self.rev(p1), self.rev(p2)
2602 2602
2603 2603 # full versions are inserted when the needed deltas
2604 2604 # become comparable to the uncompressed text
2605 2605 if rawtext is None:
2606 2606 # need rawtext size, before changed by flag processors, which is
2607 2607 # the non-raw size. use revlog explicitly to avoid filelog's extra
2608 2608 # logic that might remove metadata size.
2609 2609 textlen = mdiff.patchedsize(
2610 2610 revlog.size(self, cachedelta[0]), cachedelta[1]
2611 2611 )
2612 2612 else:
2613 2613 textlen = len(rawtext)
2614 2614
2615 2615 if deltacomputer is None:
2616 2616 write_debug = None
2617 2617 if self._debug_delta:
2618 2618 write_debug = transaction._report
2619 2619 deltacomputer = deltautil.deltacomputer(
2620 2620 self, write_debug=write_debug
2621 2621 )
2622 2622
2623 2623 if cachedelta is not None and len(cachedelta) == 2:
2624 2624 # If the cached delta has no information about how it should be
2625 2625 # reused, add the default reuse instruction according to the
2626 2626 # revlog's configuration.
2627 2627 if self._generaldelta and self._lazydeltabase:
2628 2628 delta_base_reuse = DELTA_BASE_REUSE_TRY
2629 2629 else:
2630 2630 delta_base_reuse = DELTA_BASE_REUSE_NO
2631 2631 cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)
2632 2632
2633 2633 revinfo = revlogutils.revisioninfo(
2634 2634 node,
2635 2635 p1,
2636 2636 p2,
2637 2637 btext,
2638 2638 textlen,
2639 2639 cachedelta,
2640 2640 flags,
2641 2641 )
2642 2642
2643 2643 deltainfo = deltacomputer.finddeltainfo(revinfo)
2644 2644
2645 2645 compression_mode = COMP_MODE_INLINE
2646 2646 if self._docket is not None:
2647 2647 default_comp = self._docket.default_compression_header
2648 2648 r = deltautil.delta_compression(default_comp, deltainfo)
2649 2649 compression_mode, deltainfo = r
2650 2650
2651 2651 sidedata_compression_mode = COMP_MODE_INLINE
2652 2652 if sidedata and self.hassidedata:
2653 2653 sidedata_compression_mode = COMP_MODE_PLAIN
2654 2654 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2655 2655 sidedata_offset = self._docket.sidedata_end
2656 2656 h, comp_sidedata = self.compress(serialized_sidedata)
2657 2657 if (
2658 2658 h != b'u'
2659 2659 and comp_sidedata[0:1] != b'\0'
2660 2660 and len(comp_sidedata) < len(serialized_sidedata)
2661 2661 ):
2662 2662 assert not h
2663 2663 if (
2664 2664 comp_sidedata[0:1]
2665 2665 == self._docket.default_compression_header
2666 2666 ):
2667 2667 sidedata_compression_mode = COMP_MODE_DEFAULT
2668 2668 serialized_sidedata = comp_sidedata
2669 2669 else:
2670 2670 sidedata_compression_mode = COMP_MODE_INLINE
2671 2671 serialized_sidedata = comp_sidedata
2672 2672 else:
2673 2673 serialized_sidedata = b""
2674 2674 # Don't store the offset if the sidedata is empty, that way
2675 2675 # we can easily detect empty sidedata and they will be no different
2676 2676 # than ones we manually add.
2677 2677 sidedata_offset = 0
2678 2678
2679 2679 rank = RANK_UNKNOWN
2680 2680 if self._compute_rank:
2681 2681 if (p1r, p2r) == (nullrev, nullrev):
2682 2682 rank = 1
2683 2683 elif p1r != nullrev and p2r == nullrev:
2684 2684 rank = 1 + self.fast_rank(p1r)
2685 2685 elif p1r == nullrev and p2r != nullrev:
2686 2686 rank = 1 + self.fast_rank(p2r)
2687 2687 else: # merge node
2688 2688 if rustdagop is not None and self.index.rust_ext_compat:
2689 2689 rank = rustdagop.rank(self.index, p1r, p2r)
2690 2690 else:
2691 2691 pmin, pmax = sorted((p1r, p2r))
2692 2692 rank = 1 + self.fast_rank(pmax)
2693 2693 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2694 2694
2695 2695 e = revlogutils.entry(
2696 2696 flags=flags,
2697 2697 data_offset=offset,
2698 2698 data_compressed_length=deltainfo.deltalen,
2699 2699 data_uncompressed_length=textlen,
2700 2700 data_compression_mode=compression_mode,
2701 2701 data_delta_base=deltainfo.base,
2702 2702 link_rev=link,
2703 2703 parent_rev_1=p1r,
2704 2704 parent_rev_2=p2r,
2705 2705 node_id=node,
2706 2706 sidedata_offset=sidedata_offset,
2707 2707 sidedata_compressed_length=len(serialized_sidedata),
2708 2708 sidedata_compression_mode=sidedata_compression_mode,
2709 2709 rank=rank,
2710 2710 )
2711 2711
2712 2712 self.index.append(e)
2713 2713 entry = self.index.entry_binary(curr)
2714 2714 if curr == 0 and self._docket is None:
2715 2715 header = self._format_flags | self._format_version
2716 2716 header = self.index.pack_header(header)
2717 2717 entry = header + entry
2718 2718 self._writeentry(
2719 2719 transaction,
2720 2720 entry,
2721 2721 deltainfo.data,
2722 2722 link,
2723 2723 offset,
2724 2724 serialized_sidedata,
2725 2725 sidedata_offset,
2726 2726 )
2727 2727
2728 2728 rawtext = btext[0]
2729 2729
2730 2730 if alwayscache and rawtext is None:
2731 2731 rawtext = deltacomputer.buildtext(revinfo)
2732 2732
2733 2733 if type(rawtext) == bytes: # only accept immutable objects
2734 2734 self._revisioncache = (node, curr, rawtext)
2735 2735 self._chainbasecache[curr] = deltainfo.chainbase
2736 2736 return curr
2737 2737
2738 2738 def _get_data_offset(self, prev):
2739 2739 """Returns the current offset in the (in-transaction) data file.
2740 2740 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2741 2741 file to store that information: since sidedata can be rewritten to the
2742 2742 end of the data file within a transaction, you can have cases where, for
2743 2743 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2744 2744 to `n - 1`'s sidedata being written after `n`'s data.
2745 2745
2746 2746 TODO cache this in a docket file before getting out of experimental."""
2747 2747 if self._docket is None:
2748 2748 return self.end(prev)
2749 2749 else:
2750 2750 return self._docket.data_end
2751 2751
2752 2752 def _writeentry(
2753 2753 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2754 2754 ):
2755 2755 # Files opened in a+ mode have inconsistent behavior on various
2756 2756 # platforms. Windows requires that a file positioning call be made
2757 2757 # when the file handle transitions between reads and writes. See
2758 2758 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2759 2759 # platforms, Python or the platform itself can be buggy. Some versions
2760 2760 # of Solaris have been observed to not append at the end of the file
2761 2761 # if the file was seeked to before the end. See issue4943 for more.
2762 2762 #
2763 2763 # We work around this issue by inserting a seek() before writing.
2764 2764 # Note: This is likely not necessary on Python 3. However, because
2765 2765 # the file handle is reused for reads and may be seeked there, we need
2766 2766 # to be careful before changing this.
2767 2767 if self._writinghandles is None:
2768 2768 msg = b'adding revision outside `revlog._writing` context'
2769 2769 raise error.ProgrammingError(msg)
2770 2770 ifh, dfh, sdfh = self._writinghandles
2771 2771 if self._docket is None:
2772 2772 ifh.seek(0, os.SEEK_END)
2773 2773 else:
2774 2774 ifh.seek(self._docket.index_end, os.SEEK_SET)
2775 2775 if dfh:
2776 2776 if self._docket is None:
2777 2777 dfh.seek(0, os.SEEK_END)
2778 2778 else:
2779 2779 dfh.seek(self._docket.data_end, os.SEEK_SET)
2780 2780 if sdfh:
2781 2781 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2782 2782
2783 2783 curr = len(self) - 1
2784 2784 if not self._inline:
2785 2785 transaction.add(self._datafile, offset)
2786 2786 if self._sidedatafile:
2787 2787 transaction.add(self._sidedatafile, sidedata_offset)
2788 2788 transaction.add(self._indexfile, curr * len(entry))
2789 2789 if data[0]:
2790 2790 dfh.write(data[0])
2791 2791 dfh.write(data[1])
2792 2792 if sidedata:
2793 2793 sdfh.write(sidedata)
2794 2794 ifh.write(entry)
2795 2795 else:
2796 2796 offset += curr * self.index.entry_size
2797 2797 transaction.add(self._indexfile, offset)
2798 2798 ifh.write(entry)
2799 2799 ifh.write(data[0])
2800 2800 ifh.write(data[1])
2801 2801 assert not sidedata
2802 2802 self._enforceinlinesize(transaction)
2803 2803 if self._docket is not None:
2804 2804 # revlog-v2 always has 3 writing handles, help Pytype
2805 2805 wh1 = self._writinghandles[0]
2806 2806 wh2 = self._writinghandles[1]
2807 2807 wh3 = self._writinghandles[2]
2808 2808 assert wh1 is not None
2809 2809 assert wh2 is not None
2810 2810 assert wh3 is not None
2811 2811 self._docket.index_end = wh1.tell()
2812 2812 self._docket.data_end = wh2.tell()
2813 2813 self._docket.sidedata_end = wh3.tell()
2814 2814
2815 2815 nodemaputil.setup_persistent_nodemap(transaction, self)
2816 2816
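The seek-before-write workaround described at the top of _writeentry() boils down to the following generic pattern; this is a standalone illustration, not revlog code, and assumes 'example.dat' already exists.

    import os

    with open('example.dat', 'r+b') as fh:
        fh.read(16)                # the handle was last used for reading...
        fh.seek(0, os.SEEK_END)    # ...so position it explicitly before appending
        fh.write(b'new payload')   # the append now lands where expected, even on
                                   # platforms with inconsistent a+/seek behaviour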
2817 2817 def addgroup(
2818 2818 self,
2819 2819 deltas,
2820 2820 linkmapper,
2821 2821 transaction,
2822 2822 alwayscache=False,
2823 2823 addrevisioncb=None,
2824 2824 duplicaterevisioncb=None,
2825 2825 debug_info=None,
2826 2826 delta_base_reuse_policy=None,
2827 2827 ):
2828 2828 """
2829 2829 add a delta group
2830 2830
2831 2831 given a set of deltas, add them to the revision log. the
2832 2832 first delta is against its parent, which should be in our
2833 2833 log, the rest are against the previous delta.
2834 2834 log; the rest are against the previous delta.
2835 2835 If ``addrevisioncb`` is defined, it will be called with arguments of
2836 2836 this revlog and the revision number that was added.
2837 2837 """
2838 2838
2839 2839 if self._adding_group:
2840 2840 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2841 2841
2842 2842 # read the default delta-base reuse policy from revlog config if the
2843 2843 # group did not specify one.
2844 2844 if delta_base_reuse_policy is None:
2845 2845 if self._generaldelta and self._lazydeltabase:
2846 2846 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
2847 2847 else:
2848 2848 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
2849 2849
2850 2850 self._adding_group = True
2851 2851 empty = True
2852 2852 try:
2853 2853 with self._writing(transaction):
2854 2854 write_debug = None
2855 2855 if self._debug_delta:
2856 2856 write_debug = transaction._report
2857 2857 deltacomputer = deltautil.deltacomputer(
2858 2858 self,
2859 2859 write_debug=write_debug,
2860 2860 debug_info=debug_info,
2861 2861 )
2862 2862 # loop through our set of deltas
2863 2863 for data in deltas:
2864 2864 (
2865 2865 node,
2866 2866 p1,
2867 2867 p2,
2868 2868 linknode,
2869 2869 deltabase,
2870 2870 delta,
2871 2871 flags,
2872 2872 sidedata,
2873 2873 ) = data
2874 2874 link = linkmapper(linknode)
2875 2875 flags = flags or REVIDX_DEFAULT_FLAGS
2876 2876
2877 2877 rev = self.index.get_rev(node)
2878 2878 if rev is not None:
2879 2879 # this can happen if two branches make the same change
2880 2880 self._nodeduplicatecallback(transaction, rev)
2881 2881 if duplicaterevisioncb:
2882 2882 duplicaterevisioncb(self, rev)
2883 2883 empty = False
2884 2884 continue
2885 2885
2886 2886 for p in (p1, p2):
2887 2887 if not self.index.has_node(p):
2888 2888 raise error.LookupError(
2889 2889 p, self.radix, _(b'unknown parent')
2890 2890 )
2891 2891
2892 2892 if not self.index.has_node(deltabase):
2893 2893 raise error.LookupError(
2894 2894 deltabase, self.display_id, _(b'unknown delta base')
2895 2895 )
2896 2896
2897 2897 baserev = self.rev(deltabase)
2898 2898
2899 2899 if baserev != nullrev and self.iscensored(baserev):
2900 2900 # if base is censored, delta must be full replacement in a
2901 2901 # single patch operation
2902 2902 hlen = struct.calcsize(b">lll")
2903 2903 oldlen = self.rawsize(baserev)
2904 2904 newlen = len(delta) - hlen
2905 2905 if delta[:hlen] != mdiff.replacediffheader(
2906 2906 oldlen, newlen
2907 2907 ):
2908 2908 raise error.CensoredBaseError(
2909 2909 self.display_id, self.node(baserev)
2910 2910 )
2911 2911
2912 2912 if not flags and self._peek_iscensored(baserev, delta):
2913 2913 flags |= REVIDX_ISCENSORED
2914 2914
2915 2915 # We assume consumers of addrevisioncb will want to retrieve
2916 2916 # the added revision, which will require a call to
2917 2917 # revision(). revision() will fast path if there is a cache
2918 2918 # hit. So, we tell _addrevision() to always cache in this case.
2919 2919 # We're only using addgroup() in the context of changegroup
2920 2920 # generation so the revision data can always be handled as raw
2921 2921 # by the flagprocessor.
2922 2922 rev = self._addrevision(
2923 2923 node,
2924 2924 None,
2925 2925 transaction,
2926 2926 link,
2927 2927 p1,
2928 2928 p2,
2929 2929 flags,
2930 2930 (baserev, delta, delta_base_reuse_policy),
2931 2931 alwayscache=alwayscache,
2932 2932 deltacomputer=deltacomputer,
2933 2933 sidedata=sidedata,
2934 2934 )
2935 2935
2936 2936 if addrevisioncb:
2937 2937 addrevisioncb(self, rev)
2938 2938 empty = False
2939 2939 finally:
2940 2940 self._adding_group = False
2941 2941 return not empty
2942 2942
2943 2943 def iscensored(self, rev):
2944 2944 """Check if a file revision is censored."""
2945 2945 if not self._censorable:
2946 2946 return False
2947 2947
2948 2948 return self.flags(rev) & REVIDX_ISCENSORED
2949 2949
2950 2950 def _peek_iscensored(self, baserev, delta):
2951 2951 """Quickly check if a delta produces a censored revision."""
2952 2952 if not self._censorable:
2953 2953 return False
2954 2954
2955 2955 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2956 2956
2957 2957 def getstrippoint(self, minlink):
2958 2958 """find the minimum rev that must be stripped to strip the linkrev
2959 2959
2960 2960 Returns a tuple containing the minimum rev and a set of all revs that
2961 2961 have linkrevs that will be broken by this strip.
2962 2962 """
2963 2963 return storageutil.resolvestripinfo(
2964 2964 minlink,
2965 2965 len(self) - 1,
2966 2966 self.headrevs(),
2967 2967 self.linkrev,
2968 2968 self.parentrevs,
2969 2969 )
2970 2970
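For instance, a hypothetical use of the helper above (`rl` is an open revlog and `minlink` the first changelog revision being stripped; both names are assumptions):

    striprev, brokenrevs = rl.getstrippoint(minlink)
    # striprev: first revision strip() would truncate at
    # brokenrevs: revisions whose linkrev points at a changelog rev being removed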
2971 2971 def strip(self, minlink, transaction):
2972 2972 """truncate the revlog on the first revision with a linkrev >= minlink
2973 2973
2974 2974 This function is called when we're stripping revision minlink and
2975 2975 its descendants from the repository.
2976 2976
2977 2977 We have to remove all revisions with linkrev >= minlink, because
2978 2978 the equivalent changelog revisions will be renumbered after the
2979 2979 strip.
2980 2980
2981 2981 So we truncate the revlog on the first of these revisions, and
2982 2982 trust that the caller has saved the revisions that shouldn't be
2983 2983 removed and that it'll re-add them after this truncation.
2984 2984 """
2985 2985 if len(self) == 0:
2986 2986 return
2987 2987
2988 2988 rev, _ = self.getstrippoint(minlink)
2989 2989 if rev == len(self):
2990 2990 return
2991 2991
2992 2992 # first truncate the files on disk
2993 2993 data_end = self.start(rev)
2994 2994 if not self._inline:
2995 2995 transaction.add(self._datafile, data_end)
2996 2996 end = rev * self.index.entry_size
2997 2997 else:
2998 2998 end = data_end + (rev * self.index.entry_size)
2999 2999
3000 3000 if self._sidedatafile:
3001 3001 sidedata_end = self.sidedata_cut_off(rev)
3002 3002 transaction.add(self._sidedatafile, sidedata_end)
3003 3003
3004 3004 transaction.add(self._indexfile, end)
3005 3005 if self._docket is not None:
3006 3006 # XXX we could leverage the docket while stripping. However, it is
3007 3007 # not powerful enough at the time of this comment
3008 3008 self._docket.index_end = end
3009 3009 self._docket.data_end = data_end
3010 3010 self._docket.sidedata_end = sidedata_end
3011 3011 self._docket.write(transaction, stripping=True)
3012 3012
3013 3013 # then reset internal state in memory to forget those revisions
3014 3014 self._revisioncache = None
3015 3015 self._chaininfocache = util.lrucachedict(500)
3016 3016 self._segmentfile.clear_cache()
3017 3017 self._segmentfile_sidedata.clear_cache()
3018 3018
3019 3019 del self.index[rev:-1]
3020 3020
3021 3021 def checksize(self):
3022 3022 """Check size of index and data files
3023 3023
3024 3024 return a (dd, di) tuple.
3025 3025 - dd: extra bytes for the "data" file
3026 3026 - di: extra bytes for the "index" file
3027 3027
3028 3028 A healthy revlog will return (0, 0).
3029 3029 """
3030 3030 expected = 0
3031 3031 if len(self):
3032 3032 expected = max(0, self.end(len(self) - 1))
3033 3033
3034 3034 try:
3035 3035 with self._datafp() as f:
3036 3036 f.seek(0, io.SEEK_END)
3037 3037 actual = f.tell()
3038 3038 dd = actual - expected
3039 3039 except FileNotFoundError:
3040 3040 dd = 0
3041 3041
3042 3042 try:
3043 3043 f = self.opener(self._indexfile)
3044 3044 f.seek(0, io.SEEK_END)
3045 3045 actual = f.tell()
3046 3046 f.close()
3047 3047 s = self.index.entry_size
3048 3048 i = max(0, actual // s)
3049 3049 di = actual - (i * s)
3050 3050 if self._inline:
3051 3051 databytes = 0
3052 3052 for r in self:
3053 3053 databytes += max(0, self.length(r))
3054 3054 dd = 0
3055 3055 di = actual - len(self) * s - databytes
3056 3056 except FileNotFoundError:
3057 3057 di = 0
3058 3058
3059 3059 return (dd, di)
3060 3060
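As a usage note for the diagnostic above, a hypothetical health check (with `rl` an open revlog, an assumed name):

    dd, di = rl.checksize()
    if (dd, di) != (0, 0):
        # dd: stray bytes in the data file, di: stray bytes in the index file
        print('revlog looks damaged: dd=%d, di=%d' % (dd, di))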
3061 3061 def files(self):
3062 3062 res = [self._indexfile]
3063 3063 if self._docket_file is None:
3064 3064 if not self._inline:
3065 3065 res.append(self._datafile)
3066 3066 else:
3067 3067 res.append(self._docket_file)
3068 3068 res.extend(self._docket.old_index_filepaths(include_empty=False))
3069 3069 if self._docket.data_end:
3070 3070 res.append(self._datafile)
3071 3071 res.extend(self._docket.old_data_filepaths(include_empty=False))
3072 3072 if self._docket.sidedata_end:
3073 3073 res.append(self._sidedatafile)
3074 3074 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
3075 3075 return res
3076 3076
3077 3077 def emitrevisions(
3078 3078 self,
3079 3079 nodes,
3080 3080 nodesorder=None,
3081 3081 revisiondata=False,
3082 3082 assumehaveparentrevisions=False,
3083 3083 deltamode=repository.CG_DELTAMODE_STD,
3084 3084 sidedata_helpers=None,
3085 3085 debug_info=None,
3086 3086 ):
3087 3087 if nodesorder not in (b'nodes', b'storage', b'linear', None):
3088 3088 raise error.ProgrammingError(
3089 3089 b'unhandled value for nodesorder: %s' % nodesorder
3090 3090 )
3091 3091
3092 3092 if nodesorder is None and not self._generaldelta:
3093 3093 nodesorder = b'storage'
3094 3094
3095 3095 if (
3096 3096 not self._storedeltachains
3097 3097 and deltamode != repository.CG_DELTAMODE_PREV
3098 3098 ):
3099 3099 deltamode = repository.CG_DELTAMODE_FULL
3100 3100
3101 3101 return storageutil.emitrevisions(
3102 3102 self,
3103 3103 nodes,
3104 3104 nodesorder,
3105 3105 revlogrevisiondelta,
3106 3106 deltaparentfn=self.deltaparent,
3107 3107 candeltafn=self._candelta,
3108 3108 rawsizefn=self.rawsize,
3109 3109 revdifffn=self.revdiff,
3110 3110 flagsfn=self.flags,
3111 3111 deltamode=deltamode,
3112 3112 revisiondata=revisiondata,
3113 3113 assumehaveparentrevisions=assumehaveparentrevisions,
3114 3114 sidedata_helpers=sidedata_helpers,
3115 3115 debug_info=debug_info,
3116 3116 )
3117 3117
3118 3118 DELTAREUSEALWAYS = b'always'
3119 3119 DELTAREUSESAMEREVS = b'samerevs'
3120 3120 DELTAREUSENEVER = b'never'
3121 3121
3122 3122 DELTAREUSEFULLADD = b'fulladd'
3123 3123
3124 3124 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
3125 3125
3126 3126 def clone(
3127 3127 self,
3128 3128 tr,
3129 3129 destrevlog,
3130 3130 addrevisioncb=None,
3131 3131 deltareuse=DELTAREUSESAMEREVS,
3132 3132 forcedeltabothparents=None,
3133 3133 sidedata_helpers=None,
3134 3134 ):
3135 3135 """Copy this revlog to another, possibly with format changes.
3136 3136
3137 3137 The destination revlog will contain the same revisions and nodes.
3138 3138 However, it may not be bit-for-bit identical due to e.g. delta encoding
3139 3139 differences.
3140 3140
3141 3141 The ``deltareuse`` argument controls how deltas from the existing revlog
3142 3142 are preserved in the destination revlog. The argument can have the
3143 3143 following values:
3144 3144
3145 3145 DELTAREUSEALWAYS
3146 3146 Deltas will always be reused (if possible), even if the destination
3147 3147 revlog would not select the same revisions for the delta. This is the
3148 3148 fastest mode of operation.
3149 3149 DELTAREUSESAMEREVS
3150 3150 Deltas will be reused if the destination revlog would pick the same
3151 3151 revisions for the delta. This mode strikes a balance between speed
3152 3152 and optimization.
3153 3153 DELTAREUSENEVER
3154 3154 Deltas will never be reused. This is the slowest mode of execution.
3155 3155 This mode can be used to recompute deltas (e.g. if the diff/delta
3156 3156 algorithm changes).
3157 3157 DELTAREUSEFULLADD
3158 3158 Revisions will be re-added as if they were new content. This is
3159 3159 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
3160 3160 e.g. large file detection and handling.
3161 3161
3162 3162 Delta computation can be slow, so the choice of delta reuse policy can
3163 3163 significantly affect run time.
3164 3164
3165 3165 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
3166 3166 two extremes. Deltas will be reused if they are appropriate. But if the
3167 3167 delta could choose a better revision, it will do so. This means if you
3168 3168 are converting a non-generaldelta revlog to a generaldelta revlog,
3169 3169 deltas will be recomputed if the delta's parent isn't a parent of the
3170 3170 revision.
3171 3171
3172 3172 In addition to the delta policy, the ``forcedeltabothparents`` argument
3173 3173 controls whether to force computing deltas against both parents for
3174 3174 merges. If not set, the destination revlog's existing configuration is used.
3175 3175
3176 3176 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
3177 3177 `sidedata_helpers`.
3178 3178 """
3179 3179 if deltareuse not in self.DELTAREUSEALL:
3180 3180 raise ValueError(
3181 3181 _(b'value for deltareuse invalid: %s') % deltareuse
3182 3182 )
3183 3183
3184 3184 if len(destrevlog):
3185 3185 raise ValueError(_(b'destination revlog is not empty'))
3186 3186
3187 3187 if getattr(self, 'filteredrevs', None):
3188 3188 raise ValueError(_(b'source revlog has filtered revisions'))
3189 3189 if getattr(destrevlog, 'filteredrevs', None):
3190 3190 raise ValueError(_(b'destination revlog has filtered revisions'))
3191 3191
3192 3192 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3193 3193 # if possible.
3194 3194 oldlazydelta = destrevlog._lazydelta
3195 3195 oldlazydeltabase = destrevlog._lazydeltabase
3196 3196 oldamd = destrevlog._deltabothparents
3197 3197
3198 3198 try:
3199 3199 if deltareuse == self.DELTAREUSEALWAYS:
3200 3200 destrevlog._lazydeltabase = True
3201 3201 destrevlog._lazydelta = True
3202 3202 elif deltareuse == self.DELTAREUSESAMEREVS:
3203 3203 destrevlog._lazydeltabase = False
3204 3204 destrevlog._lazydelta = True
3205 3205 elif deltareuse == self.DELTAREUSENEVER:
3206 3206 destrevlog._lazydeltabase = False
3207 3207 destrevlog._lazydelta = False
3208 3208
3209 3209 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3210 3210
3211 3211 with self.reading():
3212 3212 self._clone(
3213 3213 tr,
3214 3214 destrevlog,
3215 3215 addrevisioncb,
3216 3216 deltareuse,
3217 3217 forcedeltabothparents,
3218 3218 sidedata_helpers,
3219 3219 )
3220 3220
3221 3221 finally:
3222 3222 destrevlog._lazydelta = oldlazydelta
3223 3223 destrevlog._lazydeltabase = oldlazydeltabase
3224 3224 destrevlog._deltabothparents = oldamd
3225 3225
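A minimal sketch of selecting a delta reuse policy when cloning (hypothetical `src`/`dst` revlogs and transaction `tr`, all assumed names); DELTAREUSENEVER forces every delta to be recomputed, e.g. after changing delta settings:

    # DELTAREUSEALWAYS / DELTAREUSESAMEREVS / DELTAREUSEFULLADD are the other options
    src.clone(tr, dst, deltareuse=src.DELTAREUSENEVER)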
3226 3226 def _clone(
3227 3227 self,
3228 3228 tr,
3229 3229 destrevlog,
3230 3230 addrevisioncb,
3231 3231 deltareuse,
3232 3232 forcedeltabothparents,
3233 3233 sidedata_helpers,
3234 3234 ):
3235 3235 """perform the core duty of `revlog.clone` after parameter processing"""
3236 3236 write_debug = None
3237 3237 if self._debug_delta:
3238 3238 write_debug = tr._report
3239 3239 deltacomputer = deltautil.deltacomputer(
3240 3240 destrevlog,
3241 3241 write_debug=write_debug,
3242 3242 )
3243 3243 index = self.index
3244 3244 for rev in self:
3245 3245 entry = index[rev]
3246 3246
3247 3247 # Some classes override linkrev to take filtered revs into
3248 3248 # account. Use raw entry from index.
3249 3249 flags = entry[0] & 0xFFFF
3250 3250 linkrev = entry[4]
3251 3251 p1 = index[entry[5]][7]
3252 3252 p2 = index[entry[6]][7]
3253 3253 node = entry[7]
3254 3254
3255 3255 # (Possibly) reuse the delta from the revlog if allowed and
3256 3256 # the revlog chunk is a delta.
3257 3257 cachedelta = None
3258 3258 rawtext = None
3259 3259 if deltareuse == self.DELTAREUSEFULLADD:
3260 3260 text = self._revisiondata(rev)
3261 3261 sidedata = self.sidedata(rev)
3262 3262
3263 3263 if sidedata_helpers is not None:
3264 3264 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3265 3265 self, sidedata_helpers, sidedata, rev
3266 3266 )
3267 3267 flags = flags | new_flags[0] & ~new_flags[1]
3268 3268
3269 3269 destrevlog.addrevision(
3270 3270 text,
3271 3271 tr,
3272 3272 linkrev,
3273 3273 p1,
3274 3274 p2,
3275 3275 cachedelta=cachedelta,
3276 3276 node=node,
3277 3277 flags=flags,
3278 3278 deltacomputer=deltacomputer,
3279 3279 sidedata=sidedata,
3280 3280 )
3281 3281 else:
3282 3282 if destrevlog._lazydelta:
3283 3283 dp = self.deltaparent(rev)
3284 3284 if dp != nullrev:
3285 3285 cachedelta = (dp, bytes(self._chunk(rev)))
3286 3286
3287 3287 sidedata = None
3288 3288 if not cachedelta:
3289 3289 rawtext = self._revisiondata(rev)
3290 3290 sidedata = self.sidedata(rev)
3291 3291 if sidedata is None:
3292 3292 sidedata = self.sidedata(rev)
3293 3293
3294 3294 if sidedata_helpers is not None:
3295 3295 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3296 3296 self, sidedata_helpers, sidedata, rev
3297 3297 )
3298 3298 flags = flags | new_flags[0] & ~new_flags[1]
3299 3299
3300 3300 with destrevlog._writing(tr):
3301 3301 destrevlog._addrevision(
3302 3302 node,
3303 3303 rawtext,
3304 3304 tr,
3305 3305 linkrev,
3306 3306 p1,
3307 3307 p2,
3308 3308 flags,
3309 3309 cachedelta,
3310 3310 deltacomputer=deltacomputer,
3311 3311 sidedata=sidedata,
3312 3312 )
3313 3313
3314 3314 if addrevisioncb:
3315 3315 addrevisioncb(self, rev, node)
3316 3316
3317 3317 def censorrevision(self, tr, censornode, tombstone=b''):
3318 3318 if self._format_version == REVLOGV0:
3319 3319 raise error.RevlogError(
3320 3320 _(b'cannot censor with version %d revlogs')
3321 3321 % self._format_version
3322 3322 )
3323 3323 elif self._format_version == REVLOGV1:
3324 3324 rewrite.v1_censor(self, tr, censornode, tombstone)
3325 3325 else:
3326 3326 rewrite.v2_censor(self, tr, censornode, tombstone)
3327 3327
3328 3328 def verifyintegrity(self, state):
3329 3329 """Verifies the integrity of the revlog.
3330 3330
3331 3331 Yields ``revlogproblem`` instances describing problems that are
3332 3332 found.
3333 3333 """
3334 3334 dd, di = self.checksize()
3335 3335 if dd:
3336 3336 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3337 3337 if di:
3338 3338 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3339 3339
3340 3340 version = self._format_version
3341 3341
3342 3342 # The verifier tells us what version revlog we should be.
3343 3343 if version != state[b'expectedversion']:
3344 3344 yield revlogproblem(
3345 3345 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3346 3346 % (self.display_id, version, state[b'expectedversion'])
3347 3347 )
3348 3348
3349 3349 state[b'skipread'] = set()
3350 3350 state[b'safe_renamed'] = set()
3351 3351
3352 3352 for rev in self:
3353 3353 node = self.node(rev)
3354 3354
3355 3355 # Verify contents. 4 cases to care about:
3356 3356 #
3357 3357 # common: the most common case
3358 3358 # rename: with a rename
3359 3359 # meta: file content starts with b'\1\n', the metadata
3360 3360 # header defined in filelog.py, but without a rename
3361 3361 # ext: content stored externally
3362 3362 #
3363 3363 # More formally, their differences are shown below:
3364 3364 #
3365 3365 # | common | rename | meta | ext
3366 3366 # -------------------------------------------------------
3367 3367 # flags() | 0 | 0 | 0 | not 0
3368 3368 # renamed() | False | True | False | ?
3369 3369 # rawtext[0:2]=='\1\n'| False | True | True | ?
3370 3370 #
3371 3371 # "rawtext" means the raw text stored in revlog data, which
3372 3372 # could be retrieved by "rawdata(rev)". "text"
3373 3373 # mentioned below is "revision(rev)".
3374 3374 #
3375 3375 # There are 3 different lengths stored physically:
3376 3376 # 1. L1: rawsize, stored in revlog index
3377 3377 # 2. L2: len(rawtext), stored in revlog data
3378 3378 # 3. L3: len(text), stored in revlog data if flags==0, or
3379 3379 # possibly somewhere else if flags!=0
3380 3380 #
3381 3381 # L1 should be equal to L2. L3 could be different from them.
3382 3382 # "text" may or may not affect commit hash depending on flag
3383 3383 # processors (see flagutil.addflagprocessor).
3384 3384 #
3385 3385 # | common | rename | meta | ext
3386 3386 # -------------------------------------------------
3387 3387 # rawsize() | L1 | L1 | L1 | L1
3388 3388 # size() | L1 | L2-LM | L1(*) | L1 (?)
3389 3389 # len(rawtext) | L2 | L2 | L2 | L2
3390 3390 # len(text) | L2 | L2 | L2 | L3
3391 3391 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3392 3392 #
3393 3393 # LM: length of metadata, depending on rawtext
3394 3394 # (*): not ideal, see comment in filelog.size
3395 3395 # (?): could be "- len(meta)" if the resolved content has
3396 3396 # rename metadata
3397 3397 #
3398 3398 # Checks needed to be done:
3399 3399 # 1. length check: L1 == L2, in all cases.
3400 3400 # 2. hash check: depending on flag processor, we may need to
3401 3401 # use either "text" (external), or "rawtext" (in revlog).
3402 3402
3403 3403 try:
3404 3404 skipflags = state.get(b'skipflags', 0)
3405 3405 if skipflags:
3406 3406 skipflags &= self.flags(rev)
3407 3407
3408 3408 _verify_revision(self, skipflags, state, node)
3409 3409
3410 3410 l1 = self.rawsize(rev)
3411 3411 l2 = len(self.rawdata(node))
3412 3412
3413 3413 if l1 != l2:
3414 3414 yield revlogproblem(
3415 3415 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3416 3416 node=node,
3417 3417 )
3418 3418
3419 3419 except error.CensoredNodeError:
3420 3420 if state[b'erroroncensored']:
3421 3421 yield revlogproblem(
3422 3422 error=_(b'censored file data'), node=node
3423 3423 )
3424 3424 state[b'skipread'].add(node)
3425 3425 except Exception as e:
3426 3426 yield revlogproblem(
3427 3427 error=_(b'unpacking %s: %s')
3428 3428 % (short(node), stringutil.forcebytestr(e)),
3429 3429 node=node,
3430 3430 )
3431 3431 state[b'skipread'].add(node)
3432 3432
3433 3433 def storageinfo(
3434 3434 self,
3435 3435 exclusivefiles=False,
3436 3436 sharedfiles=False,
3437 3437 revisionscount=False,
3438 3438 trackedsize=False,
3439 3439 storedsize=False,
3440 3440 ):
3441 3441 d = {}
3442 3442
3443 3443 if exclusivefiles:
3444 3444 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3445 3445 if not self._inline:
3446 3446 d[b'exclusivefiles'].append((self.opener, self._datafile))
3447 3447
3448 3448 if sharedfiles:
3449 3449 d[b'sharedfiles'] = []
3450 3450
3451 3451 if revisionscount:
3452 3452 d[b'revisionscount'] = len(self)
3453 3453
3454 3454 if trackedsize:
3455 3455 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3456 3456
3457 3457 if storedsize:
3458 3458 d[b'storedsize'] = sum(
3459 3459 self.opener.stat(path).st_size for path in self.files()
3460 3460 )
3461 3461
3462 3462 return d
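For example, a hypothetical query against the method above (`rl` an open revlog, an assumed name):

    info = rl.storageinfo(revisionscount=True, trackedsize=True)
    # e.g. {b'revisionscount': 42, b'trackedsize': 123456}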
3463 3463
3464 3464 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3465 3465 if not self.hassidedata:
3466 3466 return
3467 3467 # revlog formats with sidedata support do not support inline
3468 3468 assert not self._inline
3469 3469 if not helpers[1] and not helpers[2]:
3470 3470 # Nothing to generate or remove
3471 3471 return
3472 3472
3473 3473 new_entries = []
3474 3474 # append the new sidedata
3475 3475 with self._writing(transaction):
3476 3476 ifh, dfh, sdfh = self._writinghandles
3477 3477 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3478 3478
3479 3479 current_offset = sdfh.tell()
3480 3480 for rev in range(startrev, endrev + 1):
3481 3481 entry = self.index[rev]
3482 3482 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3483 3483 store=self,
3484 3484 sidedata_helpers=helpers,
3485 3485 sidedata={},
3486 3486 rev=rev,
3487 3487 )
3488 3488
3489 3489 serialized_sidedata = sidedatautil.serialize_sidedata(
3490 3490 new_sidedata
3491 3491 )
3492 3492
3493 3493 sidedata_compression_mode = COMP_MODE_INLINE
3494 3494 if serialized_sidedata and self.hassidedata:
3495 3495 sidedata_compression_mode = COMP_MODE_PLAIN
3496 3496 h, comp_sidedata = self.compress(serialized_sidedata)
3497 3497 if (
3498 3498 h != b'u'
3499 3499 and comp_sidedata[0] != b'\0'
3500 3500 and len(comp_sidedata) < len(serialized_sidedata)
3501 3501 ):
3502 3502 assert not h
3503 3503 if (
3504 3504 comp_sidedata[0]
3505 3505 == self._docket.default_compression_header
3506 3506 ):
3507 3507 sidedata_compression_mode = COMP_MODE_DEFAULT
3508 3508 serialized_sidedata = comp_sidedata
3509 3509 else:
3510 3510 sidedata_compression_mode = COMP_MODE_INLINE
3511 3511 serialized_sidedata = comp_sidedata
3512 3512 if entry[8] != 0 or entry[9] != 0:
3513 3513 # rewriting entries that already have sidedata is not
3514 3514 # supported yet, because it introduces garbage data in the
3515 3515 # revlog.
3516 3516 msg = b"rewriting existing sidedata is not supported yet"
3517 3517 raise error.Abort(msg)
3518 3518
3519 3519 # Apply (potential) flags to add and to remove after running
3520 3520 # the sidedata helpers
3521 3521 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3522 3522 entry_update = (
3523 3523 current_offset,
3524 3524 len(serialized_sidedata),
3525 3525 new_offset_flags,
3526 3526 sidedata_compression_mode,
3527 3527 )
3528 3528
3529 3529 # the sidedata computation might have moved the file cursors around
3530 3530 sdfh.seek(current_offset, os.SEEK_SET)
3531 3531 sdfh.write(serialized_sidedata)
3532 3532 new_entries.append(entry_update)
3533 3533 current_offset += len(serialized_sidedata)
3534 3534 self._docket.sidedata_end = sdfh.tell()
3535 3535
3536 3536 # rewrite the new index entries
3537 3537 ifh.seek(startrev * self.index.entry_size)
3538 3538 for i, e in enumerate(new_entries):
3539 3539 rev = startrev + i
3540 3540 self.index.replace_sidedata_info(rev, *e)
3541 3541 packed = self.index.entry_binary(rev)
3542 3542 if rev == 0 and self._docket is None:
3543 3543 header = self._format_flags | self._format_version
3544 3544 header = self.index.pack_header(header)
3545 3545 packed = header + packed
3546 3546 ifh.write(packed)
@@ -1,342 +1,342 b''
1 1 # unionrepo.py - repository class for viewing union of repository changesets
2 2 #
3 3 # Derived from bundlerepo.py
4 4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Repository class for "in-memory pull" of one local repository to another,
11 11 allowing operations like diff and log with revsets.
12 12 """
13 13
14 14 import contextlib
15 15
16 16
17 17 from .i18n import _
18 18
19 19 from . import (
20 20 changelog,
21 21 cmdutil,
22 22 encoding,
23 23 error,
24 24 filelog,
25 25 localrepo,
26 26 manifest,
27 27 mdiff,
28 28 pathutil,
29 29 revlog,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33
34 34 from .revlogutils import (
35 35 constants as revlog_constants,
36 36 )
37 37
38 38
39 39 class unionrevlog(revlog.revlog):
40 40 def __init__(self, opener, radix, revlog2, linkmapper):
41 41 # How it works:
42 42 # To retrieve a revision, we just need to know the node id so we can
43 43 # look it up in revlog2.
44 44 #
45 45 # To differentiate a rev in the second revlog from a rev in the revlog,
46 46 # we check revision against repotiprev.
47 47 opener = vfsmod.readonlyvfs(opener)
48 48 target = getattr(revlog2, 'target', None)
49 49 if target is None:
50 50 # a revlog wrapper, eg: the manifestlog that is not an actual revlog
51 51 target = revlog2._revlog.target
52 52 revlog.revlog.__init__(self, opener, target=target, radix=radix)
53 53 self.revlog2 = revlog2
54 54
55 55 n = len(self)
56 56 self.repotiprev = n - 1
57 57 self.bundlerevs = set() # used by 'bundle()' revset expression
58 58 for rev2 in self.revlog2:
59 59 rev = self.revlog2.index[rev2]
60 60 # rev numbers - in revlog2, very different from self.rev
61 61 (
62 62 _start,
63 63 _csize,
64 64 rsize,
65 65 base,
66 66 linkrev,
67 67 p1rev,
68 68 p2rev,
69 69 node,
70 70 _sdo,
71 71 _sds,
72 72 _dcm,
73 73 _sdcm,
74 74 rank,
75 75 ) = rev
76 76 flags = _start & 0xFFFF
77 77
78 78 if linkmapper is None: # link is to same revlog
79 79 assert linkrev == rev2 # we never link back
80 80 link = n
81 81 else: # rev must be mapped from repo2 cl to unified cl by linkmapper
82 82 link = linkmapper(linkrev)
83 83
84 84 if linkmapper is not None: # the delta base must also be remapped via linkmapper
85 85 base = linkmapper(base)
86 86
87 87 this_rev = self.index.get_rev(node)
88 88 if this_rev is not None:
89 89 # this happens for the common revlog revisions
90 90 self.bundlerevs.add(this_rev)
91 91 continue
92 92
93 93 p1node = self.revlog2.node(p1rev)
94 94 p2node = self.revlog2.node(p2rev)
95 95
96 96 # TODO: it's probably wrong to set compressed length to -1, but
97 97 # I have no idea if csize is valid in the base revlog context.
98 98 e = (
99 99 flags,
100 100 -1,
101 101 rsize,
102 102 base,
103 103 link,
104 104 self.rev(p1node),
105 105 self.rev(p2node),
106 106 node,
107 107 0, # sidedata offset
108 108 0, # sidedata size
109 109 revlog_constants.COMP_MODE_INLINE,
110 110 revlog_constants.COMP_MODE_INLINE,
111 111 rank,
112 112 )
113 113 self.index.append(e)
114 114 self.bundlerevs.add(n)
115 115 n += 1
116 116
117 117 @contextlib.contextmanager
118 118 def reading(self):
119 119 if 0 <= len(self.bundlerevs) < len(self.index):
120 120 read_1 = super().reading
121 121 else:
122 122 read_1 = util.nullcontextmanager
123 123 if 0 < len(self.bundlerevs):
124 124 read_2 = self.revlog2.reading
125 125 else:
126 126 read_2 = util.nullcontextmanager
127 127 with read_1(), read_2():
128 128 yield
129 129
130 def _chunk(self, rev, df=None):
130 def _chunk(self, rev):
131 131 if rev <= self.repotiprev:
132 132 return revlog.revlog._chunk(self, rev)
133 133 return self.revlog2._chunk(self.node(rev))
134 134
135 135 def revdiff(self, rev1, rev2):
136 136 """return or calculate a delta between two revisions"""
137 137 if rev1 > self.repotiprev and rev2 > self.repotiprev:
138 138 return self.revlog2.revdiff(
139 139 self.revlog2.rev(self.node(rev1)),
140 140 self.revlog2.rev(self.node(rev2)),
141 141 )
142 142 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
143 143 return super(unionrevlog, self).revdiff(rev1, rev2)
144 144
145 145 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
146 146
147 def _revisiondata(self, nodeorrev, _df=None, raw=False):
147 def _revisiondata(self, nodeorrev, raw=False):
148 148 if isinstance(nodeorrev, int):
149 149 rev = nodeorrev
150 150 node = self.node(rev)
151 151 else:
152 152 node = nodeorrev
153 153 rev = self.rev(node)
154 154
155 155 if rev > self.repotiprev:
156 156 # work around manifestrevlog NOT being a revlog
157 157 revlog2 = getattr(self.revlog2, '_revlog', self.revlog2)
158 158 func = revlog2._revisiondata
159 159 else:
160 160 func = super(unionrevlog, self)._revisiondata
161 return func(node, _df=_df, raw=raw)
161 return func(node, raw=raw)
162 162
163 163 def addrevision(
164 164 self,
165 165 text,
166 166 transaction,
167 167 link,
168 168 p1,
169 169 p2,
170 170 cachedelta=None,
171 171 node=None,
172 172 flags=revlog.REVIDX_DEFAULT_FLAGS,
173 173 deltacomputer=None,
174 174 sidedata=None,
175 175 ):
176 176 raise NotImplementedError
177 177
178 178 def addgroup(
179 179 self,
180 180 deltas,
181 181 linkmapper,
182 182 transaction,
183 183 alwayscache=False,
184 184 addrevisioncb=None,
185 185 duplicaterevisioncb=None,
186 186 debug_info=None,
187 187 delta_base_reuse_policy=None,
188 188 ):
189 189 raise NotImplementedError
190 190
191 191 def strip(self, minlink, transaction):
192 192 raise NotImplementedError
193 193
194 194 def checksize(self):
195 195 raise NotImplementedError
196 196
197 197
198 198 class unionchangelog(unionrevlog, changelog.changelog):
199 199 def __init__(self, opener, opener2):
200 200 changelog.changelog.__init__(self, opener)
201 201 linkmapper = None
202 202 changelog2 = changelog.changelog(opener2)
203 203 unionrevlog.__init__(self, opener, self.radix, changelog2, linkmapper)
204 204
205 205
206 206 class unionmanifest(unionrevlog, manifest.manifestrevlog):
207 207 def __init__(self, nodeconstants, opener, opener2, linkmapper):
208 208 manifest.manifestrevlog.__init__(self, nodeconstants, opener)
209 209 manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
210 210 unionrevlog.__init__(
211 211 self, opener, self._revlog.radix, manifest2, linkmapper
212 212 )
213 213
214 214
215 215 class unionfilelog(filelog.filelog):
216 216 def __init__(self, opener, path, opener2, linkmapper, repo):
217 217 filelog.filelog.__init__(self, opener, path)
218 218 filelog2 = filelog.filelog(opener2, path)
219 219 self._revlog = unionrevlog(
220 220 opener, self._revlog.radix, filelog2._revlog, linkmapper
221 221 )
222 222 self._repo = repo
223 223 self.repotiprev = self._revlog.repotiprev
224 224 self.revlog2 = self._revlog.revlog2
225 225
226 226 def iscensored(self, rev):
227 227 """Check if a revision is censored."""
228 228 if rev <= self.repotiprev:
229 229 return filelog.filelog.iscensored(self, rev)
230 230 node = self.node(rev)
231 231 return self.revlog2.iscensored(self.revlog2.rev(node))
232 232
233 233
234 234 class unionpeer(localrepo.localpeer):
235 235 def canpush(self):
236 236 return False
237 237
238 238
239 239 class unionrepository:
240 240 """Represents the union of data in 2 repositories.
241 241
242 242 Instances are not usable if constructed directly. Use ``instance()``
243 243 or ``makeunionrepository()`` to create a usable instance.
244 244 """
245 245
246 246 def __init__(self, repo2, url):
247 247 self.repo2 = repo2
248 248 self._url = url
249 249
250 250 self.ui.setconfig(b'phases', b'publish', False, b'unionrepo')
251 251
252 252 @localrepo.unfilteredpropertycache
253 253 def changelog(self):
254 254 return unionchangelog(self.svfs, self.repo2.svfs)
255 255
256 256 @localrepo.unfilteredpropertycache
257 257 def manifestlog(self):
258 258 rootstore = unionmanifest(
259 259 self.nodeconstants,
260 260 self.svfs,
261 261 self.repo2.svfs,
262 262 self.unfiltered()._clrev,
263 263 )
264 264 return manifest.manifestlog(
265 265 self.svfs, self, rootstore, self.narrowmatch()
266 266 )
267 267
268 268 def _clrev(self, rev2):
269 269 """map from repo2 changelog rev to temporary rev in self.changelog"""
270 270 node = self.repo2.changelog.node(rev2)
271 271 return self.changelog.rev(node)
272 272
273 273 def url(self):
274 274 return self._url
275 275
276 276 def file(self, f):
277 277 return unionfilelog(
278 278 self.svfs, f, self.repo2.svfs, self.unfiltered()._clrev, self
279 279 )
280 280
281 281 def close(self):
282 282 self.repo2.close()
283 283
284 284 def cancopy(self):
285 285 return False
286 286
287 287 def peer(self, path=None, remotehidden=False):
288 288 return unionpeer(self, path=None, remotehidden=remotehidden)
289 289
290 290 def getcwd(self):
291 291 return encoding.getcwd() # always outside the repo
292 292
293 293
294 294 def instance(ui, path, create, intents=None, createopts=None):
295 295 if create:
296 296 raise error.Abort(_(b'cannot create new union repository'))
297 297 parentpath = ui.config(b"bundle", b"mainreporoot")
298 298 if not parentpath:
299 299 # try to find the correct path to the working directory repo
300 300 parentpath = cmdutil.findrepo(encoding.getcwd())
301 301 if parentpath is None:
302 302 parentpath = b''
303 303 if parentpath:
304 304 # Try to make the full path relative so we get a nice, short URL.
305 305 # In particular, we don't want temp dir names in test outputs.
306 306 cwd = encoding.getcwd()
307 307 if parentpath == cwd:
308 308 parentpath = b''
309 309 else:
310 310 cwd = pathutil.normasprefix(cwd)
311 311 if parentpath.startswith(cwd):
312 312 parentpath = parentpath[len(cwd) :]
313 313 if path.startswith(b'union:'):
314 314 s = path.split(b":", 1)[1].split(b"+", 1)
315 315 if len(s) == 1:
316 316 repopath, repopath2 = parentpath, s[0]
317 317 else:
318 318 repopath, repopath2 = s
319 319 else:
320 320 repopath, repopath2 = parentpath, path
321 321
322 322 return makeunionrepository(ui, repopath, repopath2)
323 323
324 324
325 325 def makeunionrepository(ui, repopath1, repopath2):
326 326 """Make a union repository object from 2 local repo paths."""
327 327 repo1 = localrepo.instance(ui, repopath1, create=False)
328 328 repo2 = localrepo.instance(ui, repopath2, create=False)
329 329
330 330 url = b'union:%s+%s' % (
331 331 util.expandpath(repopath1),
332 332 util.expandpath(repopath2),
333 333 )
334 334
335 335 class derivedunionrepository(unionrepository, repo1.__class__):
336 336 pass
337 337
338 338 repo = repo1
339 339 repo.__class__ = derivedunionrepository
340 340 unionrepository.__init__(repo1, repo2, url)
341 341
342 342 return repo
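To round off the union-repository plumbing above, a hypothetical construction mirroring what instance() does for a `union:repo1+repo2` path (`ui` is assumed to be an existing ui object; the paths are placeholders):

    repo = makeunionrepository(ui, b'/path/to/repo1', b'/path/to/repo2')
    # read-only operations (log, diff, revsets) now see both repositories;
    # pushing is refused by unionpeer.canpush().
    repo.close()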