revlog: add an experimental option to mitigate delta issues (issue5480)...
marmoute
r33202:895ecec3 default
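The diff below wires the new knob into _applyopenerreqs(): experimental.maxdeltachainspan is read with ui.configbytes() and, when non-negative, forwarded to the revlog layer through the store opener options (svfs.options['maxdeltachainspan']). A minimal sketch of an hgrc that would exercise it, using only the section and option name visible in the diff; the 4M value is purely illustrative, and the default of -1 leaves the option unset:

    [experimental]
    # any byte quantity accepted by ui.configbytes(); the default -1 disables the cap
    maxdeltachainspan = 4M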
@@ -1,2107 +1,2110 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class _basefilecache(scmutil.filecache):
70 70 """All filecache usage on repo are done for logic that should be unfiltered
71 71 """
72 72 def __get__(self, repo, type=None):
73 73 if repo is None:
74 74 return self
75 75 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
76 76 def __set__(self, repo, value):
77 77 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
78 78 def __delete__(self, repo):
79 79 return super(_basefilecache, self).__delete__(repo.unfiltered())
80 80
81 81 class repofilecache(_basefilecache):
82 82 """filecache for files in .hg but outside of .hg/store"""
83 83 def join(self, obj, fname):
84 84 return obj.vfs.join(fname)
85 85
86 86 class storecache(_basefilecache):
87 87 """filecache for files in the store"""
88 88 def join(self, obj, fname):
89 89 return obj.sjoin(fname)
90 90
91 91 class unfilteredpropertycache(util.propertycache):
92 92 """propertycache that apply to unfiltered repo only"""
93 93
94 94 def __get__(self, repo, type=None):
95 95 unfi = repo.unfiltered()
96 96 if unfi is repo:
97 97 return super(unfilteredpropertycache, self).__get__(unfi)
98 98 return getattr(unfi, self.name)
99 99
100 100 class filteredpropertycache(util.propertycache):
101 101 """propertycache that must take filtering in account"""
102 102
103 103 def cachevalue(self, obj, value):
104 104 object.__setattr__(obj, self.name, value)
105 105
106 106
107 107 def hasunfilteredcache(repo, name):
108 108 """check if a repo has an unfilteredpropertycache value for <name>"""
109 109 return name in vars(repo.unfiltered())
110 110
111 111 def unfilteredmethod(orig):
112 112 """decorate method that always need to be run on unfiltered version"""
113 113 def wrapper(repo, *args, **kwargs):
114 114 return orig(repo.unfiltered(), *args, **kwargs)
115 115 return wrapper
116 116
117 117 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
118 118 'unbundle'}
119 119 legacycaps = moderncaps.union({'changegroupsubset'})
120 120
121 121 class localpeer(peer.peerrepository):
122 122 '''peer for a local repo; reflects only the most recent API'''
123 123
124 124 def __init__(self, repo, caps=None):
125 125 if caps is None:
126 126 caps = moderncaps.copy()
127 127 peer.peerrepository.__init__(self)
128 128 self._repo = repo.filtered('served')
129 129 self.ui = repo.ui
130 130 self._caps = repo._restrictcapabilities(caps)
131 131 self.requirements = repo.requirements
132 132 self.supportedformats = repo.supportedformats
133 133
134 134 def close(self):
135 135 self._repo.close()
136 136
137 137 def _capabilities(self):
138 138 return self._caps
139 139
140 140 def local(self):
141 141 return self._repo
142 142
143 143 def canpush(self):
144 144 return True
145 145
146 146 def url(self):
147 147 return self._repo.url()
148 148
149 149 def lookup(self, key):
150 150 return self._repo.lookup(key)
151 151
152 152 def branchmap(self):
153 153 return self._repo.branchmap()
154 154
155 155 def heads(self):
156 156 return self._repo.heads()
157 157
158 158 def known(self, nodes):
159 159 return self._repo.known(nodes)
160 160
161 161 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
162 162 **kwargs):
163 163 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
164 164 common=common, bundlecaps=bundlecaps,
165 165 **kwargs)
166 166 cb = util.chunkbuffer(chunks)
167 167
168 168 if exchange.bundle2requested(bundlecaps):
169 169 # When requesting a bundle2, getbundle returns a stream to make the
170 170 # wire level function happier. We need to build a proper object
171 171 # from it in local peer.
172 172 return bundle2.getunbundler(self.ui, cb)
173 173 else:
174 174 return changegroup.getunbundler('01', cb, None)
175 175
176 176 # TODO We might want to move the next two calls into legacypeer and add
177 177 # unbundle instead.
178 178
179 179 def unbundle(self, cg, heads, url):
180 180 """apply a bundle on a repo
181 181
182 182 This function handles the repo locking itself."""
183 183 try:
184 184 try:
185 185 cg = exchange.readbundle(self.ui, cg, None)
186 186 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
187 187 if util.safehasattr(ret, 'getchunks'):
188 188 # This is a bundle20 object, turn it into an unbundler.
189 189 # This little dance should be dropped eventually when the
190 190 # API is finally improved.
191 191 stream = util.chunkbuffer(ret.getchunks())
192 192 ret = bundle2.getunbundler(self.ui, stream)
193 193 return ret
194 194 except Exception as exc:
195 195 # If the exception contains output salvaged from a bundle2
196 196 # reply, we need to make sure it is printed before continuing
197 197 # to fail. So we build a bundle2 with such output and consume
198 198 # it directly.
199 199 #
200 200 # This is not very elegant but allows a "simple" solution for
201 201 # issue4594
202 202 output = getattr(exc, '_bundle2salvagedoutput', ())
203 203 if output:
204 204 bundler = bundle2.bundle20(self._repo.ui)
205 205 for out in output:
206 206 bundler.addpart(out)
207 207 stream = util.chunkbuffer(bundler.getchunks())
208 208 b = bundle2.getunbundler(self.ui, stream)
209 209 bundle2.processbundle(self._repo, b)
210 210 raise
211 211 except error.PushRaced as exc:
212 212 raise error.ResponseError(_('push failed:'), str(exc))
213 213
214 214 def lock(self):
215 215 return self._repo.lock()
216 216
217 217 def pushkey(self, namespace, key, old, new):
218 218 return self._repo.pushkey(namespace, key, old, new)
219 219
220 220 def listkeys(self, namespace):
221 221 return self._repo.listkeys(namespace)
222 222
223 223 def debugwireargs(self, one, two, three=None, four=None, five=None):
224 224 '''used to test argument passing over the wire'''
225 225 return "%s %s %s %s %s" % (one, two, three, four, five)
226 226
227 227 class locallegacypeer(localpeer):
228 228 '''peer extension which implements legacy methods too; used for tests with
229 229 restricted capabilities'''
230 230
231 231 def __init__(self, repo):
232 232 localpeer.__init__(self, repo, caps=legacycaps)
233 233
234 234 def branches(self, nodes):
235 235 return self._repo.branches(nodes)
236 236
237 237 def between(self, pairs):
238 238 return self._repo.between(pairs)
239 239
240 240 def changegroup(self, basenodes, source):
241 241 return changegroup.changegroup(self._repo, basenodes, source)
242 242
243 243 def changegroupsubset(self, bases, heads, source):
244 244 return changegroup.changegroupsubset(self._repo, bases, heads, source)
245 245
246 246 # Increment the sub-version when the revlog v2 format changes to lock out old
247 247 # clients.
248 248 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
249 249
250 250 class localrepository(object):
251 251
252 252 supportedformats = {
253 253 'revlogv1',
254 254 'generaldelta',
255 255 'treemanifest',
256 256 'manifestv2',
257 257 REVLOGV2_REQUIREMENT,
258 258 }
259 259 _basesupported = supportedformats | {
260 260 'store',
261 261 'fncache',
262 262 'shared',
263 263 'relshared',
264 264 'dotencode',
265 265 }
266 266 openerreqs = {
267 267 'revlogv1',
268 268 'generaldelta',
269 269 'treemanifest',
270 270 'manifestv2',
271 271 }
272 272
273 273 # a list of (ui, featureset) functions.
274 274 # only functions defined in module of enabled extensions are invoked
275 275 featuresetupfuncs = set()
276 276
277 277 def __init__(self, baseui, path, create=False):
278 278 self.requirements = set()
279 279 self.filtername = None
280 280 # wvfs: rooted at the repository root, used to access the working copy
281 281 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
282 282 # vfs: rooted at .hg, used to access repo files outside of .hg/store
283 283 self.vfs = None
284 284 # svfs: usually rooted at .hg/store, used to access repository history
285 285 # If this is a shared repository, this vfs may point to another
286 286 # repository's .hg/store directory.
287 287 self.svfs = None
288 288 self.root = self.wvfs.base
289 289 self.path = self.wvfs.join(".hg")
290 290 self.origroot = path
291 291 self.auditor = pathutil.pathauditor(self.root, self._checknested)
292 292 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
293 293 realfs=False)
294 294 self.vfs = vfsmod.vfs(self.path)
295 295 self.baseui = baseui
296 296 self.ui = baseui.copy()
297 297 self.ui.copy = baseui.copy # prevent copying repo configuration
298 298 # A list of callbacks to shape the phase if no data were found.
299 299 # Callbacks are in the form: func(repo, roots) --> processed root.
300 300 # This list is to be filled by extensions during repo setup
301 301 self._phasedefaults = []
302 302 try:
303 303 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
304 304 self._loadextensions()
305 305 except IOError:
306 306 pass
307 307
308 308 if self.featuresetupfuncs:
309 309 self.supported = set(self._basesupported) # use private copy
310 310 extmods = set(m.__name__ for n, m
311 311 in extensions.extensions(self.ui))
312 312 for setupfunc in self.featuresetupfuncs:
313 313 if setupfunc.__module__ in extmods:
314 314 setupfunc(self.ui, self.supported)
315 315 else:
316 316 self.supported = self._basesupported
317 317 color.setup(self.ui)
318 318
319 319 # Add compression engines.
320 320 for name in util.compengines:
321 321 engine = util.compengines[name]
322 322 if engine.revlogheader():
323 323 self.supported.add('exp-compression-%s' % name)
324 324
325 325 if not self.vfs.isdir():
326 326 if create:
327 327 self.requirements = newreporequirements(self)
328 328
329 329 if not self.wvfs.exists():
330 330 self.wvfs.makedirs()
331 331 self.vfs.makedir(notindexed=True)
332 332
333 333 if 'store' in self.requirements:
334 334 self.vfs.mkdir("store")
335 335
336 336 # create an invalid changelog
337 337 self.vfs.append(
338 338 "00changelog.i",
339 339 '\0\0\0\2' # represents revlogv2
340 340 ' dummy changelog to prevent using the old repo layout'
341 341 )
342 342 else:
343 343 raise error.RepoError(_("repository %s not found") % path)
344 344 elif create:
345 345 raise error.RepoError(_("repository %s already exists") % path)
346 346 else:
347 347 try:
348 348 self.requirements = scmutil.readrequires(
349 349 self.vfs, self.supported)
350 350 except IOError as inst:
351 351 if inst.errno != errno.ENOENT:
352 352 raise
353 353
354 354 self.sharedpath = self.path
355 355 try:
356 356 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
357 357 if 'relshared' in self.requirements:
358 358 sharedpath = self.vfs.join(sharedpath)
359 359 vfs = vfsmod.vfs(sharedpath, realpath=True)
360 360 s = vfs.base
361 361 if not vfs.exists():
362 362 raise error.RepoError(
363 363 _('.hg/sharedpath points to nonexistent directory %s') % s)
364 364 self.sharedpath = s
365 365 except IOError as inst:
366 366 if inst.errno != errno.ENOENT:
367 367 raise
368 368
369 369 self.store = store.store(
370 370 self.requirements, self.sharedpath, vfsmod.vfs)
371 371 self.spath = self.store.path
372 372 self.svfs = self.store.vfs
373 373 self.sjoin = self.store.join
374 374 self.vfs.createmode = self.store.createmode
375 375 self._applyopenerreqs()
376 376 if create:
377 377 self._writerequirements()
378 378
379 379 self._dirstatevalidatewarned = False
380 380
381 381 self._branchcaches = {}
382 382 self._revbranchcache = None
383 383 self.filterpats = {}
384 384 self._datafilters = {}
385 385 self._transref = self._lockref = self._wlockref = None
386 386
387 387 # A cache for various files under .hg/ that tracks file changes,
388 388 # (used by the filecache decorator)
389 389 #
390 390 # Maps a property name to its util.filecacheentry
391 391 self._filecache = {}
392 392
393 393 # holds sets of revisions to be filtered
394 394 # should be cleared when something might have changed the filter value:
395 395 # - new changesets,
396 396 # - phase change,
397 397 # - new obsolescence marker,
398 398 # - working directory parent change,
399 399 # - bookmark changes
400 400 self.filteredrevcache = {}
401 401
402 402 # post-dirstate-status hooks
403 403 self._postdsstatus = []
404 404
405 405 # generic mapping between names and nodes
406 406 self.names = namespaces.namespaces()
407 407
408 408 def close(self):
409 409 self._writecaches()
410 410
411 411 def _loadextensions(self):
412 412 extensions.loadall(self.ui)
413 413
414 414 def _writecaches(self):
415 415 if self._revbranchcache:
416 416 self._revbranchcache.write()
417 417
418 418 def _restrictcapabilities(self, caps):
419 419 if self.ui.configbool('experimental', 'bundle2-advertise', True):
420 420 caps = set(caps)
421 421 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
422 422 caps.add('bundle2=' + urlreq.quote(capsblob))
423 423 return caps
424 424
425 425 def _applyopenerreqs(self):
426 426 self.svfs.options = dict((r, 1) for r in self.requirements
427 427 if r in self.openerreqs)
428 428 # experimental config: format.chunkcachesize
429 429 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
430 430 if chunkcachesize is not None:
431 431 self.svfs.options['chunkcachesize'] = chunkcachesize
432 432 # experimental config: format.maxchainlen
433 433 maxchainlen = self.ui.configint('format', 'maxchainlen')
434 434 if maxchainlen is not None:
435 435 self.svfs.options['maxchainlen'] = maxchainlen
436 436 # experimental config: format.manifestcachesize
437 437 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
438 438 if manifestcachesize is not None:
439 439 self.svfs.options['manifestcachesize'] = manifestcachesize
440 440 # experimental config: format.aggressivemergedeltas
441 441 aggressivemergedeltas = self.ui.configbool('format',
442 442 'aggressivemergedeltas', False)
443 443 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
444 444 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
445 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
446 if 0 <= chainspan:
447 self.svfs.options['maxdeltachainspan'] = chainspan
445 448
446 449 for r in self.requirements:
447 450 if r.startswith('exp-compression-'):
448 451 self.svfs.options['compengine'] = r[len('exp-compression-'):]
449 452
450 453 # TODO move "revlogv2" to openerreqs once finalized.
451 454 if REVLOGV2_REQUIREMENT in self.requirements:
452 455 self.svfs.options['revlogv2'] = True
453 456
454 457 def _writerequirements(self):
455 458 scmutil.writerequires(self.vfs, self.requirements)
456 459
457 460 def _checknested(self, path):
458 461 """Determine if path is a legal nested repository."""
459 462 if not path.startswith(self.root):
460 463 return False
461 464 subpath = path[len(self.root) + 1:]
462 465 normsubpath = util.pconvert(subpath)
463 466
464 467 # XXX: Checking against the current working copy is wrong in
465 468 # the sense that it can reject things like
466 469 #
467 470 # $ hg cat -r 10 sub/x.txt
468 471 #
469 472 # if sub/ is no longer a subrepository in the working copy
470 473 # parent revision.
471 474 #
472 475 # However, it can of course also allow things that would have
473 476 # been rejected before, such as the above cat command if sub/
474 477 # is a subrepository now, but was a normal directory before.
475 478 # The old path auditor would have rejected by mistake since it
476 479 # panics when it sees sub/.hg/.
477 480 #
478 481 # All in all, checking against the working copy seems sensible
479 482 # since we want to prevent access to nested repositories on
480 483 # the filesystem *now*.
481 484 ctx = self[None]
482 485 parts = util.splitpath(subpath)
483 486 while parts:
484 487 prefix = '/'.join(parts)
485 488 if prefix in ctx.substate:
486 489 if prefix == normsubpath:
487 490 return True
488 491 else:
489 492 sub = ctx.sub(prefix)
490 493 return sub.checknested(subpath[len(prefix) + 1:])
491 494 else:
492 495 parts.pop()
493 496 return False
494 497
495 498 def peer(self):
496 499 return localpeer(self) # not cached to avoid reference cycle
497 500
498 501 def unfiltered(self):
499 502 """Return unfiltered version of the repository
500 503
501 504 Intended to be overwritten by filtered repo."""
502 505 return self
503 506
504 507 def filtered(self, name):
505 508 """Return a filtered version of a repository"""
506 509 # build a new class with the mixin and the current class
507 510 # (possibly subclass of the repo)
508 511 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
509 512 pass
510 513 return filteredrepo(self, name)
511 514
512 515 @repofilecache('bookmarks', 'bookmarks.current')
513 516 def _bookmarks(self):
514 517 return bookmarks.bmstore(self)
515 518
516 519 @property
517 520 def _activebookmark(self):
518 521 return self._bookmarks.active
519 522
520 523 # _phaserevs and _phasesets depend on changelog. what we need is to
521 524 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
522 525 # can't be easily expressed in filecache mechanism.
523 526 @storecache('phaseroots', '00changelog.i')
524 527 def _phasecache(self):
525 528 return phases.phasecache(self, self._phasedefaults)
526 529
527 530 @storecache('obsstore')
528 531 def obsstore(self):
529 532 return obsolete.makestore(self.ui, self)
530 533
531 534 @storecache('00changelog.i')
532 535 def changelog(self):
533 536 return changelog.changelog(self.svfs,
534 537 trypending=txnutil.mayhavepending(self.root))
535 538
536 539 def _constructmanifest(self):
537 540 # This is a temporary function while we migrate from manifest to
538 541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
539 542 # manifest creation.
540 543 return manifest.manifestrevlog(self.svfs)
541 544
542 545 @storecache('00manifest.i')
543 546 def manifestlog(self):
544 547 return manifest.manifestlog(self.svfs, self)
545 548
546 549 @repofilecache('dirstate')
547 550 def dirstate(self):
548 551 return dirstate.dirstate(self.vfs, self.ui, self.root,
549 552 self._dirstatevalidate)
550 553
551 554 def _dirstatevalidate(self, node):
552 555 try:
553 556 self.changelog.rev(node)
554 557 return node
555 558 except error.LookupError:
556 559 if not self._dirstatevalidatewarned:
557 560 self._dirstatevalidatewarned = True
558 561 self.ui.warn(_("warning: ignoring unknown"
559 562 " working parent %s!\n") % short(node))
560 563 return nullid
561 564
562 565 def __getitem__(self, changeid):
563 566 if changeid is None:
564 567 return context.workingctx(self)
565 568 if isinstance(changeid, slice):
566 569 # wdirrev isn't contiguous so the slice shouldn't include it
567 570 return [context.changectx(self, i)
568 571 for i in xrange(*changeid.indices(len(self)))
569 572 if i not in self.changelog.filteredrevs]
570 573 try:
571 574 return context.changectx(self, changeid)
572 575 except error.WdirUnsupported:
573 576 return context.workingctx(self)
574 577
575 578 def __contains__(self, changeid):
576 579 """True if the given changeid exists
577 580
578 581 error.LookupError is raised if an ambiguous node is specified.
579 582 """
580 583 try:
581 584 self[changeid]
582 585 return True
583 586 except error.RepoLookupError:
584 587 return False
585 588
586 589 def __nonzero__(self):
587 590 return True
588 591
589 592 __bool__ = __nonzero__
590 593
591 594 def __len__(self):
592 595 return len(self.changelog)
593 596
594 597 def __iter__(self):
595 598 return iter(self.changelog)
596 599
597 600 def revs(self, expr, *args):
598 601 '''Find revisions matching a revset.
599 602
600 603 The revset is specified as a string ``expr`` that may contain
601 604 %-formatting to escape certain types. See ``revsetlang.formatspec``.
602 605
603 606 Revset aliases from the configuration are not expanded. To expand
604 607 user aliases, consider calling ``scmutil.revrange()`` or
605 608 ``repo.anyrevs([expr], user=True)``.
606 609
607 610 Returns a revset.abstractsmartset, which is a list-like interface
608 611 that contains integer revisions.
609 612 '''
610 613 expr = revsetlang.formatspec(expr, *args)
611 614 m = revset.match(None, expr)
612 615 return m(self)
613 616
614 617 def set(self, expr, *args):
615 618 '''Find revisions matching a revset and emit changectx instances.
616 619
617 620 This is a convenience wrapper around ``revs()`` that iterates the
618 621 result and is a generator of changectx instances.
619 622
620 623 Revset aliases from the configuration are not expanded. To expand
621 624 user aliases, consider calling ``scmutil.revrange()``.
622 625 '''
623 626 for r in self.revs(expr, *args):
624 627 yield self[r]
625 628
626 629 def anyrevs(self, specs, user=False):
627 630 '''Find revisions matching one of the given revsets.
628 631
629 632 Revset aliases from the configuration are not expanded by default. To
630 633 expand user aliases, specify ``user=True``.
631 634 '''
632 635 if user:
633 636 m = revset.matchany(self.ui, specs, repo=self)
634 637 else:
635 638 m = revset.matchany(None, specs)
636 639 return m(self)
637 640
638 641 def url(self):
639 642 return 'file:' + self.root
640 643
641 644 def hook(self, name, throw=False, **args):
642 645 """Call a hook, passing this repo instance.
643 646
644 647 This is a convenience method to aid invoking hooks. Extensions likely
645 648 won't call this unless they have registered a custom hook or are
646 649 replacing code that is expected to call a hook.
647 650 """
648 651 return hook.hook(self.ui, self, name, throw, **args)
649 652
650 653 @filteredpropertycache
651 654 def _tagscache(self):
652 655 '''Returns a tagscache object that contains various tags related
653 656 caches.'''
654 657
655 658 # This simplifies its cache management by having one decorated
656 659 # function (this one) and the rest simply fetch things from it.
657 660 class tagscache(object):
658 661 def __init__(self):
659 662 # These two define the set of tags for this repository. tags
660 663 # maps tag name to node; tagtypes maps tag name to 'global' or
661 664 # 'local'. (Global tags are defined by .hgtags across all
662 665 # heads, and local tags are defined in .hg/localtags.)
663 666 # They constitute the in-memory cache of tags.
664 667 self.tags = self.tagtypes = None
665 668
666 669 self.nodetagscache = self.tagslist = None
667 670
668 671 cache = tagscache()
669 672 cache.tags, cache.tagtypes = self._findtags()
670 673
671 674 return cache
672 675
673 676 def tags(self):
674 677 '''return a mapping of tag to node'''
675 678 t = {}
676 679 if self.changelog.filteredrevs:
677 680 tags, tt = self._findtags()
678 681 else:
679 682 tags = self._tagscache.tags
680 683 for k, v in tags.iteritems():
681 684 try:
682 685 # ignore tags to unknown nodes
683 686 self.changelog.rev(v)
684 687 t[k] = v
685 688 except (error.LookupError, ValueError):
686 689 pass
687 690 return t
688 691
689 692 def _findtags(self):
690 693 '''Do the hard work of finding tags. Return a pair of dicts
691 694 (tags, tagtypes) where tags maps tag name to node, and tagtypes
692 695 maps tag name to a string like \'global\' or \'local\'.
693 696 Subclasses or extensions are free to add their own tags, but
694 697 should be aware that the returned dicts will be retained for the
695 698 duration of the localrepo object.'''
696 699
697 700 # XXX what tagtype should subclasses/extensions use? Currently
698 701 # mq and bookmarks add tags, but do not set the tagtype at all.
699 702 # Should each extension invent its own tag type? Should there
700 703 # be one tagtype for all such "virtual" tags? Or is the status
701 704 # quo fine?
702 705
703 706
704 707 # map tag name to (node, hist)
705 708 alltags = tagsmod.findglobaltags(self.ui, self)
706 709 # map tag name to tag type
707 710 tagtypes = dict((tag, 'global') for tag in alltags)
708 711
709 712 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
710 713
711 714 # Build the return dicts. Have to re-encode tag names because
712 715 # the tags module always uses UTF-8 (in order not to lose info
713 716 # writing to the cache), but the rest of Mercurial wants them in
714 717 # local encoding.
715 718 tags = {}
716 719 for (name, (node, hist)) in alltags.iteritems():
717 720 if node != nullid:
718 721 tags[encoding.tolocal(name)] = node
719 722 tags['tip'] = self.changelog.tip()
720 723 tagtypes = dict([(encoding.tolocal(name), value)
721 724 for (name, value) in tagtypes.iteritems()])
722 725 return (tags, tagtypes)
723 726
724 727 def tagtype(self, tagname):
725 728 '''
726 729 return the type of the given tag. result can be:
727 730
728 731 'local' : a local tag
729 732 'global' : a global tag
730 733 None : tag does not exist
731 734 '''
732 735
733 736 return self._tagscache.tagtypes.get(tagname)
734 737
735 738 def tagslist(self):
736 739 '''return a list of tags ordered by revision'''
737 740 if not self._tagscache.tagslist:
738 741 l = []
739 742 for t, n in self.tags().iteritems():
740 743 l.append((self.changelog.rev(n), t, n))
741 744 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
742 745
743 746 return self._tagscache.tagslist
744 747
745 748 def nodetags(self, node):
746 749 '''return the tags associated with a node'''
747 750 if not self._tagscache.nodetagscache:
748 751 nodetagscache = {}
749 752 for t, n in self._tagscache.tags.iteritems():
750 753 nodetagscache.setdefault(n, []).append(t)
751 754 for tags in nodetagscache.itervalues():
752 755 tags.sort()
753 756 self._tagscache.nodetagscache = nodetagscache
754 757 return self._tagscache.nodetagscache.get(node, [])
755 758
756 759 def nodebookmarks(self, node):
757 760 """return the list of bookmarks pointing to the specified node"""
758 761 marks = []
759 762 for bookmark, n in self._bookmarks.iteritems():
760 763 if n == node:
761 764 marks.append(bookmark)
762 765 return sorted(marks)
763 766
764 767 def branchmap(self):
765 768 '''returns a dictionary {branch: [branchheads]} with branchheads
766 769 ordered by increasing revision number'''
767 770 branchmap.updatecache(self)
768 771 return self._branchcaches[self.filtername]
769 772
770 773 @unfilteredmethod
771 774 def revbranchcache(self):
772 775 if not self._revbranchcache:
773 776 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
774 777 return self._revbranchcache
775 778
776 779 def branchtip(self, branch, ignoremissing=False):
777 780 '''return the tip node for a given branch
778 781
779 782 If ignoremissing is True, then this method will not raise an error.
780 783 This is helpful for callers that only expect None for a missing branch
781 784 (e.g. namespace).
782 785
783 786 '''
784 787 try:
785 788 return self.branchmap().branchtip(branch)
786 789 except KeyError:
787 790 if not ignoremissing:
788 791 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
789 792 else:
790 793 pass
791 794
792 795 def lookup(self, key):
793 796 return self[key].node()
794 797
795 798 def lookupbranch(self, key, remote=None):
796 799 repo = remote or self
797 800 if key in repo.branchmap():
798 801 return key
799 802
800 803 repo = (remote and remote.local()) and remote or self
801 804 return repo[key].branch()
802 805
803 806 def known(self, nodes):
804 807 cl = self.changelog
805 808 nm = cl.nodemap
806 809 filtered = cl.filteredrevs
807 810 result = []
808 811 for n in nodes:
809 812 r = nm.get(n)
810 813 resp = not (r is None or r in filtered)
811 814 result.append(resp)
812 815 return result
813 816
814 817 def local(self):
815 818 return self
816 819
817 820 def publishing(self):
818 821 # it's safe (and desirable) to trust the publish flag unconditionally
819 822 # so that we don't finalize changes shared between users via ssh or nfs
820 823 return self.ui.configbool('phases', 'publish', True, untrusted=True)
821 824
822 825 def cancopy(self):
823 826 # so statichttprepo's override of local() works
824 827 if not self.local():
825 828 return False
826 829 if not self.publishing():
827 830 return True
828 831 # if publishing we can't copy if there is filtered content
829 832 return not self.filtered('visible').changelog.filteredrevs
830 833
831 834 def shared(self):
832 835 '''the type of shared repository (None if not shared)'''
833 836 if self.sharedpath != self.path:
834 837 return 'store'
835 838 return None
836 839
837 840 def wjoin(self, f, *insidef):
838 841 return self.vfs.reljoin(self.root, f, *insidef)
839 842
840 843 def file(self, f):
841 844 if f[0] == '/':
842 845 f = f[1:]
843 846 return filelog.filelog(self.svfs, f)
844 847
845 848 def changectx(self, changeid):
846 849 return self[changeid]
847 850
848 851 def setparents(self, p1, p2=nullid):
849 852 with self.dirstate.parentchange():
850 853 copies = self.dirstate.setparents(p1, p2)
851 854 pctx = self[p1]
852 855 if copies:
853 856 # Adjust copy records, the dirstate cannot do it, it
854 857 # requires access to parents manifests. Preserve them
855 858 # only for entries added to first parent.
856 859 for f in copies:
857 860 if f not in pctx and copies[f] in pctx:
858 861 self.dirstate.copy(copies[f], f)
859 862 if p2 == nullid:
860 863 for f, s in sorted(self.dirstate.copies().items()):
861 864 if f not in pctx and s not in pctx:
862 865 self.dirstate.copy(None, f)
863 866
864 867 def filectx(self, path, changeid=None, fileid=None):
865 868 """changeid can be a changeset revision, node, or tag.
866 869 fileid can be a file revision or node."""
867 870 return context.filectx(self, path, changeid, fileid)
868 871
869 872 def getcwd(self):
870 873 return self.dirstate.getcwd()
871 874
872 875 def pathto(self, f, cwd=None):
873 876 return self.dirstate.pathto(f, cwd)
874 877
875 878 def _loadfilter(self, filter):
876 879 if filter not in self.filterpats:
877 880 l = []
878 881 for pat, cmd in self.ui.configitems(filter):
879 882 if cmd == '!':
880 883 continue
881 884 mf = matchmod.match(self.root, '', [pat])
882 885 fn = None
883 886 params = cmd
884 887 for name, filterfn in self._datafilters.iteritems():
885 888 if cmd.startswith(name):
886 889 fn = filterfn
887 890 params = cmd[len(name):].lstrip()
888 891 break
889 892 if not fn:
890 893 fn = lambda s, c, **kwargs: util.filter(s, c)
891 894 # Wrap old filters not supporting keyword arguments
892 895 if not inspect.getargspec(fn)[2]:
893 896 oldfn = fn
894 897 fn = lambda s, c, **kwargs: oldfn(s, c)
895 898 l.append((mf, fn, params))
896 899 self.filterpats[filter] = l
897 900 return self.filterpats[filter]
898 901
899 902 def _filter(self, filterpats, filename, data):
900 903 for mf, fn, cmd in filterpats:
901 904 if mf(filename):
902 905 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
903 906 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
904 907 break
905 908
906 909 return data
907 910
908 911 @unfilteredpropertycache
909 912 def _encodefilterpats(self):
910 913 return self._loadfilter('encode')
911 914
912 915 @unfilteredpropertycache
913 916 def _decodefilterpats(self):
914 917 return self._loadfilter('decode')
915 918
916 919 def adddatafilter(self, name, filter):
917 920 self._datafilters[name] = filter
918 921
919 922 def wread(self, filename):
920 923 if self.wvfs.islink(filename):
921 924 data = self.wvfs.readlink(filename)
922 925 else:
923 926 data = self.wvfs.read(filename)
924 927 return self._filter(self._encodefilterpats, filename, data)
925 928
926 929 def wwrite(self, filename, data, flags, backgroundclose=False):
927 930 """write ``data`` into ``filename`` in the working directory
928 931
929 932 This returns the length of the written (maybe decoded) data.
930 933 """
931 934 data = self._filter(self._decodefilterpats, filename, data)
932 935 if 'l' in flags:
933 936 self.wvfs.symlink(data, filename)
934 937 else:
935 938 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
936 939 if 'x' in flags:
937 940 self.wvfs.setflags(filename, False, True)
938 941 return len(data)
939 942
940 943 def wwritedata(self, filename, data):
941 944 return self._filter(self._decodefilterpats, filename, data)
942 945
943 946 def currenttransaction(self):
944 947 """return the current transaction or None if non exists"""
945 948 if self._transref:
946 949 tr = self._transref()
947 950 else:
948 951 tr = None
949 952
950 953 if tr and tr.running():
951 954 return tr
952 955 return None
953 956
954 957 def transaction(self, desc, report=None):
955 958 if (self.ui.configbool('devel', 'all-warnings')
956 959 or self.ui.configbool('devel', 'check-locks')):
957 960 if self._currentlock(self._lockref) is None:
958 961 raise error.ProgrammingError('transaction requires locking')
959 962 tr = self.currenttransaction()
960 963 if tr is not None:
961 964 return tr.nest()
962 965
963 966 # abort here if the journal already exists
964 967 if self.svfs.exists("journal"):
965 968 raise error.RepoError(
966 969 _("abandoned transaction found"),
967 970 hint=_("run 'hg recover' to clean up transaction"))
968 971
969 972 idbase = "%.40f#%f" % (random.random(), time.time())
970 973 ha = hex(hashlib.sha1(idbase).digest())
971 974 txnid = 'TXN:' + ha
972 975 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
973 976
974 977 self._writejournal(desc)
975 978 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
976 979 if report:
977 980 rp = report
978 981 else:
979 982 rp = self.ui.warn
980 983 vfsmap = {'plain': self.vfs} # root of .hg/
981 984 # we must avoid cyclic reference between repo and transaction.
982 985 reporef = weakref.ref(self)
983 986 # Code to track tag movement
984 987 #
985 988 # Since tags are all handled as file content, it is actually quite hard
986 989 # to track these movements from a code perspective. So we fall back to
987 990 # tracking at the repository level. One could envision tracking changes
988 991 # to the '.hgtags' file through changegroup apply but that fails to
989 992 # cope with cases where a transaction exposes new heads without a changegroup
990 993 # being involved (eg: phase movement).
991 994 #
992 995 # For now, we gate the feature behind a flag since this likely comes
993 996 # with performance impacts. The current code runs more often than needed
994 997 # and does not use caches as much as it could. The current focus is on
995 998 # the behavior of the feature so we disable it by default. The flag
996 999 # will be removed when we are happy with the performance impact.
997 1000 #
998 1001 # Once this feature is no longer experimental move the following
999 1002 # documentation to the appropriate help section:
1000 1003 #
1001 1004 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1002 1005 # tags (new or changed or deleted tags). In addition the details of
1003 1006 # these changes are made available in a file at:
1004 1007 # ``REPOROOT/.hg/changes/tags.changes``.
1005 1008 # Make sure you check for HG_TAG_MOVED before reading that file as it
1006 1009 # might exist from a previous transaction even if no tags were touched
1007 1010 # in this one. Changes are recorded in a line-based format::
1008 1011 #
1009 1012 # <action> <hex-node> <tag-name>\n
1010 1013 #
1011 1014 # Actions are defined as follows:
1012 1015 # "-R": tag is removed,
1013 1016 # "+A": tag is added,
1014 1017 # "-M": tag is moved (old value),
1015 1018 # "+M": tag is moved (new value),
1016 1019 tracktags = lambda x: None
1017 1020 # experimental config: experimental.hook-track-tags
1018 1021 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1019 1022 False)
1020 1023 if desc != 'strip' and shouldtracktags:
1021 1024 oldheads = self.changelog.headrevs()
1022 1025 def tracktags(tr2):
1023 1026 repo = reporef()
1024 1027 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1025 1028 newheads = repo.changelog.headrevs()
1026 1029 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1027 1030 # notes: we compare lists here.
1028 1031 # As we do it only once, building a set would not be cheaper
1029 1032 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1030 1033 if changes:
1031 1034 tr2.hookargs['tag_moved'] = '1'
1032 1035 with repo.vfs('changes/tags.changes', 'w',
1033 1036 atomictemp=True) as changesfile:
1034 1037 # note: we do not register the file to the transaction
1035 1038 # because we need it to still exist when the transaction
1036 1039 # is closed (for txnclose hooks)
1037 1040 tagsmod.writediff(changesfile, changes)
1038 1041 def validate(tr2):
1039 1042 """will run pre-closing hooks"""
1040 1043 # XXX the transaction API is a bit lacking here so we take a hacky
1041 1044 # path for now
1042 1045 #
1043 1046 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1044 1047 # dict is copied before these run. In addition we need the data
1045 1048 # available to in-memory hooks too.
1046 1049 #
1047 1050 # Moreover, we also need to make sure this runs before txnclose
1048 1051 # hooks and there is no "pending" mechanism that would execute
1049 1052 # logic only if hooks are about to run.
1050 1053 #
1051 1054 # Fixing this limitation of the transaction is also needed to track
1052 1055 # other families of changes (bookmarks, phases, obsolescence).
1053 1056 #
1054 1057 # This will have to be fixed before we remove the experimental
1055 1058 # gating.
1056 1059 tracktags(tr2)
1057 1060 reporef().hook('pretxnclose', throw=True,
1058 1061 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1059 1062 def releasefn(tr, success):
1060 1063 repo = reporef()
1061 1064 if success:
1062 1065 # this should be explicitly invoked here, because
1063 1066 # in-memory changes aren't written out at closing
1064 1067 # transaction, if tr.addfilegenerator (via
1065 1068 # dirstate.write or so) isn't invoked while
1066 1069 # transaction running
1067 1070 repo.dirstate.write(None)
1068 1071 else:
1069 1072 # discard all changes (including ones already written
1070 1073 # out) in this transaction
1071 1074 repo.dirstate.restorebackup(None, prefix='journal.')
1072 1075
1073 1076 repo.invalidate(clearfilecache=True)
1074 1077
1075 1078 tr = transaction.transaction(rp, self.svfs, vfsmap,
1076 1079 "journal",
1077 1080 "undo",
1078 1081 aftertrans(renames),
1079 1082 self.store.createmode,
1080 1083 validator=validate,
1081 1084 releasefn=releasefn)
1082 1085 tr.changes['revs'] = set()
1083 1086
1084 1087 tr.hookargs['txnid'] = txnid
1085 1088 # note: writing the fncache only during finalize means that the file is
1086 1089 # outdated when running hooks. As fncache is used for streaming clone,
1087 1090 # this is not expected to break anything that happens during the hooks.
1088 1091 tr.addfinalize('flush-fncache', self.store.write)
1089 1092 def txnclosehook(tr2):
1090 1093 """To be run if transaction is successful, will schedule a hook run
1091 1094 """
1092 1095 # Don't reference tr2 in hook() so we don't hold a reference.
1093 1096 # This reduces memory consumption when there are multiple
1094 1097 # transactions per lock. This can likely go away if issue5045
1095 1098 # fixes the function accumulation.
1096 1099 hookargs = tr2.hookargs
1097 1100
1098 1101 def hook():
1099 1102 reporef().hook('txnclose', throw=False, txnname=desc,
1100 1103 **pycompat.strkwargs(hookargs))
1101 1104 reporef()._afterlock(hook)
1102 1105 tr.addfinalize('txnclose-hook', txnclosehook)
1103 1106 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1104 1107 def txnaborthook(tr2):
1105 1108 """To be run if transaction is aborted
1106 1109 """
1107 1110 reporef().hook('txnabort', throw=False, txnname=desc,
1108 1111 **tr2.hookargs)
1109 1112 tr.addabort('txnabort-hook', txnaborthook)
1110 1113 # avoid eager cache invalidation. in-memory data should be identical
1111 1114 # to stored data if transaction has no error.
1112 1115 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1113 1116 self._transref = weakref.ref(tr)
1114 1117 return tr
1115 1118
1116 1119 def _journalfiles(self):
1117 1120 return ((self.svfs, 'journal'),
1118 1121 (self.vfs, 'journal.dirstate'),
1119 1122 (self.vfs, 'journal.branch'),
1120 1123 (self.vfs, 'journal.desc'),
1121 1124 (self.vfs, 'journal.bookmarks'),
1122 1125 (self.svfs, 'journal.phaseroots'))
1123 1126
1124 1127 def undofiles(self):
1125 1128 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1126 1129
1127 1130 @unfilteredmethod
1128 1131 def _writejournal(self, desc):
1129 1132 self.dirstate.savebackup(None, prefix='journal.')
1130 1133 self.vfs.write("journal.branch",
1131 1134 encoding.fromlocal(self.dirstate.branch()))
1132 1135 self.vfs.write("journal.desc",
1133 1136 "%d\n%s\n" % (len(self), desc))
1134 1137 self.vfs.write("journal.bookmarks",
1135 1138 self.vfs.tryread("bookmarks"))
1136 1139 self.svfs.write("journal.phaseroots",
1137 1140 self.svfs.tryread("phaseroots"))
1138 1141
1139 1142 def recover(self):
1140 1143 with self.lock():
1141 1144 if self.svfs.exists("journal"):
1142 1145 self.ui.status(_("rolling back interrupted transaction\n"))
1143 1146 vfsmap = {'': self.svfs,
1144 1147 'plain': self.vfs,}
1145 1148 transaction.rollback(self.svfs, vfsmap, "journal",
1146 1149 self.ui.warn)
1147 1150 self.invalidate()
1148 1151 return True
1149 1152 else:
1150 1153 self.ui.warn(_("no interrupted transaction available\n"))
1151 1154 return False
1152 1155
1153 1156 def rollback(self, dryrun=False, force=False):
1154 1157 wlock = lock = dsguard = None
1155 1158 try:
1156 1159 wlock = self.wlock()
1157 1160 lock = self.lock()
1158 1161 if self.svfs.exists("undo"):
1159 1162 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1160 1163
1161 1164 return self._rollback(dryrun, force, dsguard)
1162 1165 else:
1163 1166 self.ui.warn(_("no rollback information available\n"))
1164 1167 return 1
1165 1168 finally:
1166 1169 release(dsguard, lock, wlock)
1167 1170
1168 1171 @unfilteredmethod # Until we get smarter cache management
1169 1172 def _rollback(self, dryrun, force, dsguard):
1170 1173 ui = self.ui
1171 1174 try:
1172 1175 args = self.vfs.read('undo.desc').splitlines()
1173 1176 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1174 1177 if len(args) >= 3:
1175 1178 detail = args[2]
1176 1179 oldtip = oldlen - 1
1177 1180
1178 1181 if detail and ui.verbose:
1179 1182 msg = (_('repository tip rolled back to revision %d'
1180 1183 ' (undo %s: %s)\n')
1181 1184 % (oldtip, desc, detail))
1182 1185 else:
1183 1186 msg = (_('repository tip rolled back to revision %d'
1184 1187 ' (undo %s)\n')
1185 1188 % (oldtip, desc))
1186 1189 except IOError:
1187 1190 msg = _('rolling back unknown transaction\n')
1188 1191 desc = None
1189 1192
1190 1193 if not force and self['.'] != self['tip'] and desc == 'commit':
1191 1194 raise error.Abort(
1192 1195 _('rollback of last commit while not checked out '
1193 1196 'may lose data'), hint=_('use -f to force'))
1194 1197
1195 1198 ui.status(msg)
1196 1199 if dryrun:
1197 1200 return 0
1198 1201
1199 1202 parents = self.dirstate.parents()
1200 1203 self.destroying()
1201 1204 vfsmap = {'plain': self.vfs, '': self.svfs}
1202 1205 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1203 1206 if self.vfs.exists('undo.bookmarks'):
1204 1207 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1205 1208 if self.svfs.exists('undo.phaseroots'):
1206 1209 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1207 1210 self.invalidate()
1208 1211
1209 1212 parentgone = (parents[0] not in self.changelog.nodemap or
1210 1213 parents[1] not in self.changelog.nodemap)
1211 1214 if parentgone:
1212 1215 # prevent dirstateguard from overwriting already restored one
1213 1216 dsguard.close()
1214 1217
1215 1218 self.dirstate.restorebackup(None, prefix='undo.')
1216 1219 try:
1217 1220 branch = self.vfs.read('undo.branch')
1218 1221 self.dirstate.setbranch(encoding.tolocal(branch))
1219 1222 except IOError:
1220 1223 ui.warn(_('named branch could not be reset: '
1221 1224 'current branch is still \'%s\'\n')
1222 1225 % self.dirstate.branch())
1223 1226
1224 1227 parents = tuple([p.rev() for p in self[None].parents()])
1225 1228 if len(parents) > 1:
1226 1229 ui.status(_('working directory now based on '
1227 1230 'revisions %d and %d\n') % parents)
1228 1231 else:
1229 1232 ui.status(_('working directory now based on '
1230 1233 'revision %d\n') % parents)
1231 1234 mergemod.mergestate.clean(self, self['.'].node())
1232 1235
1233 1236 # TODO: if we know which new heads may result from this rollback, pass
1234 1237 # them to destroy(), which will prevent the branchhead cache from being
1235 1238 # invalidated.
1236 1239 self.destroyed()
1237 1240 return 0
1238 1241
1239 1242 def _buildcacheupdater(self, newtransaction):
1240 1243 """called during transaction to build the callback updating cache
1241 1244
1242 1245 Lives on the repository to help extensions that might want to augment
1243 1246 this logic. For this purpose, the created transaction is passed to the
1244 1247 method.
1245 1248 """
1246 1249 # we must avoid cyclic reference between repo and transaction.
1247 1250 reporef = weakref.ref(self)
1248 1251 def updater(tr):
1249 1252 repo = reporef()
1250 1253 repo.updatecaches(tr)
1251 1254 return updater
1252 1255
1253 1256 @unfilteredmethod
1254 1257 def updatecaches(self, tr=None):
1255 1258 """warm appropriate caches
1256 1259
1257 1260 If this function is called after a transaction has closed, the transaction
1258 1261 will be available in the 'tr' argument. This can be used to selectively
1259 1262 update caches relevant to the changes in that transaction.
1260 1263 """
1261 1264 if tr is not None and tr.hookargs.get('source') == 'strip':
1262 1265 # During strip, many caches are invalid but
1263 1266 # later call to `destroyed` will refresh them.
1264 1267 return
1265 1268
1266 1269 if tr is None or tr.changes['revs']:
1267 1270 # updating the unfiltered branchmap should refresh all the others,
1268 1271 self.ui.debug('updating the branch cache\n')
1269 1272 branchmap.updatecache(self.filtered('served'))
1270 1273
1271 1274 def invalidatecaches(self):
1272 1275
1273 1276 if '_tagscache' in vars(self):
1274 1277 # can't use delattr on proxy
1275 1278 del self.__dict__['_tagscache']
1276 1279
1277 1280 self.unfiltered()._branchcaches.clear()
1278 1281 self.invalidatevolatilesets()
1279 1282
1280 1283 def invalidatevolatilesets(self):
1281 1284 self.filteredrevcache.clear()
1282 1285 obsolete.clearobscaches(self)
1283 1286
1284 1287 def invalidatedirstate(self):
1285 1288 '''Invalidates the dirstate, causing the next call to dirstate
1286 1289 to check if it was modified since the last time it was read,
1287 1290 rereading it if it has.
1288 1291
1289 1292 This is different from dirstate.invalidate() in that it doesn't always
1290 1293 reread the dirstate. Use dirstate.invalidate() if you want to
1291 1294 explicitly read the dirstate again (i.e. restoring it to a previous
1292 1295 known good state).'''
1293 1296 if hasunfilteredcache(self, 'dirstate'):
1294 1297 for k in self.dirstate._filecache:
1295 1298 try:
1296 1299 delattr(self.dirstate, k)
1297 1300 except AttributeError:
1298 1301 pass
1299 1302 delattr(self.unfiltered(), 'dirstate')
1300 1303
1301 1304 def invalidate(self, clearfilecache=False):
1302 1305 '''Invalidates both store and non-store parts other than dirstate
1303 1306
1304 1307 If a transaction is running, invalidation of store is omitted,
1305 1308 because discarding in-memory changes might cause inconsistency
1306 1309 (e.g. incomplete fncache causes unintentional failure, but
1307 1310 redundant one doesn't).
1308 1311 '''
1309 1312 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1310 1313 for k in list(self._filecache.keys()):
1311 1314 # dirstate is invalidated separately in invalidatedirstate()
1312 1315 if k == 'dirstate':
1313 1316 continue
1314 1317
1315 1318 if clearfilecache:
1316 1319 del self._filecache[k]
1317 1320 try:
1318 1321 delattr(unfiltered, k)
1319 1322 except AttributeError:
1320 1323 pass
1321 1324 self.invalidatecaches()
1322 1325 if not self.currenttransaction():
1323 1326 # TODO: Changing contents of store outside transaction
1324 1327 # causes inconsistency. We should make in-memory store
1325 1328 # changes detectable, and abort if changed.
1326 1329 self.store.invalidatecaches()
1327 1330
1328 1331 def invalidateall(self):
1329 1332 '''Fully invalidates both store and non-store parts, causing the
1330 1333 subsequent operation to reread any outside changes.'''
1331 1334 # extension should hook this to invalidate its caches
1332 1335 self.invalidate()
1333 1336 self.invalidatedirstate()
1334 1337
1335 1338 @unfilteredmethod
1336 1339 def _refreshfilecachestats(self, tr):
1337 1340 """Reload stats of cached files so that they are flagged as valid"""
1338 1341 for k, ce in self._filecache.items():
1339 1342 if k == 'dirstate' or k not in self.__dict__:
1340 1343 continue
1341 1344 ce.refresh()
1342 1345
1343 1346 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1344 1347 inheritchecker=None, parentenvvar=None):
1345 1348 parentlock = None
1346 1349 # the contents of parentenvvar are used by the underlying lock to
1347 1350 # determine whether it can be inherited
1348 1351 if parentenvvar is not None:
1349 1352 parentlock = encoding.environ.get(parentenvvar)
1350 1353 try:
1351 1354 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1352 1355 acquirefn=acquirefn, desc=desc,
1353 1356 inheritchecker=inheritchecker,
1354 1357 parentlock=parentlock)
1355 1358 except error.LockHeld as inst:
1356 1359 if not wait:
1357 1360 raise
1358 1361 # show more details for new-style locks
1359 1362 if ':' in inst.locker:
1360 1363 host, pid = inst.locker.split(":", 1)
1361 1364 self.ui.warn(
1362 1365 _("waiting for lock on %s held by process %r "
1363 1366 "on host %r\n") % (desc, pid, host))
1364 1367 else:
1365 1368 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1366 1369 (desc, inst.locker))
1367 1370 # default to 600 seconds timeout
1368 1371 l = lockmod.lock(vfs, lockname,
1369 1372 int(self.ui.config("ui", "timeout", "600")),
1370 1373 releasefn=releasefn, acquirefn=acquirefn,
1371 1374 desc=desc)
1372 1375 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1373 1376 return l
1374 1377
1375 1378 def _afterlock(self, callback):
1376 1379 """add a callback to be run when the repository is fully unlocked
1377 1380
1378 1381 The callback will be executed when the outermost lock is released
1379 1382 (with wlock being higher level than 'lock')."""
1380 1383 for ref in (self._wlockref, self._lockref):
1381 1384 l = ref and ref()
1382 1385 if l and l.held:
1383 1386 l.postrelease.append(callback)
1384 1387 break
1385 1388 else: # no lock has been found.
1386 1389 callback()
1387 1390
1388 1391 def lock(self, wait=True):
1389 1392 '''Lock the repository store (.hg/store) and return a weak reference
1390 1393 to the lock. Use this before modifying the store (e.g. committing or
1391 1394 stripping). If you are opening a transaction, get a lock as well.
1392 1395
1393 1396 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1394 1397 'wlock' first to avoid a dead-lock hazard.'''
1395 1398 l = self._currentlock(self._lockref)
1396 1399 if l is not None:
1397 1400 l.lock()
1398 1401 return l
1399 1402
1400 1403 l = self._lock(self.svfs, "lock", wait, None,
1401 1404 self.invalidate, _('repository %s') % self.origroot)
1402 1405 self._lockref = weakref.ref(l)
1403 1406 return l
1404 1407
1405 1408 def _wlockchecktransaction(self):
1406 1409 if self.currenttransaction() is not None:
1407 1410 raise error.LockInheritanceContractViolation(
1408 1411 'wlock cannot be inherited in the middle of a transaction')
1409 1412
1410 1413 def wlock(self, wait=True):
1411 1414 '''Lock the non-store parts of the repository (everything under
1412 1415 .hg except .hg/store) and return a weak reference to the lock.
1413 1416
1414 1417 Use this before modifying files in .hg.
1415 1418
1416 1419 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1417 1420 'wlock' first to avoid a dead-lock hazard.'''
1418 1421 l = self._wlockref and self._wlockref()
1419 1422 if l is not None and l.held:
1420 1423 l.lock()
1421 1424 return l
1422 1425
1423 1426 # We do not need to check for non-waiting lock acquisition. Such
1424 1427 # acquisition would not cause dead-lock as they would just fail.
1425 1428 if wait and (self.ui.configbool('devel', 'all-warnings')
1426 1429 or self.ui.configbool('devel', 'check-locks')):
1427 1430 if self._currentlock(self._lockref) is not None:
1428 1431 self.ui.develwarn('"wlock" acquired after "lock"')
1429 1432
1430 1433 def unlock():
1431 1434 if self.dirstate.pendingparentchange():
1432 1435 self.dirstate.invalidate()
1433 1436 else:
1434 1437 self.dirstate.write(None)
1435 1438
1436 1439 self._filecache['dirstate'].refresh()
1437 1440
1438 1441 l = self._lock(self.vfs, "wlock", wait, unlock,
1439 1442 self.invalidatedirstate, _('working directory of %s') %
1440 1443 self.origroot,
1441 1444 inheritchecker=self._wlockchecktransaction,
1442 1445 parentenvvar='HG_WLOCK_LOCKER')
1443 1446 self._wlockref = weakref.ref(l)
1444 1447 return l
1445 1448
1446 1449 def _currentlock(self, lockref):
1447 1450 """Returns the lock if it's held, or None if it's not."""
1448 1451 if lockref is None:
1449 1452 return None
1450 1453 l = lockref()
1451 1454 if l is None or not l.held:
1452 1455 return None
1453 1456 return l
1454 1457
1455 1458 def currentwlock(self):
1456 1459 """Returns the wlock if it's held, or None if it's not."""
1457 1460 return self._currentlock(self._wlockref)
1458 1461
1459 1462 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1460 1463 """
1461 1464 commit an individual file as part of a larger transaction
1462 1465 """
1463 1466
1464 1467 fname = fctx.path()
1465 1468 fparent1 = manifest1.get(fname, nullid)
1466 1469 fparent2 = manifest2.get(fname, nullid)
1467 1470 if isinstance(fctx, context.filectx):
1468 1471 node = fctx.filenode()
1469 1472 if node in [fparent1, fparent2]:
1470 1473 self.ui.debug('reusing %s filelog entry\n' % fname)
1471 1474 if manifest1.flags(fname) != fctx.flags():
1472 1475 changelist.append(fname)
1473 1476 return node
1474 1477
1475 1478 flog = self.file(fname)
1476 1479 meta = {}
1477 1480 copy = fctx.renamed()
1478 1481 if copy and copy[0] != fname:
1479 1482 # Mark the new revision of this file as a copy of another
1480 1483 # file. This copy data will effectively act as a parent
1481 1484 # of this new revision. If this is a merge, the first
1482 1485 # parent will be the nullid (meaning "look up the copy data")
1483 1486 # and the second one will be the other parent. For example:
1484 1487 #
1485 1488 # 0 --- 1 --- 3 rev1 changes file foo
1486 1489 # \ / rev2 renames foo to bar and changes it
1487 1490 # \- 2 -/ rev3 should have bar with all changes and
1488 1491 # should record that bar descends from
1489 1492 # bar in rev2 and foo in rev1
1490 1493 #
1491 1494 # this allows this merge to succeed:
1492 1495 #
1493 1496 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1494 1497 # \ / merging rev3 and rev4 should use bar@rev2
1495 1498 # \- 2 --- 4 as the merge base
1496 1499 #
1497 1500
1498 1501 cfname = copy[0]
1499 1502 crev = manifest1.get(cfname)
1500 1503 newfparent = fparent2
1501 1504
1502 1505 if manifest2: # branch merge
1503 1506 if fparent2 == nullid or crev is None: # copied on remote side
1504 1507 if cfname in manifest2:
1505 1508 crev = manifest2[cfname]
1506 1509 newfparent = fparent1
1507 1510
1508 1511 # Here, we used to search backwards through history to try to find
1509 1512 # where the file copy came from if the source of a copy was not in
1510 1513 # the parent directory. However, this doesn't actually make sense to
1511 1514 # do (what does a copy from something not in your working copy even
1512 1515 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1513 1516 # the user that copy information was dropped, so if they didn't
1514 1517 # expect this outcome it can be fixed, but this is the correct
1515 1518 # behavior in this circumstance.
1516 1519
1517 1520 if crev:
1518 1521 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1519 1522 meta["copy"] = cfname
1520 1523 meta["copyrev"] = hex(crev)
1521 1524 fparent1, fparent2 = nullid, newfparent
1522 1525 else:
1523 1526 self.ui.warn(_("warning: can't find ancestor for '%s' "
1524 1527 "copied from '%s'!\n") % (fname, cfname))
1525 1528
1526 1529 elif fparent1 == nullid:
1527 1530 fparent1, fparent2 = fparent2, nullid
1528 1531 elif fparent2 != nullid:
1529 1532 # is one parent an ancestor of the other?
1530 1533 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1531 1534 if fparent1 in fparentancestors:
1532 1535 fparent1, fparent2 = fparent2, nullid
1533 1536 elif fparent2 in fparentancestors:
1534 1537 fparent2 = nullid
1535 1538
1536 1539 # is the file changed?
1537 1540 text = fctx.data()
1538 1541 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1539 1542 changelist.append(fname)
1540 1543 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1541 1544 # are just the flags changed during merge?
1542 1545 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1543 1546 changelist.append(fname)
1544 1547
1545 1548 return fparent1
1546 1549
1547 1550 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1548 1551 """check for commit arguments that aren't committable"""
1549 1552 if match.isexact() or match.prefix():
1550 1553 matched = set(status.modified + status.added + status.removed)
1551 1554
1552 1555 for f in match.files():
1553 1556 f = self.dirstate.normalize(f)
1554 1557 if f == '.' or f in matched or f in wctx.substate:
1555 1558 continue
1556 1559 if f in status.deleted:
1557 1560 fail(f, _('file not found!'))
1558 1561 if f in vdirs: # visited directory
1559 1562 d = f + '/'
1560 1563 for mf in matched:
1561 1564 if mf.startswith(d):
1562 1565 break
1563 1566 else:
1564 1567 fail(f, _("no match under directory!"))
1565 1568 elif f not in self.dirstate:
1566 1569 fail(f, _("file not tracked!"))
1567 1570
1568 1571 @unfilteredmethod
1569 1572 def commit(self, text="", user=None, date=None, match=None, force=False,
1570 1573 editor=False, extra=None):
1571 1574 """Add a new revision to current repository.
1572 1575
1573 1576 Revision information is gathered from the working directory,
1574 1577 match can be used to filter the committed files. If editor is
1575 1578 supplied, it is called to get a commit message.
1576 1579 """
1577 1580 if extra is None:
1578 1581 extra = {}
1579 1582
1580 1583 def fail(f, msg):
1581 1584 raise error.Abort('%s: %s' % (f, msg))
1582 1585
1583 1586 if not match:
1584 1587 match = matchmod.always(self.root, '')
1585 1588
1586 1589 if not force:
1587 1590 vdirs = []
1588 1591 match.explicitdir = vdirs.append
1589 1592 match.bad = fail
1590 1593
1591 1594 wlock = lock = tr = None
1592 1595 try:
1593 1596 wlock = self.wlock()
1594 1597 lock = self.lock() # for recent changelog (see issue4368)
1595 1598
1596 1599 wctx = self[None]
1597 1600 merge = len(wctx.parents()) > 1
1598 1601
1599 1602 if not force and merge and not match.always():
1600 1603 raise error.Abort(_('cannot partially commit a merge '
1601 1604 '(do not specify files or patterns)'))
1602 1605
1603 1606 status = self.status(match=match, clean=force)
1604 1607 if force:
1605 1608 status.modified.extend(status.clean) # mq may commit clean files
1606 1609
1607 1610 # check subrepos
1608 1611 subs = []
1609 1612 commitsubs = set()
1610 1613 newstate = wctx.substate.copy()
1611 1614 # only manage subrepos and .hgsubstate if .hgsub is present
1612 1615 if '.hgsub' in wctx:
1613 1616 # we'll decide whether to track this ourselves, thanks
1614 1617 for c in status.modified, status.added, status.removed:
1615 1618 if '.hgsubstate' in c:
1616 1619 c.remove('.hgsubstate')
1617 1620
1618 1621 # compare current state to last committed state
1619 1622 # build new substate based on last committed state
1620 1623 oldstate = wctx.p1().substate
1621 1624 for s in sorted(newstate.keys()):
1622 1625 if not match(s):
1623 1626 # ignore working copy, use old state if present
1624 1627 if s in oldstate:
1625 1628 newstate[s] = oldstate[s]
1626 1629 continue
1627 1630 if not force:
1628 1631 raise error.Abort(
1629 1632 _("commit with new subrepo %s excluded") % s)
1630 1633 dirtyreason = wctx.sub(s).dirtyreason(True)
1631 1634 if dirtyreason:
1632 1635 if not self.ui.configbool('ui', 'commitsubrepos'):
1633 1636 raise error.Abort(dirtyreason,
1634 1637 hint=_("use --subrepos for recursive commit"))
1635 1638 subs.append(s)
1636 1639 commitsubs.add(s)
1637 1640 else:
1638 1641 bs = wctx.sub(s).basestate()
1639 1642 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1640 1643 if oldstate.get(s, (None, None, None))[1] != bs:
1641 1644 subs.append(s)
1642 1645
1643 1646 # check for removed subrepos
1644 1647 for p in wctx.parents():
1645 1648 r = [s for s in p.substate if s not in newstate]
1646 1649 subs += [s for s in r if match(s)]
1647 1650 if subs:
1648 1651 if (not match('.hgsub') and
1649 1652 '.hgsub' in (wctx.modified() + wctx.added())):
1650 1653 raise error.Abort(
1651 1654 _("can't commit subrepos without .hgsub"))
1652 1655 status.modified.insert(0, '.hgsubstate')
1653 1656
1654 1657 elif '.hgsub' in status.removed:
1655 1658 # clean up .hgsubstate when .hgsub is removed
1656 1659 if ('.hgsubstate' in wctx and
1657 1660 '.hgsubstate' not in (status.modified + status.added +
1658 1661 status.removed)):
1659 1662 status.removed.insert(0, '.hgsubstate')
1660 1663
1661 1664 # make sure all explicit patterns are matched
1662 1665 if not force:
1663 1666 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1664 1667
1665 1668 cctx = context.workingcommitctx(self, status,
1666 1669 text, user, date, extra)
1667 1670
1668 1671 # internal config: ui.allowemptycommit
1669 1672 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1670 1673 or extra.get('close') or merge or cctx.files()
1671 1674 or self.ui.configbool('ui', 'allowemptycommit'))
1672 1675 if not allowemptycommit:
1673 1676 return None
1674 1677
1675 1678 if merge and cctx.deleted():
1676 1679 raise error.Abort(_("cannot commit merge with missing files"))
1677 1680
1678 1681 ms = mergemod.mergestate.read(self)
1679 1682 mergeutil.checkunresolved(ms)
1680 1683
1681 1684 if editor:
1682 1685 cctx._text = editor(self, cctx, subs)
1683 1686 edited = (text != cctx._text)
1684 1687
1685 1688 # Save commit message in case this transaction gets rolled back
1686 1689 # (e.g. by a pretxncommit hook). Leave the content alone on
1687 1690 # the assumption that the user will use the same editor again.
1688 1691 msgfn = self.savecommitmessage(cctx._text)
1689 1692
1690 1693 # commit subs and write new state
1691 1694 if subs:
1692 1695 for s in sorted(commitsubs):
1693 1696 sub = wctx.sub(s)
1694 1697 self.ui.status(_('committing subrepository %s\n') %
1695 1698 subrepo.subrelpath(sub))
1696 1699 sr = sub.commit(cctx._text, user, date)
1697 1700 newstate[s] = (newstate[s][0], sr)
1698 1701 subrepo.writestate(self, newstate)
1699 1702
1700 1703 p1, p2 = self.dirstate.parents()
1701 1704 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1702 1705 try:
1703 1706 self.hook("precommit", throw=True, parent1=hookp1,
1704 1707 parent2=hookp2)
1705 1708 tr = self.transaction('commit')
1706 1709 ret = self.commitctx(cctx, True)
1707 1710 except: # re-raises
1708 1711 if edited:
1709 1712 self.ui.write(
1710 1713 _('note: commit message saved in %s\n') % msgfn)
1711 1714 raise
1712 1715 # update bookmarks, dirstate and mergestate
1713 1716 bookmarks.update(self, [p1, p2], ret)
1714 1717 cctx.markcommitted(ret)
1715 1718 ms.reset()
1716 1719 tr.close()
1717 1720
1718 1721 finally:
1719 1722 lockmod.release(tr, lock, wlock)
1720 1723
1721 1724 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1722 1725 # hack for commands that use a temporary commit (e.g. histedit):
1723 1726 # the temporary commit may already be stripped by the time the hook runs
1724 1727 if self.changelog.hasnode(ret):
1725 1728 self.hook("commit", node=node, parent1=parent1,
1726 1729 parent2=parent2)
1727 1730 self._afterlock(commithook)
1728 1731 return ret
1729 1732
1730 1733 @unfilteredmethod
1731 1734 def commitctx(self, ctx, error=False):
1732 1735 """Add a new revision to current repository.
1733 1736 Revision information is passed via the context argument.
1734 1737 """
1735 1738
1736 1739 tr = None
1737 1740 p1, p2 = ctx.p1(), ctx.p2()
1738 1741 user = ctx.user()
1739 1742
1740 1743 lock = self.lock()
1741 1744 try:
1742 1745 tr = self.transaction("commit")
1743 1746 trp = weakref.proxy(tr)
1744 1747
1745 1748 if ctx.manifestnode():
1746 1749 # reuse an existing manifest revision
1747 1750 mn = ctx.manifestnode()
1748 1751 files = ctx.files()
1749 1752 elif ctx.files():
1750 1753 m1ctx = p1.manifestctx()
1751 1754 m2ctx = p2.manifestctx()
1752 1755 mctx = m1ctx.copy()
1753 1756
1754 1757 m = mctx.read()
1755 1758 m1 = m1ctx.read()
1756 1759 m2 = m2ctx.read()
1757 1760
1758 1761 # check in files
1759 1762 added = []
1760 1763 changed = []
1761 1764 removed = list(ctx.removed())
1762 1765 linkrev = len(self)
1763 1766 self.ui.note(_("committing files:\n"))
1764 1767 for f in sorted(ctx.modified() + ctx.added()):
1765 1768 self.ui.note(f + "\n")
1766 1769 try:
1767 1770 fctx = ctx[f]
1768 1771 if fctx is None:
1769 1772 removed.append(f)
1770 1773 else:
1771 1774 added.append(f)
1772 1775 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1773 1776 trp, changed)
1774 1777 m.setflag(f, fctx.flags())
1775 1778 except OSError as inst:
1776 1779 self.ui.warn(_("trouble committing %s!\n") % f)
1777 1780 raise
1778 1781 except IOError as inst:
1779 1782 errcode = getattr(inst, 'errno', errno.ENOENT)
1780 1783 if error or errcode and errcode != errno.ENOENT:
1781 1784 self.ui.warn(_("trouble committing %s!\n") % f)
1782 1785 raise
1783 1786
1784 1787 # update manifest
1785 1788 self.ui.note(_("committing manifest\n"))
1786 1789 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1787 1790 drop = [f for f in removed if f in m]
1788 1791 for f in drop:
1789 1792 del m[f]
1790 1793 mn = mctx.write(trp, linkrev,
1791 1794 p1.manifestnode(), p2.manifestnode(),
1792 1795 added, drop)
1793 1796 files = changed + removed
1794 1797 else:
1795 1798 mn = p1.manifestnode()
1796 1799 files = []
1797 1800
1798 1801 # update changelog
1799 1802 self.ui.note(_("committing changelog\n"))
1800 1803 self.changelog.delayupdate(tr)
1801 1804 n = self.changelog.add(mn, files, ctx.description(),
1802 1805 trp, p1.node(), p2.node(),
1803 1806 user, ctx.date(), ctx.extra().copy())
1804 1807 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1805 1808 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1806 1809 parent2=xp2)
1807 1810 # set the new commit in its proper phase
1808 1811 targetphase = subrepo.newcommitphase(self.ui, ctx)
1809 1812 if targetphase:
1810 1813 # retracting the boundary does not alter parent changesets.
1811 1814 # if a parent has a higher phase, the resulting phase will
1812 1815 # be compliant anyway
1813 1816 #
1814 1817 # if the minimal phase was 0 we don't need to retract anything
1815 1818 phases.retractboundary(self, tr, targetphase, [n])
1816 1819 tr.close()
1817 1820 return n
1818 1821 finally:
1819 1822 if tr:
1820 1823 tr.release()
1821 1824 lock.release()
1822 1825
1823 1826 @unfilteredmethod
1824 1827 def destroying(self):
1825 1828 '''Inform the repository that nodes are about to be destroyed.
1826 1829 Intended for use by strip and rollback, so there's a common
1827 1830 place for anything that has to be done before destroying history.
1828 1831
1829 1832 This is mostly useful for saving state that is in memory and waiting
1830 1833 to be flushed when the current lock is released. Because a call to
1831 1834 destroyed is imminent, the repo will be invalidated causing those
1832 1835 changes to stay in memory (waiting for the next unlock), or vanish
1833 1836 completely.
1834 1837 '''
1835 1838 # When using the same lock to commit and strip, the phasecache is left
1836 1839 # dirty after committing. Then when we strip, the repo is invalidated,
1837 1840 # causing those changes to disappear.
1838 1841 if '_phasecache' in vars(self):
1839 1842 self._phasecache.write()
1840 1843
1841 1844 @unfilteredmethod
1842 1845 def destroyed(self):
1843 1846 '''Inform the repository that nodes have been destroyed.
1844 1847 Intended for use by strip and rollback, so there's a common
1845 1848 place for anything that has to be done after destroying history.
1846 1849 '''
1847 1850 # When one tries to:
1848 1851 # 1) destroy nodes thus calling this method (e.g. strip)
1849 1852 # 2) use phasecache somewhere (e.g. commit)
1850 1853 #
1851 1854 # then 2) will fail because the phasecache contains nodes that were
1852 1855 # removed. We can either remove phasecache from the filecache,
1853 1856 # causing it to reload next time it is accessed, or simply filter
1854 1857 # the removed nodes now and write the updated cache.
1855 1858 self._phasecache.filterunknown(self)
1856 1859 self._phasecache.write()
1857 1860
1858 1861 # refresh all repository caches
1859 1862 self.updatecaches()
1860 1863
1861 1864 # Ensure the persistent tag cache is updated. Doing it now
1862 1865 # means that the tag cache only has to worry about destroyed
1863 1866 # heads immediately after a strip/rollback. That in turn
1864 1867 # guarantees that "cachetip == currenttip" (comparing both rev
1865 1868 # and node) always means no nodes have been added or destroyed.
1866 1869
1867 1870 # XXX this is suboptimal when qrefresh'ing: we strip the current
1868 1871 # head, refresh the tag cache, then immediately add a new head.
1869 1872 # But I think doing it this way is necessary for the "instant
1870 1873 # tag cache retrieval" case to work.
1871 1874 self.invalidate()
1872 1875
1873 1876 def walk(self, match, node=None):
1874 1877 '''
1875 1878 walk recursively through the directory tree or a given
1876 1879 changeset, finding all files matched by the match
1877 1880 function
1878 1881 '''
1879 1882 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1880 1883 return self[node].walk(match)
1881 1884
1882 1885 def status(self, node1='.', node2=None, match=None,
1883 1886 ignored=False, clean=False, unknown=False,
1884 1887 listsubrepos=False):
1885 1888 '''a convenience method that calls node1.status(node2)'''
1886 1889 return self[node1].status(node2, match, ignored, clean, unknown,
1887 1890 listsubrepos)
1888 1891
1889 1892 def addpostdsstatus(self, ps):
1890 1893 """Add a callback to run within the wlock, at the point at which status
1891 1894 fixups happen.
1892 1895
1893 1896 On status completion, callback(wctx, status) will be called with the
1894 1897 wlock held, unless the dirstate has changed from underneath or the wlock
1895 1898 couldn't be grabbed.
1896 1899
1897 1900 Callbacks should not capture and use a cached copy of the dirstate --
1898 1901 it might change in the meanwhile. Instead, they should access the
1899 1902 dirstate via wctx.repo().dirstate.
1900 1903
1901 1904 This list is emptied out after each status run -- extensions should
1902 1905 make sure they add to this list each time dirstate.status is called.
1903 1906 Extensions should also make sure they don't call this for statuses
1904 1907 that don't involve the dirstate.
1905 1908 """
1906 1909
1907 1910 # The list is located here for uniqueness reasons -- it is actually
1908 1911 # managed by the workingctx, but that isn't unique per-repo.
1909 1912 self._postdsstatus.append(ps)
1910 1913
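# Editorial sketch (not part of this change): an extension-side callback
# following the contract described in the docstring above; only the
# (wctx, status) signature and the re-registration rule come from the
# docstring, the names and the logged fields are made up.
def _logpostdsstatus(wctx, status):
    repo = wctx.repo()   # always go through wctx, never a cached dirstate
    repo.ui.debug('post-status: %d modified, %d unknown\n'
                  % (len(status.modified), len(status.unknown)))

# an extension would call repo.addpostdsstatus(_logpostdsstatus) shortly
# before each dirstate.status() run it cares about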
1911 1914 def postdsstatus(self):
1912 1915 """Used by workingctx to get the list of post-dirstate-status hooks."""
1913 1916 return self._postdsstatus
1914 1917
1915 1918 def clearpostdsstatus(self):
1916 1919 """Used by workingctx to clear post-dirstate-status hooks."""
1917 1920 del self._postdsstatus[:]
1918 1921
1919 1922 def heads(self, start=None):
1920 1923 if start is None:
1921 1924 cl = self.changelog
1922 1925 headrevs = reversed(cl.headrevs())
1923 1926 return [cl.node(rev) for rev in headrevs]
1924 1927
1925 1928 heads = self.changelog.heads(start)
1926 1929 # sort the output in rev descending order
1927 1930 return sorted(heads, key=self.changelog.rev, reverse=True)
1928 1931
1929 1932 def branchheads(self, branch=None, start=None, closed=False):
1930 1933 '''return a (possibly filtered) list of heads for the given branch
1931 1934
1932 1935 Heads are returned in topological order, from newest to oldest.
1933 1936 If branch is None, use the dirstate branch.
1934 1937 If start is not None, return only heads reachable from start.
1935 1938 If closed is True, return heads that are marked as closed as well.
1936 1939 '''
1937 1940 if branch is None:
1938 1941 branch = self[None].branch()
1939 1942 branches = self.branchmap()
1940 1943 if branch not in branches:
1941 1944 return []
1942 1945 # the cache returns heads ordered lowest to highest
1943 1946 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1944 1947 if start is not None:
1945 1948 # filter out the heads that cannot be reached from startrev
1946 1949 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1947 1950 bheads = [h for h in bheads if h in fbheads]
1948 1951 return bheads
1949 1952
1950 1953 def branches(self, nodes):
1951 1954 if not nodes:
1952 1955 nodes = [self.changelog.tip()]
1953 1956 b = []
1954 1957 for n in nodes:
1955 1958 t = n
1956 1959 while True:
1957 1960 p = self.changelog.parents(n)
1958 1961 if p[1] != nullid or p[0] == nullid:
1959 1962 b.append((t, n, p[0], p[1]))
1960 1963 break
1961 1964 n = p[0]
1962 1965 return b
1963 1966
1964 1967 def between(self, pairs):
1965 1968 r = []
1966 1969
1967 1970 for top, bottom in pairs:
1968 1971 n, l, i = top, [], 0
1969 1972 f = 1
1970 1973
1971 1974 while n != bottom and n != nullid:
1972 1975 p = self.changelog.parents(n)[0]
1973 1976 if i == f:
1974 1977 l.append(n)
1975 1978 f = f * 2
1976 1979 n = p
1977 1980 i += 1
1978 1981
1979 1982 r.append(l)
1980 1983
1981 1984 return r
1982 1985
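# Editorial sketch (not part of this change): between() above samples the
# first-parent chain from 'top' towards 'bottom' at power-of-two distances,
# so the result stays logarithmic in the length of the chain. A standalone
# restatement over a linear chain of integer "nodes" n -> n-1 -> ... -> 0:
def _sample_between(top, bottom=0):
    n, picked, i, f = top, [], 0, 1
    while n != bottom and n > 0:
        if i == f:          # record a node every time the distance doubles
            picked.append(n)
            f *= 2
        n -= 1              # follow the single parent
        i += 1
    return picked

# _sample_between(100) -> [99, 98, 96, 92, 84, 68, 36]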
1983 1986 def checkpush(self, pushop):
1984 1987 """Extensions can override this function if additional checks have
1985 1988 to be performed before pushing, or call it if they override push
1986 1989 command.
1987 1990 """
1988 1991 pass
1989 1992
1990 1993 @unfilteredpropertycache
1991 1994 def prepushoutgoinghooks(self):
1992 1995 """Return util.hooks consists of a pushop with repo, remote, outgoing
1993 1996 methods, which are called before pushing changesets.
1994 1997 """
1995 1998 return util.hooks()
1996 1999
1997 2000 def pushkey(self, namespace, key, old, new):
1998 2001 try:
1999 2002 tr = self.currenttransaction()
2000 2003 hookargs = {}
2001 2004 if tr is not None:
2002 2005 hookargs.update(tr.hookargs)
2003 2006 hookargs['namespace'] = namespace
2004 2007 hookargs['key'] = key
2005 2008 hookargs['old'] = old
2006 2009 hookargs['new'] = new
2007 2010 self.hook('prepushkey', throw=True, **hookargs)
2008 2011 except error.HookAbort as exc:
2009 2012 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2010 2013 if exc.hint:
2011 2014 self.ui.write_err(_("(%s)\n") % exc.hint)
2012 2015 return False
2013 2016 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2014 2017 ret = pushkey.push(self, namespace, key, old, new)
2015 2018 def runhook():
2016 2019 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2017 2020 ret=ret)
2018 2021 self._afterlock(runhook)
2019 2022 return ret
2020 2023
2021 2024 def listkeys(self, namespace):
2022 2025 self.hook('prelistkeys', throw=True, namespace=namespace)
2023 2026 self.ui.debug('listing keys for "%s"\n' % namespace)
2024 2027 values = pushkey.list(self, namespace)
2025 2028 self.hook('listkeys', namespace=namespace, values=values)
2026 2029 return values
2027 2030
2028 2031 def debugwireargs(self, one, two, three=None, four=None, five=None):
2029 2032 '''used to test argument passing over the wire'''
2030 2033 return "%s %s %s %s %s" % (one, two, three, four, five)
2031 2034
2032 2035 def savecommitmessage(self, text):
2033 2036 fp = self.vfs('last-message.txt', 'wb')
2034 2037 try:
2035 2038 fp.write(text)
2036 2039 finally:
2037 2040 fp.close()
2038 2041 return self.pathto(fp.name[len(self.root) + 1:])
2039 2042
2040 2043 # used to avoid circular references so destructors work
2041 2044 def aftertrans(files):
2042 2045 renamefiles = [tuple(t) for t in files]
2043 2046 def a():
2044 2047 for vfs, src, dest in renamefiles:
2045 2048 # if src and dest refer to the same file, vfs.rename is a no-op,
2046 2049 # leaving both src and dest on disk. delete dest to make sure
2047 2050 # the rename couldn't be such a no-op.
2048 2051 vfs.tryunlink(dest)
2049 2052 try:
2050 2053 vfs.rename(src, dest)
2051 2054 except OSError: # journal file does not yet exist
2052 2055 pass
2053 2056 return a
2054 2057
2055 2058 def undoname(fn):
2056 2059 base, name = os.path.split(fn)
2057 2060 assert name.startswith('journal')
2058 2061 return os.path.join(base, name.replace('journal', 'undo', 1))
2059 2062
2060 2063 def instance(ui, path, create):
2061 2064 return localrepository(ui, util.urllocalpath(path), create)
2062 2065
2063 2066 def islocal(path):
2064 2067 return True
2065 2068
2066 2069 def newreporequirements(repo):
2067 2070 """Determine the set of requirements for a new local repository.
2068 2071
2069 2072 Extensions can wrap this function to specify custom requirements for
2070 2073 new repositories.
2071 2074 """
2072 2075 ui = repo.ui
2073 2076 requirements = {'revlogv1'}
2074 2077 if ui.configbool('format', 'usestore', True):
2075 2078 requirements.add('store')
2076 2079 if ui.configbool('format', 'usefncache', True):
2077 2080 requirements.add('fncache')
2078 2081 if ui.configbool('format', 'dotencode', True):
2079 2082 requirements.add('dotencode')
2080 2083
2081 2084 compengine = ui.config('experimental', 'format.compression', 'zlib')
2082 2085 if compengine not in util.compengines:
2083 2086 raise error.Abort(_('compression engine %s defined by '
2084 2087 'experimental.format.compression not available') %
2085 2088 compengine,
2086 2089 hint=_('run "hg debuginstall" to list available '
2087 2090 'compression engines'))
2088 2091
2089 2092 # zlib is the historical default and doesn't need an explicit requirement.
2090 2093 if compengine != 'zlib':
2091 2094 requirements.add('exp-compression-%s' % compengine)
2092 2095
2093 2096 if scmutil.gdinitconfig(ui):
2094 2097 requirements.add('generaldelta')
2095 2098 if ui.configbool('experimental', 'treemanifest', False):
2096 2099 requirements.add('treemanifest')
2097 2100 if ui.configbool('experimental', 'manifestv2', False):
2098 2101 requirements.add('manifestv2')
2099 2102
2100 2103 revlogv2 = ui.config('experimental', 'revlogv2')
2101 2104 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2102 2105 requirements.remove('revlogv1')
2103 2106 # generaldelta is implied by revlogv2.
2104 2107 requirements.discard('generaldelta')
2105 2108 requirements.add(REVLOGV2_REQUIREMENT)
2106 2109
2107 2110 return requirements
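# Editorial sketch (not part of this change): the docstring above notes that
# extensions can wrap newreporequirements() to declare custom requirements.
# A minimal extension doing so could look like the following; 'myext',
# 'exp-myrequirement' and the config knob are made-up names.
#
# in myext.py:
from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'use-my-format', False):
        requirements.add('exp-myrequirement')
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)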
@@ -1,2199 +1,2208 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import errno
19 19 import hashlib
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullid,
29 29 nullrev,
30 30 wdirhex,
31 31 wdirid,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from . import (
36 36 ancestor,
37 37 error,
38 38 mdiff,
39 39 policy,
40 40 pycompat,
41 41 templatefilters,
42 42 util,
43 43 )
44 44
45 45 parsers = policy.importmod(r'parsers')
46 46
47 47 _pack = struct.pack
48 48 _unpack = struct.unpack
49 49 # Aliased for performance.
50 50 _zlibdecompress = zlib.decompress
51 51
52 52 # revlog header flags
53 53 REVLOGV0 = 0
54 54 REVLOGV1 = 1
55 55 # Dummy value until file format is finalized.
56 56 # Reminder: change the bounds check in revlog.__init__ when this is changed.
57 57 REVLOGV2 = 0xDEAD
58 58 FLAG_INLINE_DATA = (1 << 16)
59 59 FLAG_GENERALDELTA = (1 << 17)
60 60 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
61 61 REVLOG_DEFAULT_FORMAT = REVLOGV1
62 62 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
63 63 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
64 64 REVLOGV2_FLAGS = REVLOGV1_FLAGS
65 65
66 66 # revlog index flags
67 67 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
68 68 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
69 69 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
70 70 REVIDX_DEFAULT_FLAGS = 0
71 71 # stable order in which flags need to be processed and their processors applied
72 72 REVIDX_FLAGS_ORDER = [
73 73 REVIDX_ISCENSORED,
74 74 REVIDX_ELLIPSIS,
75 75 REVIDX_EXTSTORED,
76 76 ]
77 77 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
78 78
79 79 # max size of revlog with inline data
80 80 _maxinline = 131072
81 81 _chunksize = 1048576
82 82
83 83 RevlogError = error.RevlogError
84 84 LookupError = error.LookupError
85 85 CensoredNodeError = error.CensoredNodeError
86 86 ProgrammingError = error.ProgrammingError
87 87
88 88 # Store flag processors (cf. 'addflagprocessor()' to register)
89 89 _flagprocessors = {
90 90 REVIDX_ISCENSORED: None,
91 91 }
92 92
93 93 def addflagprocessor(flag, processor):
94 94 """Register a flag processor on a revision data flag.
95 95
96 96 Invariant:
97 97 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
98 98 - Only one flag processor can be registered on a specific flag.
99 99 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
100 100 following signatures:
101 101 - (read) f(self, rawtext) -> text, bool
102 102 - (write) f(self, text) -> rawtext, bool
103 103 - (raw) f(self, rawtext) -> bool
104 104 "text" is presented to the user. "rawtext" is stored in revlog data, not
105 105 directly visible to the user.
106 106 The boolean returned by these transforms is used to determine whether
107 107 the returned text can be used for hash integrity checking. For example,
108 108 if "write" returns False, then "text" is used to generate hash. If
109 109 "write" returns True, that basically means "rawtext" returned by "write"
110 110 should be used to generate hash. Usually, "write" and "read" return
111 111 different booleans. And "raw" returns a same boolean as "write".
112 112
113 113 Note: The 'raw' transform is used for changegroup generation and in some
114 114 debug commands. In this case the transform only indicates whether the
115 115 contents can be used for hash integrity checks.
116 116 """
117 117 if not flag & REVIDX_KNOWN_FLAGS:
118 118 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
119 119 raise ProgrammingError(msg)
120 120 if flag not in REVIDX_FLAGS_ORDER:
121 121 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
122 122 raise ProgrammingError(msg)
123 123 if flag in _flagprocessors:
124 124 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
125 125 raise error.Abort(msg)
126 126 _flagprocessors[flag] = processor
127 127
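# Editorial sketch (not part of this change): registering a processor tuple
# for one of the flags above, following the (read, write, raw) contract
# documented in addflagprocessor(). The transforms here are placeholders;
# real users of this hook (e.g. external storage) do something meaningful.
def _exampleread(self, rawtext):
    # turn stored data into user-visible text; True -> result is hashable
    return rawtext, True

def _examplewrite(self, text):
    # turn user text into stored data; False -> hash the original "text"
    return text, False

def _exampleraw(self, rawtext):
    # raw data cannot be used directly for hash integrity checks here
    return False

addflagprocessor(REVIDX_EXTSTORED, (_exampleread, _examplewrite, _exampleraw))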
128 128 def getoffset(q):
129 129 return int(q >> 16)
130 130
131 131 def gettype(q):
132 132 return int(q & 0xFFFF)
133 133
134 134 def offset_type(offset, type):
135 135 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
136 136 raise ValueError('unknown revlog index flags')
137 137 return int(int(offset) << 16 | type)
138 138
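# Editorial illustration (not part of the change): the first field of an
# index entry packs "offset << 16 | flags", so getoffset() and gettype()
# above are exact inverses of offset_type().
_packed = offset_type(4096, REVIDX_ISCENSORED)
assert getoffset(_packed) == 4096
assert gettype(_packed) == REVIDX_ISCENSORED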
139 139 _nullhash = hashlib.sha1(nullid)
140 140
141 141 def hash(text, p1, p2):
142 142 """generate a hash from the given text and its parent hashes
143 143
144 144 This hash combines both the current file contents and its history
145 145 in a manner that makes it easy to distinguish nodes with the same
146 146 content in the revision graph.
147 147 """
148 148 # As of now, if one of the parent nodes is null, p2 is null
149 149 if p2 == nullid:
150 150 # deep copy of a hash is faster than creating one
151 151 s = _nullhash.copy()
152 152 s.update(p1)
153 153 else:
154 154 # none of the parent nodes are nullid
155 155 l = [p1, p2]
156 156 l.sort()
157 157 s = hashlib.sha1(l[0])
158 158 s.update(l[1])
159 159 s.update(text)
160 160 return s.digest()
161 161
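# Editorial restatement (not part of the change) of hash() above: the node
# hash is sha1(min(p1, p2) + max(p1, p2) + text); sorting the parents makes
# the result independent of parent order, and the p2 == nullid branch is
# just a cached-prefix fast path for the common single-parent case.
def _refhash(text, p1, p2):
    a, b = sorted([p1, p2])
    return hashlib.sha1(a + b + text).digest()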
162 162 # index v0:
163 163 # 4 bytes: offset
164 164 # 4 bytes: compressed length
165 165 # 4 bytes: base rev
166 166 # 4 bytes: link rev
167 167 # 20 bytes: parent 1 nodeid
168 168 # 20 bytes: parent 2 nodeid
169 169 # 20 bytes: nodeid
170 170 indexformatv0 = ">4l20s20s20s"
171 171
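# Editorial note (not part of the change): the v0 layout above packs to a
# fixed 76-byte record (4 * 4-byte ints + 3 * 20-byte nodeids), which is the
# value revlogoldio.size takes below.
assert struct.calcsize(indexformatv0) == 76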
172 172 class revlogoldio(object):
173 173 def __init__(self):
174 174 self.size = struct.calcsize(indexformatv0)
175 175
176 176 def parseindex(self, data, inline):
177 177 s = self.size
178 178 index = []
179 179 nodemap = {nullid: nullrev}
180 180 n = off = 0
181 181 l = len(data)
182 182 while off + s <= l:
183 183 cur = data[off:off + s]
184 184 off += s
185 185 e = _unpack(indexformatv0, cur)
186 186 # transform to revlogv1 format
187 187 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
188 188 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
189 189 index.append(e2)
190 190 nodemap[e[6]] = n
191 191 n += 1
192 192
193 193 # add the magic null revision at -1
194 194 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
195 195
196 196 return index, nodemap, None
197 197
198 198 def packentry(self, entry, node, version, rev):
199 199 if gettype(entry[0]):
200 200 raise RevlogError(_('index entry flags need revlog version 1'))
201 201 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
202 202 node(entry[5]), node(entry[6]), entry[7])
203 203 return _pack(indexformatv0, *e2)
204 204
205 205 # index ng:
206 206 # 6 bytes: offset
207 207 # 2 bytes: flags
208 208 # 4 bytes: compressed length
209 209 # 4 bytes: uncompressed length
210 210 # 4 bytes: base rev
211 211 # 4 bytes: link rev
212 212 # 4 bytes: parent 1 rev
213 213 # 4 bytes: parent 2 rev
214 214 # 32 bytes: nodeid
215 215 indexformatng = ">Qiiiiii20s12x"
216 216 versionformat = ">I"
217 217
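# Editorial note (not part of the change): the v1/"ng" layout above packs to
# a fixed 64-byte record (8 + 6 * 4 + 20 + 12 bytes of padding); for rev 0,
# revlogio.packentry() below overwrites the first 4 bytes of the entry with
# the version header packed via versionformat.
assert struct.calcsize(indexformatng) == 64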
218 218 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
219 219 # signed integer)
220 220 _maxentrysize = 0x7fffffff
221 221
222 222 class revlogio(object):
223 223 def __init__(self):
224 224 self.size = struct.calcsize(indexformatng)
225 225
226 226 def parseindex(self, data, inline):
227 227 # call the C implementation to parse the index data
228 228 index, cache = parsers.parse_index2(data, inline)
229 229 return index, getattr(index, 'nodemap', None), cache
230 230
231 231 def packentry(self, entry, node, version, rev):
232 232 p = _pack(indexformatng, *entry)
233 233 if rev == 0:
234 234 p = _pack(versionformat, version) + p[4:]
235 235 return p
236 236
237 237 class revlog(object):
238 238 """
239 239 the underlying revision storage object
240 240
241 241 A revlog consists of two parts, an index and the revision data.
242 242
243 243 The index is a file with a fixed record size containing
244 244 information on each revision, including its nodeid (hash), the
245 245 nodeids of its parents, the position and offset of its data within
246 246 the data file, and the revision it's based on. Finally, each entry
247 247 contains a linkrev entry that can serve as a pointer to external
248 248 data.
249 249
250 250 The revision data itself is a linear collection of data chunks.
251 251 Each chunk represents a revision and is usually represented as a
252 252 delta against the previous chunk. To bound lookup time, runs of
253 253 deltas are limited to about 2 times the length of the original
254 254 version data. This makes retrieval of a version proportional to
255 255 its size, or O(1) relative to the number of revisions.
256 256
257 257 Both pieces of the revlog are written to in an append-only
258 258 fashion, which means we never need to rewrite a file to insert or
259 259 remove data, and can use some simple techniques to avoid the need
260 260 for locking while reading.
261 261
262 262 If checkambig, indexfile is opened with checkambig=True at
263 263 writing, to avoid file stat ambiguity.
264 264 """
265 265 def __init__(self, opener, indexfile, datafile=None, checkambig=False):
266 266 """
267 267 create a revlog object
268 268
269 269 opener is a function that abstracts the file opening operation
270 270 and can be used to implement COW semantics or the like.
271 271 """
272 272 self.indexfile = indexfile
273 273 self.datafile = datafile or (indexfile[:-2] + ".d")
274 274 self.opener = opener
275 275 # When True, indexfile is opened with checkambig=True at writing, to
276 276 # avoid file stat ambiguity.
277 277 self._checkambig = checkambig
278 278 # 3-tuple of (node, rev, text) for a raw revision.
279 279 self._cache = None
280 280 # Maps rev to chain base rev.
281 281 self._chainbasecache = util.lrucachedict(100)
282 282 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
283 283 self._chunkcache = (0, '')
284 284 # How much data to read and cache into the raw revlog data cache.
285 285 self._chunkcachesize = 65536
286 286 self._maxchainlen = None
287 287 self._aggressivemergedeltas = False
288 288 self.index = []
289 289 # Mapping of partial identifiers to full nodes.
290 290 self._pcache = {}
291 291 # Mapping of revision integer to full node.
292 292 self._nodecache = {nullid: nullrev}
293 293 self._nodepos = None
294 294 self._compengine = 'zlib'
295 self._maxdeltachainspan = -1
295 296
296 297 v = REVLOG_DEFAULT_VERSION
297 298 opts = getattr(opener, 'options', None)
298 299 if opts is not None:
299 300 if 'revlogv2' in opts:
300 301 # version 2 revlogs always use generaldelta.
301 302 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
302 303 elif 'revlogv1' in opts:
303 304 if 'generaldelta' in opts:
304 305 v |= FLAG_GENERALDELTA
305 306 else:
306 307 v = 0
307 308 if 'chunkcachesize' in opts:
308 309 self._chunkcachesize = opts['chunkcachesize']
309 310 if 'maxchainlen' in opts:
310 311 self._maxchainlen = opts['maxchainlen']
311 312 if 'aggressivemergedeltas' in opts:
312 313 self._aggressivemergedeltas = opts['aggressivemergedeltas']
313 314 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
314 315 if 'compengine' in opts:
315 316 self._compengine = opts['compengine']
317 if 'maxdeltachainspan' in opts:
318 self._maxdeltachainspan = opts['maxdeltachainspan']
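# Editorial note on the two lines just above (the substance of this change):
# judging by its name and the -1 default set earlier in __init__ (unlimited),
# _maxdeltachainspan bounds how far apart on disk the revisions of a delta
# chain may be spread. Like the other knobs here it arrives via
# opener.options; the user-facing config name (presumably something like
# experimental.maxdeltachainspan) is wired up on the repository side and is
# not shown in this hunk, so treat that name as an assumption.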
316 319
317 320 if self._chunkcachesize <= 0:
318 321 raise RevlogError(_('revlog chunk cache size %r is not greater '
319 322 'than 0') % self._chunkcachesize)
320 323 elif self._chunkcachesize & (self._chunkcachesize - 1):
321 324 raise RevlogError(_('revlog chunk cache size %r is not a power '
322 325 'of 2') % self._chunkcachesize)
323 326
324 327 indexdata = ''
325 328 self._initempty = True
326 329 try:
327 330 f = self.opener(self.indexfile)
328 331 indexdata = f.read()
329 332 f.close()
330 333 if len(indexdata) > 0:
331 334 v = struct.unpack(versionformat, indexdata[:4])[0]
332 335 self._initempty = False
333 336 except IOError as inst:
334 337 if inst.errno != errno.ENOENT:
335 338 raise
336 339
337 340 self.version = v
338 341 self._inline = v & FLAG_INLINE_DATA
339 342 self._generaldelta = v & FLAG_GENERALDELTA
340 343 flags = v & ~0xFFFF
341 344 fmt = v & 0xFFFF
342 345 if fmt == REVLOGV0:
343 346 if flags:
344 347 raise RevlogError(_('unknown flags (%#04x) in version %d '
345 348 'revlog %s') %
346 349 (flags >> 16, fmt, self.indexfile))
347 350 elif fmt == REVLOGV1:
348 351 if flags & ~REVLOGV1_FLAGS:
349 352 raise RevlogError(_('unknown flags (%#04x) in version %d '
350 353 'revlog %s') %
351 354 (flags >> 16, fmt, self.indexfile))
352 355 elif fmt == REVLOGV2:
353 356 if flags & ~REVLOGV2_FLAGS:
354 357 raise RevlogError(_('unknown flags (%#04x) in version %d '
355 358 'revlog %s') %
356 359 (flags >> 16, fmt, self.indexfile))
357 360 else:
358 361 raise RevlogError(_('unknown version (%d) in revlog %s') %
359 362 (fmt, self.indexfile))
360 363
361 364 self.storedeltachains = True
362 365
363 366 self._io = revlogio()
364 367 if self.version == REVLOGV0:
365 368 self._io = revlogoldio()
366 369 try:
367 370 d = self._io.parseindex(indexdata, self._inline)
368 371 except (ValueError, IndexError):
369 372 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
370 373 self.index, nodemap, self._chunkcache = d
371 374 if nodemap is not None:
372 375 self.nodemap = self._nodecache = nodemap
373 376 if not self._chunkcache:
374 377 self._chunkclear()
375 378 # revnum -> (chain-length, sum-delta-length)
376 379 self._chaininfocache = {}
377 380 # revlog header -> revlog compressor
378 381 self._decompressors = {}
379 382
380 383 @util.propertycache
381 384 def _compressor(self):
382 385 return util.compengines[self._compengine].revlogcompressor()
383 386
384 387 def tip(self):
385 388 return self.node(len(self.index) - 2)
386 389 def __contains__(self, rev):
387 390 return 0 <= rev < len(self)
388 391 def __len__(self):
389 392 return len(self.index) - 1
390 393 def __iter__(self):
391 394 return iter(xrange(len(self)))
392 395 def revs(self, start=0, stop=None):
393 396 """iterate over all rev in this revlog (from start to stop)"""
394 397 step = 1
395 398 if stop is not None:
396 399 if start > stop:
397 400 step = -1
398 401 stop += step
399 402 else:
400 403 stop = len(self)
401 404 return xrange(start, stop, step)
402 405
403 406 @util.propertycache
404 407 def nodemap(self):
405 408 self.rev(self.node(0))
406 409 return self._nodecache
407 410
408 411 def hasnode(self, node):
409 412 try:
410 413 self.rev(node)
411 414 return True
412 415 except KeyError:
413 416 return False
414 417
415 418 def clearcaches(self):
416 419 self._cache = None
417 420 self._chainbasecache.clear()
418 421 self._chunkcache = (0, '')
419 422 self._pcache = {}
420 423
421 424 try:
422 425 self._nodecache.clearcaches()
423 426 except AttributeError:
424 427 self._nodecache = {nullid: nullrev}
425 428 self._nodepos = None
426 429
427 430 def rev(self, node):
428 431 try:
429 432 return self._nodecache[node]
430 433 except TypeError:
431 434 raise
432 435 except RevlogError:
433 436 # parsers.c radix tree lookup failed
434 437 if node == wdirid:
435 438 raise error.WdirUnsupported
436 439 raise LookupError(node, self.indexfile, _('no node'))
437 440 except KeyError:
438 441 # pure python cache lookup failed
439 442 n = self._nodecache
440 443 i = self.index
441 444 p = self._nodepos
442 445 if p is None:
443 446 p = len(i) - 2
444 447 for r in xrange(p, -1, -1):
445 448 v = i[r][7]
446 449 n[v] = r
447 450 if v == node:
448 451 self._nodepos = r - 1
449 452 return r
450 453 if node == wdirid:
451 454 raise error.WdirUnsupported
452 455 raise LookupError(node, self.indexfile, _('no node'))
453 456
454 457 # Accessors for index entries.
455 458
456 459 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
457 460 # are flags.
458 461 def start(self, rev):
459 462 return int(self.index[rev][0] >> 16)
460 463
461 464 def flags(self, rev):
462 465 return self.index[rev][0] & 0xFFFF
463 466
464 467 def length(self, rev):
465 468 return self.index[rev][1]
466 469
467 470 def rawsize(self, rev):
468 471 """return the length of the uncompressed text for a given revision"""
469 472 l = self.index[rev][2]
470 473 if l >= 0:
471 474 return l
472 475
473 476 t = self.revision(rev, raw=True)
474 477 return len(t)
475 478
476 479 def size(self, rev):
477 480 """length of non-raw text (processed by a "read" flag processor)"""
478 481 # fast path: if no "read" flag processor could change the content,
479 482 # size is rawsize. note: ELLIPSIS is known to not change the content.
480 483 flags = self.flags(rev)
481 484 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
482 485 return self.rawsize(rev)
483 486
484 487 return len(self.revision(rev, raw=False))
485 488
486 489 def chainbase(self, rev):
487 490 base = self._chainbasecache.get(rev)
488 491 if base is not None:
489 492 return base
490 493
491 494 index = self.index
492 495 base = index[rev][3]
493 496 while base != rev:
494 497 rev = base
495 498 base = index[rev][3]
496 499
497 500 self._chainbasecache[rev] = base
498 501 return base
499 502
500 503 def linkrev(self, rev):
501 504 return self.index[rev][4]
502 505
503 506 def parentrevs(self, rev):
504 507 try:
505 508 return self.index[rev][5:7]
506 509 except IndexError:
507 510 if rev == wdirrev:
508 511 raise error.WdirUnsupported
509 512 raise
510 513
511 514 def node(self, rev):
512 515 try:
513 516 return self.index[rev][7]
514 517 except IndexError:
515 518 if rev == wdirrev:
516 519 raise error.WdirUnsupported
517 520 raise
518 521
519 522 # Derived from index values.
520 523
521 524 def end(self, rev):
522 525 return self.start(rev) + self.length(rev)
523 526
524 527 def parents(self, node):
525 528 i = self.index
526 529 d = i[self.rev(node)]
527 530 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
528 531
529 532 def chainlen(self, rev):
530 533 return self._chaininfo(rev)[0]
531 534
532 535 def _chaininfo(self, rev):
533 536 chaininfocache = self._chaininfocache
534 537 if rev in chaininfocache:
535 538 return chaininfocache[rev]
536 539 index = self.index
537 540 generaldelta = self._generaldelta
538 541 iterrev = rev
539 542 e = index[iterrev]
540 543 clen = 0
541 544 compresseddeltalen = 0
542 545 while iterrev != e[3]:
543 546 clen += 1
544 547 compresseddeltalen += e[1]
545 548 if generaldelta:
546 549 iterrev = e[3]
547 550 else:
548 551 iterrev -= 1
549 552 if iterrev in chaininfocache:
550 553 t = chaininfocache[iterrev]
551 554 clen += t[0]
552 555 compresseddeltalen += t[1]
553 556 break
554 557 e = index[iterrev]
555 558 else:
556 559 # Add text length of base since decompressing that also takes
557 560 # work. For cache hits the length is already included.
558 561 compresseddeltalen += e[1]
559 562 r = (clen, compresseddeltalen)
560 563 chaininfocache[rev] = r
561 564 return r
562 565
563 566 def _deltachain(self, rev, stoprev=None):
564 567 """Obtain the delta chain for a revision.
565 568
566 569 ``stoprev`` specifies a revision to stop at. If not specified, we
567 570 stop at the base of the chain.
568 571
569 572 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
570 573 revs in ascending order and ``stopped`` is a bool indicating whether
571 574 ``stoprev`` was hit.
572 575 """
573 576 # Try C implementation.
574 577 try:
575 578 return self.index.deltachain(rev, stoprev, self._generaldelta)
576 579 except AttributeError:
577 580 pass
578 581
579 582 chain = []
580 583
581 584 # Alias to prevent attribute lookup in tight loop.
582 585 index = self.index
583 586 generaldelta = self._generaldelta
584 587
585 588 iterrev = rev
586 589 e = index[iterrev]
587 590 while iterrev != e[3] and iterrev != stoprev:
588 591 chain.append(iterrev)
589 592 if generaldelta:
590 593 iterrev = e[3]
591 594 else:
592 595 iterrev -= 1
593 596 e = index[iterrev]
594 597
595 598 if iterrev == stoprev:
596 599 stopped = True
597 600 else:
598 601 chain.append(iterrev)
599 602 stopped = False
600 603
601 604 chain.reverse()
602 605 return chain, stopped
603 606
604 607 def ancestors(self, revs, stoprev=0, inclusive=False):
605 608 """Generate the ancestors of 'revs' in reverse topological order.
606 609 Does not generate revs lower than stoprev.
607 610
608 611 See the documentation for ancestor.lazyancestors for more details."""
609 612
610 613 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
611 614 inclusive=inclusive)
612 615
613 616 def descendants(self, revs):
614 617 """Generate the descendants of 'revs' in revision order.
615 618
616 619 Yield a sequence of revision numbers starting with a child of
617 620 some rev in revs, i.e., each revision is *not* considered a
618 621 descendant of itself. Results are ordered by revision number (a
619 622 topological sort)."""
620 623 first = min(revs)
621 624 if first == nullrev:
622 625 for i in self:
623 626 yield i
624 627 return
625 628
626 629 seen = set(revs)
627 630 for i in self.revs(start=first + 1):
628 631 for x in self.parentrevs(i):
629 632 if x != nullrev and x in seen:
630 633 seen.add(i)
631 634 yield i
632 635 break
633 636
634 637 def findcommonmissing(self, common=None, heads=None):
635 638 """Return a tuple of the ancestors of common and the ancestors of heads
636 639 that are not ancestors of common. In revset terminology, we return the
637 640 tuple:
638 641
639 642 ::common, (::heads) - (::common)
640 643
641 644 The list is sorted by revision number, meaning it is
642 645 topologically sorted.
643 646
644 647 'heads' and 'common' are both lists of node IDs. If heads is
645 648 not supplied, uses all of the revlog's heads. If common is not
646 649 supplied, uses nullid."""
647 650 if common is None:
648 651 common = [nullid]
649 652 if heads is None:
650 653 heads = self.heads()
651 654
652 655 common = [self.rev(n) for n in common]
653 656 heads = [self.rev(n) for n in heads]
654 657
655 658 # we want the ancestors, but inclusive
656 659 class lazyset(object):
657 660 def __init__(self, lazyvalues):
658 661 self.addedvalues = set()
659 662 self.lazyvalues = lazyvalues
660 663
661 664 def __contains__(self, value):
662 665 return value in self.addedvalues or value in self.lazyvalues
663 666
664 667 def __iter__(self):
665 668 added = self.addedvalues
666 669 for r in added:
667 670 yield r
668 671 for r in self.lazyvalues:
669 672 if not r in added:
670 673 yield r
671 674
672 675 def add(self, value):
673 676 self.addedvalues.add(value)
674 677
675 678 def update(self, values):
676 679 self.addedvalues.update(values)
677 680
678 681 has = lazyset(self.ancestors(common))
679 682 has.add(nullrev)
680 683 has.update(common)
681 684
682 685 # take all ancestors from heads that aren't in has
683 686 missing = set()
684 687 visit = collections.deque(r for r in heads if r not in has)
685 688 while visit:
686 689 r = visit.popleft()
687 690 if r in missing:
688 691 continue
689 692 else:
690 693 missing.add(r)
691 694 for p in self.parentrevs(r):
692 695 if p not in has:
693 696 visit.append(p)
694 697 missing = list(missing)
695 698 missing.sort()
696 699 return has, [self.node(miss) for miss in missing]
697 700
698 701 def incrementalmissingrevs(self, common=None):
699 702 """Return an object that can be used to incrementally compute the
700 703 revision numbers of the ancestors of arbitrary sets that are not
701 704 ancestors of common. This is an ancestor.incrementalmissingancestors
702 705 object.
703 706
704 707 'common' is a list of revision numbers. If common is not supplied, uses
705 708 nullrev.
706 709 """
707 710 if common is None:
708 711 common = [nullrev]
709 712
710 713 return ancestor.incrementalmissingancestors(self.parentrevs, common)
711 714
712 715 def findmissingrevs(self, common=None, heads=None):
713 716 """Return the revision numbers of the ancestors of heads that
714 717 are not ancestors of common.
715 718
716 719 More specifically, return a list of revision numbers corresponding to
717 720 nodes N such that every N satisfies the following constraints:
718 721
719 722 1. N is an ancestor of some node in 'heads'
720 723 2. N is not an ancestor of any node in 'common'
721 724
722 725 The list is sorted by revision number, meaning it is
723 726 topologically sorted.
724 727
725 728 'heads' and 'common' are both lists of revision numbers. If heads is
726 729 not supplied, uses all of the revlog's heads. If common is not
727 730 supplied, uses nullrev.
728 731 if common is None:
729 732 common = [nullrev]
730 733 if heads is None:
731 734 heads = self.headrevs()
732 735
733 736 inc = self.incrementalmissingrevs(common=common)
734 737 return inc.missingancestors(heads)
735 738
736 739 def findmissing(self, common=None, heads=None):
737 740 """Return the ancestors of heads that are not ancestors of common.
738 741
739 742 More specifically, return a list of nodes N such that every N
740 743 satisfies the following constraints:
741 744
742 745 1. N is an ancestor of some node in 'heads'
743 746 2. N is not an ancestor of any node in 'common'
744 747
745 748 The list is sorted by revision number, meaning it is
746 749 topologically sorted.
747 750
748 751 'heads' and 'common' are both lists of node IDs. If heads is
749 752 not supplied, uses all of the revlog's heads. If common is not
750 753 supplied, uses nullid."""
751 754 if common is None:
752 755 common = [nullid]
753 756 if heads is None:
754 757 heads = self.heads()
755 758
756 759 common = [self.rev(n) for n in common]
757 760 heads = [self.rev(n) for n in heads]
758 761
759 762 inc = self.incrementalmissingrevs(common=common)
760 763 return [self.node(r) for r in inc.missingancestors(heads)]
761 764
762 765 def nodesbetween(self, roots=None, heads=None):
763 766 """Return a topological path from 'roots' to 'heads'.
764 767
765 768 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
766 769 topologically sorted list of all nodes N that satisfy both of
767 770 these constraints:
768 771
769 772 1. N is a descendant of some node in 'roots'
770 773 2. N is an ancestor of some node in 'heads'
771 774
772 775 Every node is considered to be both a descendant and an ancestor
773 776 of itself, so every reachable node in 'roots' and 'heads' will be
774 777 included in 'nodes'.
775 778
776 779 'outroots' is the list of reachable nodes in 'roots', i.e., the
777 780 subset of 'roots' that is returned in 'nodes'. Likewise,
778 781 'outheads' is the subset of 'heads' that is also in 'nodes'.
779 782
780 783 'roots' and 'heads' are both lists of node IDs. If 'roots' is
781 784 unspecified, uses nullid as the only root. If 'heads' is
782 785 unspecified, uses list of all of the revlog's heads."""
783 786 nonodes = ([], [], [])
784 787 if roots is not None:
785 788 roots = list(roots)
786 789 if not roots:
787 790 return nonodes
788 791 lowestrev = min([self.rev(n) for n in roots])
789 792 else:
790 793 roots = [nullid] # Everybody's a descendant of nullid
791 794 lowestrev = nullrev
792 795 if (lowestrev == nullrev) and (heads is None):
793 796 # We want _all_ the nodes!
794 797 return ([self.node(r) for r in self], [nullid], list(self.heads()))
795 798 if heads is None:
796 799 # All nodes are ancestors, so the latest ancestor is the last
797 800 # node.
798 801 highestrev = len(self) - 1
799 802 # Set ancestors to None to signal that every node is an ancestor.
800 803 ancestors = None
801 804 # Set heads to an empty dictionary for later discovery of heads
802 805 heads = {}
803 806 else:
804 807 heads = list(heads)
805 808 if not heads:
806 809 return nonodes
807 810 ancestors = set()
808 811 # Turn heads into a dictionary so we can remove 'fake' heads.
809 812 # Also, later we will be using it to filter out the heads we can't
810 813 # find from roots.
811 814 heads = dict.fromkeys(heads, False)
812 815 # Start at the top and keep marking parents until we're done.
813 816 nodestotag = set(heads)
814 817 # Remember where the top was so we can use it as a limit later.
815 818 highestrev = max([self.rev(n) for n in nodestotag])
816 819 while nodestotag:
817 820 # grab a node to tag
818 821 n = nodestotag.pop()
819 822 # Never tag nullid
820 823 if n == nullid:
821 824 continue
822 825 # A node's revision number represents its place in a
823 826 # topologically sorted list of nodes.
824 827 r = self.rev(n)
825 828 if r >= lowestrev:
826 829 if n not in ancestors:
827 830 # If we are possibly a descendant of one of the roots
828 831 # and we haven't already been marked as an ancestor
829 832 ancestors.add(n) # Mark as ancestor
830 833 # Add non-nullid parents to list of nodes to tag.
831 834 nodestotag.update([p for p in self.parents(n) if
832 835 p != nullid])
833 836 elif n in heads: # We've seen it before, is it a fake head?
834 837 # So it is, real heads should not be the ancestors of
835 838 # any other heads.
836 839 heads.pop(n)
837 840 if not ancestors:
838 841 return nonodes
839 842 # Now that we have our set of ancestors, we want to remove any
840 843 # roots that are not ancestors.
841 844
842 845 # If one of the roots was nullid, everything is included anyway.
843 846 if lowestrev > nullrev:
844 847 # But, since we weren't, let's recompute the lowest rev to not
845 848 # include roots that aren't ancestors.
846 849
847 850 # Filter out roots that aren't ancestors of heads
848 851 roots = [root for root in roots if root in ancestors]
849 852 # Recompute the lowest revision
850 853 if roots:
851 854 lowestrev = min([self.rev(root) for root in roots])
852 855 else:
853 856 # No more roots? Return empty list
854 857 return nonodes
855 858 else:
856 859 # We are descending from nullid, and don't need to care about
857 860 # any other roots.
858 861 lowestrev = nullrev
859 862 roots = [nullid]
860 863 # Transform our roots list into a set.
861 864 descendants = set(roots)
862 865 # Also, keep the original roots so we can filter out roots that aren't
863 866 # 'real' roots (i.e. are descended from other roots).
864 867 roots = descendants.copy()
865 868 # Our topologically sorted list of output nodes.
866 869 orderedout = []
867 870 # Don't start at nullid since we don't want nullid in our output list,
868 871 # and if nullid shows up in descendants, empty parents will look like
869 872 # they're descendants.
870 873 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
871 874 n = self.node(r)
872 875 isdescendant = False
873 876 if lowestrev == nullrev: # Everybody is a descendant of nullid
874 877 isdescendant = True
875 878 elif n in descendants:
876 879 # n is already a descendant
877 880 isdescendant = True
878 881 # This check only needs to be done here because all the roots
879 882 # will start being marked as descendants before the loop.
880 883 if n in roots:
881 884 # If n was a root, check if it's a 'real' root.
882 885 p = tuple(self.parents(n))
883 886 # If any of its parents are descendants, it's not a root.
884 887 if (p[0] in descendants) or (p[1] in descendants):
885 888 roots.remove(n)
886 889 else:
887 890 p = tuple(self.parents(n))
888 891 # A node is a descendant if either of its parents are
889 892 # descendants. (We seeded the descendants set with the roots
890 893 # up there, remember?)
891 894 if (p[0] in descendants) or (p[1] in descendants):
892 895 descendants.add(n)
893 896 isdescendant = True
894 897 if isdescendant and ((ancestors is None) or (n in ancestors)):
895 898 # Only include nodes that are both descendants and ancestors.
896 899 orderedout.append(n)
897 900 if (ancestors is not None) and (n in heads):
898 901 # We're trying to figure out which heads are reachable
899 902 # from roots.
900 903 # Mark this head as having been reached
901 904 heads[n] = True
902 905 elif ancestors is None:
903 906 # Otherwise, we're trying to discover the heads.
904 907 # Assume this is a head because if it isn't, the next step
905 908 # will eventually remove it.
906 909 heads[n] = True
907 910 # But, obviously its parents aren't.
908 911 for p in self.parents(n):
909 912 heads.pop(p, None)
910 913 heads = [head for head, flag in heads.iteritems() if flag]
911 914 roots = list(roots)
912 915 assert orderedout
913 916 assert roots
914 917 assert heads
915 918 return (orderedout, roots, heads)
916 919
917 920 def headrevs(self):
918 921 try:
919 922 return self.index.headrevs()
920 923 except AttributeError:
921 924 return self._headrevs()
922 925
923 926 def computephases(self, roots):
924 927 return self.index.computephasesmapsets(roots)
925 928
926 929 def _headrevs(self):
927 930 count = len(self)
928 931 if not count:
929 932 return [nullrev]
930 933 # we won't iter over filtered rev so nobody is a head at start
931 934 ishead = [0] * (count + 1)
932 935 index = self.index
933 936 for r in self:
934 937 ishead[r] = 1 # I may be a head
935 938 e = index[r]
936 939 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
937 940 return [r for r, val in enumerate(ishead) if val]
938 941
939 942 def heads(self, start=None, stop=None):
940 943 """return the list of all nodes that have no children
941 944
942 945 if start is specified, only heads that are descendants of
943 946 start will be returned
944 947 if stop is specified, it will consider all the revs from stop
945 948 as if they had no children
946 949 """
947 950 if start is None and stop is None:
948 951 if not len(self):
949 952 return [nullid]
950 953 return [self.node(r) for r in self.headrevs()]
951 954
952 955 if start is None:
953 956 start = nullid
954 957 if stop is None:
955 958 stop = []
956 959 stoprevs = set([self.rev(n) for n in stop])
957 960 startrev = self.rev(start)
958 961 reachable = {startrev}
959 962 heads = {startrev}
960 963
961 964 parentrevs = self.parentrevs
962 965 for r in self.revs(start=startrev + 1):
963 966 for p in parentrevs(r):
964 967 if p in reachable:
965 968 if r not in stoprevs:
966 969 reachable.add(r)
967 970 heads.add(r)
968 971 if p in heads and p not in stoprevs:
969 972 heads.remove(p)
970 973
971 974 return [self.node(r) for r in heads]
972 975
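As a worked illustration of the reachability walk in ``heads(start, stop)``, here is a minimal sketch on a hand-made DAG (the parent table is an assumption used only for illustration, and the ``stop`` handling is omitted):

    # toy DAG: 0 <- 1, then 1 <- 2 and 1 <- 3 (two branch tips)
    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}

    def toyheads(startrev):
        reachable = {startrev}
        heads = {startrev}
        for r in sorted(r for r in parents if r > startrev):
            for p in parents[r]:
                if p in reachable:
                    reachable.add(r)
                    heads.add(r)
                    if p in heads:
                        heads.remove(p)
        return sorted(heads)

    print(toyheads(0))   # [2, 3]: both branch tips descend from rev 0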
973 976 def children(self, node):
974 977 """find the children of a given node"""
975 978 c = []
976 979 p = self.rev(node)
977 980 for r in self.revs(start=p + 1):
978 981 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
979 982 if prevs:
980 983 for pr in prevs:
981 984 if pr == p:
982 985 c.append(self.node(r))
983 986 elif p == nullrev:
984 987 c.append(self.node(r))
985 988 return c
986 989
987 990 def descendant(self, start, end):
988 991 if start == nullrev:
989 992 return True
990 993 for i in self.descendants([start]):
991 994 if i == end:
992 995 return True
993 996 elif i > end:
994 997 break
995 998 return False
996 999
997 1000 def commonancestorsheads(self, a, b):
998 1001 """calculate all the heads of the common ancestors of nodes a and b"""
999 1002 a, b = self.rev(a), self.rev(b)
1000 1003 try:
1001 1004 ancs = self.index.commonancestorsheads(a, b)
1002 1005 except (AttributeError, OverflowError): # C implementation failed
1003 1006 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
1004 1007 return pycompat.maplist(self.node, ancs)
1005 1008
1006 1009 def isancestor(self, a, b):
1007 1010 """return True if node a is an ancestor of node b
1008 1011
1009 1012 The implementation of this is trivial but the use of
1010 1013 commonancestorsheads is not."""
1011 1014 return a in self.commonancestorsheads(a, b)
1012 1015
1013 1016 def ancestor(self, a, b):
1014 1017 """calculate the "best" common ancestor of nodes a and b"""
1015 1018
1016 1019 a, b = self.rev(a), self.rev(b)
1017 1020 try:
1018 1021 ancs = self.index.ancestors(a, b)
1019 1022 except (AttributeError, OverflowError):
1020 1023 ancs = ancestor.ancestors(self.parentrevs, a, b)
1021 1024 if ancs:
1022 1025 # choose a consistent winner when there's a tie
1023 1026 return min(map(self.node, ancs))
1024 1027 return nullid
1025 1028
1026 1029 def _match(self, id):
1027 1030 if isinstance(id, int):
1028 1031 # rev
1029 1032 return self.node(id)
1030 1033 if len(id) == 20:
1031 1034 # possibly a binary node
1032 1035 # odds of a binary node being all hex in ASCII are 1 in 10**25
1033 1036 try:
1034 1037 node = id
1035 1038 self.rev(node) # quick search the index
1036 1039 return node
1037 1040 except LookupError:
1038 1041 pass # may be partial hex id
1039 1042 try:
1040 1043 # str(rev)
1041 1044 rev = int(id)
1042 1045 if str(rev) != id:
1043 1046 raise ValueError
1044 1047 if rev < 0:
1045 1048 rev = len(self) + rev
1046 1049 if rev < 0 or rev >= len(self):
1047 1050 raise ValueError
1048 1051 return self.node(rev)
1049 1052 except (ValueError, OverflowError):
1050 1053 pass
1051 1054 if len(id) == 40:
1052 1055 try:
1053 1056 # a full hex nodeid?
1054 1057 node = bin(id)
1055 1058 self.rev(node)
1056 1059 return node
1057 1060 except (TypeError, LookupError):
1058 1061 pass
1059 1062
1060 1063 def _partialmatch(self, id):
1061 1064 maybewdir = wdirhex.startswith(id)
1062 1065 try:
1063 1066 partial = self.index.partialmatch(id)
1064 1067 if partial and self.hasnode(partial):
1065 1068 if maybewdir:
1066 1069 # single 'ff...' match in radix tree, ambiguous with wdir
1067 1070 raise RevlogError
1068 1071 return partial
1069 1072 if maybewdir:
1070 1073 # no 'ff...' match in radix tree, wdir identified
1071 1074 raise error.WdirUnsupported
1072 1075 return None
1073 1076 except RevlogError:
1074 1077 # parsers.c radix tree lookup gave multiple matches
1075 1078 # fast path: for unfiltered changelog, radix tree is accurate
1076 1079 if not getattr(self, 'filteredrevs', None):
1077 1080 raise LookupError(id, self.indexfile,
1078 1081 _('ambiguous identifier'))
1079 1082 # fall through to slow path that filters hidden revisions
1080 1083 except (AttributeError, ValueError):
1081 1084 # we are pure python, or key was too short to search radix tree
1082 1085 pass
1083 1086
1084 1087 if id in self._pcache:
1085 1088 return self._pcache[id]
1086 1089
1087 1090 if len(id) < 40:
1088 1091 try:
1089 1092 # hex(node)[:...]
1090 1093 l = len(id) // 2 # grab an even number of digits
1091 1094 prefix = bin(id[:l * 2])
1092 1095 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1093 1096 nl = [n for n in nl if hex(n).startswith(id) and
1094 1097 self.hasnode(n)]
1095 1098 if len(nl) > 0:
1096 1099 if len(nl) == 1 and not maybewdir:
1097 1100 self._pcache[id] = nl[0]
1098 1101 return nl[0]
1099 1102 raise LookupError(id, self.indexfile,
1100 1103 _('ambiguous identifier'))
1101 1104 if maybewdir:
1102 1105 raise error.WdirUnsupported
1103 1106 return None
1104 1107 except (TypeError, binascii.Error):
1105 1108 pass
1106 1109
1107 1110 def lookup(self, id):
1108 1111 """locate a node based on:
1109 1112 - revision number or str(revision number)
1110 1113 - nodeid or subset of hex nodeid
1111 1114 """
1112 1115 n = self._match(id)
1113 1116 if n is not None:
1114 1117 return n
1115 1118 n = self._partialmatch(id)
1116 1119 if n:
1117 1120 return n
1118 1121
1119 1122 raise LookupError(id, self.indexfile, _('no match found'))
1120 1123
1121 1124 def cmp(self, node, text):
1122 1125 """compare text with a given file revision
1123 1126
1124 1127 returns True if text is different than what is stored.
1125 1128 """
1126 1129 p1, p2 = self.parents(node)
1127 1130 return hash(text, p1, p2) != node
1128 1131
1129 1132 def _cachesegment(self, offset, data):
1130 1133 """Add a segment to the revlog cache.
1131 1134
1132 1135 Accepts an absolute offset and the data that is at that location.
1133 1136 """
1134 1137 o, d = self._chunkcache
1135 1138 # try to add to existing cache
1136 1139 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1137 1140 self._chunkcache = o, d + data
1138 1141 else:
1139 1142 self._chunkcache = offset, data
1140 1143
1141 1144 def _readsegment(self, offset, length, df=None):
1142 1145 """Load a segment of raw data from the revlog.
1143 1146
1144 1147 Accepts an absolute offset, length to read, and an optional existing
1145 1148 file handle to read from.
1146 1149
1147 1150 If an existing file handle is passed, it will be seeked and the
1148 1151 original seek position will NOT be restored.
1149 1152
1150 1153 Returns a str or buffer of raw byte data.
1151 1154 """
1152 1155 if df is not None:
1153 1156 closehandle = False
1154 1157 else:
1155 1158 if self._inline:
1156 1159 df = self.opener(self.indexfile)
1157 1160 else:
1158 1161 df = self.opener(self.datafile)
1159 1162 closehandle = True
1160 1163
1161 1164 # Cache data both forward and backward around the requested
1162 1165 # data, in a fixed size window. This helps speed up operations
1163 1166 # involving reading the revlog backwards.
1164 1167 cachesize = self._chunkcachesize
1165 1168 realoffset = offset & ~(cachesize - 1)
1166 1169 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1167 1170 - realoffset)
1168 1171 df.seek(realoffset)
1169 1172 d = df.read(reallength)
1170 1173 if closehandle:
1171 1174 df.close()
1172 1175 self._cachesegment(realoffset, d)
1173 1176 if offset != realoffset or reallength != length:
1174 1177 return util.buffer(d, offset - realoffset, length)
1175 1178 return d
1176 1179
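The window rounding above is plain bit arithmetic and can be checked in isolation; the 64 KiB cache window below is an assumed value used only for illustration:

    cachesize = 65536                     # assumed chunk cache size (power of two)
    offset, length = 70000, 100           # requested byte range
    realoffset = offset & ~(cachesize - 1)             # round down to a window boundary
    reallength = (((offset + length + cachesize)
                   & ~(cachesize - 1)) - realoffset)    # round the end up, with padding
    print(realoffset, reallength)         # 65536 65536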
1177 1180 def _getsegment(self, offset, length, df=None):
1178 1181 """Obtain a segment of raw data from the revlog.
1179 1182
1180 1183 Accepts an absolute offset, length of bytes to obtain, and an
1181 1184 optional file handle to the already-opened revlog. If the file
1182 1185 handle is used, its original seek position will not be preserved.
1183 1186
1184 1187 Requests for data may be returned from a cache.
1185 1188
1186 1189 Returns a str or a buffer instance of raw byte data.
1187 1190 """
1188 1191 o, d = self._chunkcache
1189 1192 l = len(d)
1190 1193
1191 1194 # is it in the cache?
1192 1195 cachestart = offset - o
1193 1196 cacheend = cachestart + length
1194 1197 if cachestart >= 0 and cacheend <= l:
1195 1198 if cachestart == 0 and cacheend == l:
1196 1199 return d # avoid a copy
1197 1200 return util.buffer(d, cachestart, cacheend - cachestart)
1198 1201
1199 1202 return self._readsegment(offset, length, df=df)
1200 1203
1201 1204 def _getsegmentforrevs(self, startrev, endrev, df=None):
1202 1205 """Obtain a segment of raw data corresponding to a range of revisions.
1203 1206
1204 1207 Accepts the start and end revisions and an optional already-open
1205 1208 file handle to be used for reading. If the file handle is used, its
1206 1209 seek position will not be preserved.
1207 1210
1208 1211 Requests for data may be satisfied by a cache.
1209 1212
1210 1213 Returns a 2-tuple of (offset, data) for the requested range of
1211 1214 revisions. Offset is the integer offset from the beginning of the
1212 1215 revlog and data is a str or buffer of the raw byte data.
1213 1216
1214 1217 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1215 1218 to determine where each revision's data begins and ends.
1216 1219 """
1217 1220 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1218 1221 # (functions are expensive).
1219 1222 index = self.index
1220 1223 istart = index[startrev]
1221 1224 start = int(istart[0] >> 16)
1222 1225 if startrev == endrev:
1223 1226 end = start + istart[1]
1224 1227 else:
1225 1228 iend = index[endrev]
1226 1229 end = int(iend[0] >> 16) + iend[1]
1227 1230
1228 1231 if self._inline:
1229 1232 start += (startrev + 1) * self._io.size
1230 1233 end += (endrev + 1) * self._io.size
1231 1234 length = end - start
1232 1235
1233 1236 return start, self._getsegment(start, length, df=df)
1234 1237
1235 1238 def _chunk(self, rev, df=None):
1236 1239 """Obtain a single decompressed chunk for a revision.
1237 1240
1238 1241 Accepts an integer revision and an optional already-open file handle
1239 1242 to be used for reading. If used, the seek position of the file will not
1240 1243 be preserved.
1241 1244
1242 1245 Returns a str holding uncompressed data for the requested revision.
1243 1246 """
1244 1247 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1245 1248
1246 1249 def _chunks(self, revs, df=None):
1247 1250 """Obtain decompressed chunks for the specified revisions.
1248 1251
1249 1252 Accepts an iterable of numeric revisions that are assumed to be in
1250 1253 ascending order. Also accepts an optional already-open file handle
1251 1254 to be used for reading. If used, the seek position of the file will
1252 1255 not be preserved.
1253 1256
1254 1257 This function is similar to calling ``self._chunk()`` multiple times,
1255 1258 but is faster.
1256 1259
1257 1260 Returns a list with decompressed data for each requested revision.
1258 1261 """
1259 1262 if not revs:
1260 1263 return []
1261 1264 start = self.start
1262 1265 length = self.length
1263 1266 inline = self._inline
1264 1267 iosize = self._io.size
1265 1268 buffer = util.buffer
1266 1269
1267 1270 l = []
1268 1271 ladd = l.append
1269 1272
1270 1273 try:
1271 1274 offset, data = self._getsegmentforrevs(revs[0], revs[-1], df=df)
1272 1275 except OverflowError:
1273 1276 # issue4215 - we can't cache a run of chunks greater than
1274 1277 # 2G on Windows
1275 1278 return [self._chunk(rev, df=df) for rev in revs]
1276 1279
1277 1280 decomp = self.decompress
1278 1281 for rev in revs:
1279 1282 chunkstart = start(rev)
1280 1283 if inline:
1281 1284 chunkstart += (rev + 1) * iosize
1282 1285 chunklength = length(rev)
1283 1286 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1284 1287
1285 1288 return l
1286 1289
1287 1290 def _chunkclear(self):
1288 1291 """Clear the raw chunk cache."""
1289 1292 self._chunkcache = (0, '')
1290 1293
1291 1294 def deltaparent(self, rev):
1292 1295 """return deltaparent of the given revision"""
1293 1296 base = self.index[rev][3]
1294 1297 if base == rev:
1295 1298 return nullrev
1296 1299 elif self._generaldelta:
1297 1300 return base
1298 1301 else:
1299 1302 return rev - 1
1300 1303
1301 1304 def revdiff(self, rev1, rev2):
1302 1305 """return or calculate a delta between two revisions
1303 1306
1304 1307 The delta calculated is in binary form and is intended to be written to
1305 1308 revlog data directly. So this function needs raw revision data.
1306 1309 """
1307 1310 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1308 1311 return bytes(self._chunk(rev2))
1309 1312
1310 1313 return mdiff.textdiff(self.revision(rev1, raw=True),
1311 1314 self.revision(rev2, raw=True))
1312 1315
1313 1316 def revision(self, nodeorrev, _df=None, raw=False):
1314 1317 """return an uncompressed revision of a given node or revision
1315 1318 number.
1316 1319
1317 1320 _df - an existing file handle to read from. (internal-only)
1318 1321 raw - an optional argument specifying if the revision data is to be
1319 1322 treated as raw data when applying flag transforms. 'raw' should be set
1320 1323 to True when generating changegroups or in debug commands.
1321 1324 """
1322 1325 if isinstance(nodeorrev, int):
1323 1326 rev = nodeorrev
1324 1327 node = self.node(rev)
1325 1328 else:
1326 1329 node = nodeorrev
1327 1330 rev = None
1328 1331
1329 1332 cachedrev = None
1330 1333 flags = None
1331 1334 rawtext = None
1332 1335 if node == nullid:
1333 1336 return ""
1334 1337 if self._cache:
1335 1338 if self._cache[0] == node:
1336 1339 # _cache only stores rawtext
1337 1340 if raw:
1338 1341 return self._cache[2]
1339 1342 # duplicated, but good for perf
1340 1343 if rev is None:
1341 1344 rev = self.rev(node)
1342 1345 if flags is None:
1343 1346 flags = self.flags(rev)
1344 1347 # no extra flags set, no flag processor runs, text = rawtext
1345 1348 if flags == REVIDX_DEFAULT_FLAGS:
1346 1349 return self._cache[2]
1347 1350 # rawtext is reusable. need to run flag processor
1348 1351 rawtext = self._cache[2]
1349 1352
1350 1353 cachedrev = self._cache[1]
1351 1354
1352 1355 # look up what we need to read
1353 1356 if rawtext is None:
1354 1357 if rev is None:
1355 1358 rev = self.rev(node)
1356 1359
1357 1360 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1358 1361 if stopped:
1359 1362 rawtext = self._cache[2]
1360 1363
1361 1364 # drop cache to save memory
1362 1365 self._cache = None
1363 1366
1364 1367 bins = self._chunks(chain, df=_df)
1365 1368 if rawtext is None:
1366 1369 rawtext = bytes(bins[0])
1367 1370 bins = bins[1:]
1368 1371
1369 1372 rawtext = mdiff.patches(rawtext, bins)
1370 1373 self._cache = (node, rev, rawtext)
1371 1374
1372 1375 if flags is None:
1373 1376 if rev is None:
1374 1377 rev = self.rev(node)
1375 1378 flags = self.flags(rev)
1376 1379
1377 1380 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1378 1381 if validatehash:
1379 1382 self.checkhash(text, node, rev=rev)
1380 1383
1381 1384 return text
1382 1385
1383 1386 def hash(self, text, p1, p2):
1384 1387 """Compute a node hash.
1385 1388
1386 1389 Available as a function so that subclasses can replace the hash
1387 1390 as needed.
1388 1391 """
1389 1392 return hash(text, p1, p2)
1390 1393
1391 1394 def _processflags(self, text, flags, operation, raw=False):
1392 1395 """Inspect revision data flags and applies transforms defined by
1393 1396 registered flag processors.
1394 1397
1395 1398 ``text`` - the revision data to process
1396 1399 ``flags`` - the revision flags
1397 1400 ``operation`` - the operation being performed (read or write)
1398 1401 ``raw`` - an optional argument describing if the raw transform should be
1399 1402 applied.
1400 1403
1401 1404 This method processes the flags in the order (or reverse order if
1402 1405 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1403 1406 flag processors registered for present flags. The order of flags defined
1404 1407 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1405 1408
1406 1409 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1407 1410 processed text and ``validatehash`` is a bool indicating whether the
1408 1411 returned text should be checked for hash integrity.
1409 1412
1410 1413 Note: If the ``raw`` argument is set, it has precedence over the
1411 1414 operation and will only update the value of ``validatehash``.
1412 1415 """
1413 1416 # fast path: no flag processors will run
1414 1417 if flags == 0:
1415 1418 return text, True
1416 1419 if not operation in ('read', 'write'):
1417 1420 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
1418 1421 # Check all flags are known.
1419 1422 if flags & ~REVIDX_KNOWN_FLAGS:
1420 1423 raise RevlogError(_("incompatible revision flag '%#x'") %
1421 1424 (flags & ~REVIDX_KNOWN_FLAGS))
1422 1425 validatehash = True
1423 1426 # Depending on the operation (read or write), the order might be
1424 1427 # reversed due to non-commutative transforms.
1425 1428 orderedflags = REVIDX_FLAGS_ORDER
1426 1429 if operation == 'write':
1427 1430 orderedflags = reversed(orderedflags)
1428 1431
1429 1432 for flag in orderedflags:
1430 1433 # If a flagprocessor has been registered for a known flag, apply the
1431 1434 # related operation transform and update result tuple.
1432 1435 if flag & flags:
1433 1436 vhash = True
1434 1437
1435 1438 if flag not in _flagprocessors:
1436 1439 message = _("missing processor for flag '%#x'") % (flag)
1437 1440 raise RevlogError(message)
1438 1441
1439 1442 processor = _flagprocessors[flag]
1440 1443 if processor is not None:
1441 1444 readtransform, writetransform, rawtransform = processor
1442 1445
1443 1446 if raw:
1444 1447 vhash = rawtransform(self, text)
1445 1448 elif operation == 'read':
1446 1449 text, vhash = readtransform(self, text)
1447 1450 else: # write operation
1448 1451 text, vhash = writetransform(self, text)
1449 1452 validatehash = validatehash and vhash
1450 1453
1451 1454 return text, validatehash
1452 1455
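For reference, a flag processor as applied above is a ``(read, write, raw)`` triple of transforms, each called with the revlog and the text. The flag bit and the transforms below are illustrative assumptions, not registered revlog flags:

    EXAMPLEFLAG = 1 << 14    # hypothetical flag bit

    def readxform(rlog, rawtext):
        # stored form -> logical form; True asks the caller to verify the hash
        return rawtext.replace(b'\r\n', b'\n'), True

    def writexform(rlog, text):
        # logical form -> stored form
        return text.replace(b'\n', b'\r\n'), True

    def rawxform(rlog, rawtext):
        # only reports whether the stored bytes can be hash-checked as-is
        return False

    exampleprocessor = (readxform, writexform, rawxform)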
1453 1456 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1454 1457 """Check node hash integrity.
1455 1458
1456 1459 Available as a function so that subclasses can extend hash mismatch
1457 1460 behaviors as needed.
1458 1461 """
1459 1462 if p1 is None and p2 is None:
1460 1463 p1, p2 = self.parents(node)
1461 1464 if node != self.hash(text, p1, p2):
1462 1465 revornode = rev
1463 1466 if revornode is None:
1464 1467 revornode = templatefilters.short(hex(node))
1465 1468 raise RevlogError(_("integrity check failed on %s:%s")
1466 1469 % (self.indexfile, revornode))
1467 1470
1468 1471 def checkinlinesize(self, tr, fp=None):
1469 1472 """Check if the revlog is too big for inline and convert if so.
1470 1473
1471 1474 This should be called after revisions are added to the revlog. If the
1472 1475 revlog has grown too large to be an inline revlog, it will convert it
1473 1476 to use multiple index and data files.
1474 1477 """
1475 1478 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1476 1479 return
1477 1480
1478 1481 trinfo = tr.find(self.indexfile)
1479 1482 if trinfo is None:
1480 1483 raise RevlogError(_("%s not found in the transaction")
1481 1484 % self.indexfile)
1482 1485
1483 1486 trindex = trinfo[2]
1484 1487 if trindex is not None:
1485 1488 dataoff = self.start(trindex)
1486 1489 else:
1487 1490 # revlog was stripped at start of transaction, use all leftover data
1488 1491 trindex = len(self) - 1
1489 1492 dataoff = self.end(-2)
1490 1493
1491 1494 tr.add(self.datafile, dataoff)
1492 1495
1493 1496 if fp:
1494 1497 fp.flush()
1495 1498 fp.close()
1496 1499
1497 1500 df = self.opener(self.datafile, 'w')
1498 1501 try:
1499 1502 for r in self:
1500 1503 df.write(self._getsegmentforrevs(r, r)[1])
1501 1504 finally:
1502 1505 df.close()
1503 1506
1504 1507 fp = self.opener(self.indexfile, 'w', atomictemp=True,
1505 1508 checkambig=self._checkambig)
1506 1509 self.version &= ~FLAG_INLINE_DATA
1507 1510 self._inline = False
1508 1511 for i in self:
1509 1512 e = self._io.packentry(self.index[i], self.node, self.version, i)
1510 1513 fp.write(e)
1511 1514
1512 1515 # if we don't call close, the temp file will never replace the
1513 1516 # real index
1514 1517 fp.close()
1515 1518
1516 1519 tr.replace(self.indexfile, trindex * self._io.size)
1517 1520 self._chunkclear()
1518 1521
1519 1522 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1520 1523 node=None, flags=REVIDX_DEFAULT_FLAGS):
1521 1524 """add a revision to the log
1522 1525
1523 1526 text - the revision data to add
1524 1527 transaction - the transaction object used for rollback
1525 1528 link - the linkrev data to add
1526 1529 p1, p2 - the parent nodeids of the revision
1527 1530 cachedelta - an optional precomputed delta
1528 1531 node - nodeid of revision; typically node is not specified, and it is
1529 1532 computed by default as hash(text, p1, p2), however subclasses might
1530 1533 use different hashing method (and override checkhash() in such case)
1531 1534 flags - the known flags to set on the revision
1532 1535 """
1533 1536 if link == nullrev:
1534 1537 raise RevlogError(_("attempted to add linkrev -1 to %s")
1535 1538 % self.indexfile)
1536 1539
1537 1540 if flags:
1538 1541 node = node or self.hash(text, p1, p2)
1539 1542
1540 1543 rawtext, validatehash = self._processflags(text, flags, 'write')
1541 1544
1542 1545 # If the flag processor modifies the revision data, ignore any provided
1543 1546 # cachedelta.
1544 1547 if rawtext != text:
1545 1548 cachedelta = None
1546 1549
1547 1550 if len(rawtext) > _maxentrysize:
1548 1551 raise RevlogError(
1549 1552 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1550 1553 % (self.indexfile, len(rawtext)))
1551 1554
1552 1555 node = node or self.hash(rawtext, p1, p2)
1553 1556 if node in self.nodemap:
1554 1557 return node
1555 1558
1556 1559 if validatehash:
1557 1560 self.checkhash(rawtext, node, p1=p1, p2=p2)
1558 1561
1559 1562 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1560 1563 flags, cachedelta=cachedelta)
1561 1564
1562 1565 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1563 1566 cachedelta=None):
1564 1567 """add a raw revision with known flags, node and parents
1565 1568 useful when reusing a revision not stored in this revlog (ex: received
1566 1569 over wire, or read from an external bundle).
1567 1570 """
1568 1571 dfh = None
1569 1572 if not self._inline:
1570 1573 dfh = self.opener(self.datafile, "a+")
1571 1574 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1572 1575 try:
1573 1576 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1574 1577 flags, cachedelta, ifh, dfh)
1575 1578 finally:
1576 1579 if dfh:
1577 1580 dfh.close()
1578 1581 ifh.close()
1579 1582
1580 1583 def compress(self, data):
1581 1584 """Generate a possibly-compressed representation of data."""
1582 1585 if not data:
1583 1586 return '', data
1584 1587
1585 1588 compressed = self._compressor.compress(data)
1586 1589
1587 1590 if compressed:
1588 1591 # The revlog compressor added the header in the returned data.
1589 1592 return '', compressed
1590 1593
1591 1594 if data[0:1] == '\0':
1592 1595 return '', data
1593 1596 return 'u', data
1594 1597
1595 1598 def decompress(self, data):
1596 1599 """Decompress a revlog chunk.
1597 1600
1598 1601 The chunk is expected to begin with a header identifying the
1599 1602 format type so it can be routed to an appropriate decompressor.
1600 1603 """
1601 1604 if not data:
1602 1605 return data
1603 1606
1604 1607 # Revlogs are read much more frequently than they are written and many
1605 1608 # chunks only take microseconds to decompress, so performance is
1606 1609 # important here.
1607 1610 #
1608 1611 # We can make a few assumptions about revlogs:
1609 1612 #
1610 1613 # 1) the majority of chunks will be compressed (as opposed to inline
1611 1614 # raw data).
1612 1615 # 2) decompressing *any* data will likely be at least 10x slower than
1613 1616 # returning raw inline data.
1614 1617 # 3) we want to prioritize common and officially supported compression
1615 1618 # engines
1616 1619 #
1617 1620 # It follows that we want to optimize for "decompress compressed data
1618 1621 # when encoded with common and officially supported compression engines"
1619 1622 # case over "raw data" and "data encoded by less common or non-official
1620 1623 # compression engines." That is why we have the inline lookup first
1621 1624 # followed by the compengines lookup.
1622 1625 #
1623 1626 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1624 1627 # compressed chunks. And this matters for changelog and manifest reads.
1625 1628 t = data[0:1]
1626 1629
1627 1630 if t == 'x':
1628 1631 try:
1629 1632 return _zlibdecompress(data)
1630 1633 except zlib.error as e:
1631 1634 raise RevlogError(_('revlog decompress error: %s') % str(e))
1632 1635 # '\0' is more common than 'u' so it goes first.
1633 1636 elif t == '\0':
1634 1637 return data
1635 1638 elif t == 'u':
1636 1639 return util.buffer(data, 1)
1637 1640
1638 1641 try:
1639 1642 compressor = self._decompressors[t]
1640 1643 except KeyError:
1641 1644 try:
1642 1645 engine = util.compengines.forrevlogheader(t)
1643 1646 compressor = engine.revlogcompressor()
1644 1647 self._decompressors[t] = compressor
1645 1648 except KeyError:
1646 1649 raise RevlogError(_('unknown compression type %r') % t)
1647 1650
1648 1651 return compressor.decompress(data)
1649 1652
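The dispatch on the first header byte amounts to the following simplified sketch; it covers only the built-in cases and leaves out the pluggable compression-engine lookup:

    import zlib

    def toydecompress(data):
        if not data:
            return data
        t = data[0:1]
        if t == b'x':                    # zlib stream, the common case
            return zlib.decompress(data)
        if t == b'\0':                   # stored data that starts with NUL
            return data
        if t == b'u':                    # stored uncompressed, strip the header
            return data[1:]
        raise ValueError('unknown compression type %r' % t)

    print(toydecompress(zlib.compress(b'hello')))   # b'hello'
    print(toydecompress(b'uhello'))                 # b'hello'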
1650 1653 def _isgooddelta(self, d, textlen):
1651 1654 """Returns True if the given delta is good. Good means that it is within
1652 1655 the disk span, disk size, and chain length bounds that we know to be
1653 1656 performant."""
1654 1657 if d is None:
1655 1658 return False
1656 1659
1657 1660 # - 'dist' is the distance from the base revision -- bounding it limits
1658 1661 # the amount of I/O we need to do.
1659 1662 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1660 1663 # to apply -- bounding it limits the amount of CPU we consume.
1661 1664 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1662 if (dist > textlen * 4 or l > textlen or
1665
1666 defaultmax = textlen * 4
1667 maxdist = self._maxdeltachainspan
1668 if not maxdist:
1669 maxdist = dist # ensure the conditional passes
1670 maxdist = max(maxdist, defaultmax)
1671 if (dist > maxdist or l > textlen or
1663 1672 compresseddeltalen > textlen * 2 or
1664 1673 (self._maxchainlen and chainlen > self._maxchainlen)):
1665 1674 return False
1666 1675
1667 1676 return True
1668 1677
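A worked example of the distance bound introduced above; the numbers are made up, and reading a negative setting as "fall back to the textlen * 4 default" is an inference from the arithmetic rather than a statement about the shipped default:

    def distance_ok(dist, textlen, maxdeltachainspan):
        defaultmax = textlen * 4
        maxdist = maxdeltachainspan
        if not maxdist:
            maxdist = dist              # 0 means the span check never rejects
        maxdist = max(maxdist, defaultmax)
        return dist <= maxdist

    textlen = 1000                            # fulltext size of the new revision
    print(distance_ok(3500, textlen, -1))     # True : within textlen * 4
    print(distance_ok(9000, textlen, -1))     # False: beyond textlen * 4
    print(distance_ok(9000, textlen, 0))      # True : span check disabled
    print(distance_ok(9000, textlen, 8000))   # False: beyond the explicit cap
    print(distance_ok(7000, textlen, 8000))   # True : within the explicit cap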
1669 1678 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1670 1679 cachedelta, ifh, dfh, alwayscache=False):
1671 1680 """internal function to add revisions to the log
1672 1681
1673 1682 see addrevision for argument descriptions.
1674 1683
1675 1684 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1676 1685
1677 1686 invariants:
1678 1687 - rawtext is optional (can be None); if not set, cachedelta must be set.
1679 1688 if both are set, they must correspond to each other.
1680 1689 """
1681 1690 btext = [rawtext]
1682 1691 def buildtext():
1683 1692 if btext[0] is not None:
1684 1693 return btext[0]
1685 1694 baserev = cachedelta[0]
1686 1695 delta = cachedelta[1]
1687 1696 # special case deltas which replace entire base; no need to decode
1688 1697 # base revision. this neatly avoids censored bases, which throw when
1689 1698 # they're decoded.
1690 1699 hlen = struct.calcsize(">lll")
1691 1700 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1692 1701 len(delta) - hlen):
1693 1702 btext[0] = delta[hlen:]
1694 1703 else:
1695 1704 if self._inline:
1696 1705 fh = ifh
1697 1706 else:
1698 1707 fh = dfh
1699 1708 basetext = self.revision(baserev, _df=fh, raw=True)
1700 1709 btext[0] = mdiff.patch(basetext, delta)
1701 1710
1702 1711 try:
1703 1712 res = self._processflags(btext[0], flags, 'read', raw=True)
1704 1713 btext[0], validatehash = res
1705 1714 if validatehash:
1706 1715 self.checkhash(btext[0], node, p1=p1, p2=p2)
1707 1716 if flags & REVIDX_ISCENSORED:
1708 1717 raise RevlogError(_('node %s is not censored') % node)
1709 1718 except CensoredNodeError:
1710 1719 # must pass the censored index flag to add censored revisions
1711 1720 if not flags & REVIDX_ISCENSORED:
1712 1721 raise
1713 1722 return btext[0]
1714 1723
1715 1724 def builddelta(rev):
1716 1725 # can we use the cached delta?
1717 1726 if cachedelta and cachedelta[0] == rev:
1718 1727 delta = cachedelta[1]
1719 1728 else:
1720 1729 t = buildtext()
1721 1730 if self.iscensored(rev):
1722 1731 # deltas based on a censored revision must replace the
1723 1732 # full content in one patch, so delta works everywhere
1724 1733 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1725 1734 delta = header + t
1726 1735 else:
1727 1736 if self._inline:
1728 1737 fh = ifh
1729 1738 else:
1730 1739 fh = dfh
1731 1740 ptext = self.revision(rev, _df=fh, raw=True)
1732 1741 delta = mdiff.textdiff(ptext, t)
1733 1742 header, data = self.compress(delta)
1734 1743 deltalen = len(header) + len(data)
1735 1744 chainbase = self.chainbase(rev)
1736 1745 dist = deltalen + offset - self.start(chainbase)
1737 1746 if self._generaldelta:
1738 1747 base = rev
1739 1748 else:
1740 1749 base = chainbase
1741 1750 chainlen, compresseddeltalen = self._chaininfo(rev)
1742 1751 chainlen += 1
1743 1752 compresseddeltalen += deltalen
1744 1753 return (dist, deltalen, (header, data), base,
1745 1754 chainbase, chainlen, compresseddeltalen)
1746 1755
1747 1756 curr = len(self)
1748 1757 prev = curr - 1
1749 1758 offset = self.end(prev)
1750 1759 delta = None
1751 1760 p1r, p2r = self.rev(p1), self.rev(p2)
1752 1761
1753 1762 # full versions are inserted when the needed deltas
1754 1763 # become comparable to the uncompressed text
1755 1764 if rawtext is None:
1756 1765 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1757 1766 cachedelta[1])
1758 1767 else:
1759 1768 textlen = len(rawtext)
1760 1769
1761 1770 # should we try to build a delta?
1762 1771 if prev != nullrev and self.storedeltachains:
1763 1772 tested = set()
1764 1773 # This condition is true most of the time when processing
1765 1774 # changegroup data into a generaldelta repo. The only time it
1766 1775 # isn't true is if this is the first revision in a delta chain
1767 1776 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1768 1777 if cachedelta and self._generaldelta and self._lazydeltabase:
1769 1778 # Assume what we received from the server is a good choice
1770 1779 # build delta will reuse the cache
1771 1780 candidatedelta = builddelta(cachedelta[0])
1772 1781 tested.add(cachedelta[0])
1773 1782 if self._isgooddelta(candidatedelta, textlen):
1774 1783 delta = candidatedelta
1775 1784 if delta is None and self._generaldelta:
1776 1785 # exclude already lazy tested base if any
1777 1786 parents = [p for p in (p1r, p2r)
1778 1787 if p != nullrev and p not in tested]
1779 1788 if parents and not self._aggressivemergedeltas:
1780 1789 # Pick whichever parent is closer to us (to minimize the
1781 1790 # chance of having to build a fulltext).
1782 1791 parents = [max(parents)]
1783 1792 tested.update(parents)
1784 1793 pdeltas = []
1785 1794 for p in parents:
1786 1795 pd = builddelta(p)
1787 1796 if self._isgooddelta(pd, textlen):
1788 1797 pdeltas.append(pd)
1789 1798 if pdeltas:
1790 1799 delta = min(pdeltas, key=lambda x: x[1])
1791 1800 if delta is None and prev not in tested:
1792 1801 # other approaches failed, try against prev to hopefully save us a
1793 1802 # fulltext.
1794 1803 candidatedelta = builddelta(prev)
1795 1804 if self._isgooddelta(candidatedelta, textlen):
1796 1805 delta = candidatedelta
1797 1806 if delta is not None:
1798 1807 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1799 1808 else:
1800 1809 rawtext = buildtext()
1801 1810 data = self.compress(rawtext)
1802 1811 l = len(data[1]) + len(data[0])
1803 1812 base = chainbase = curr
1804 1813
1805 1814 e = (offset_type(offset, flags), l, textlen,
1806 1815 base, link, p1r, p2r, node)
1807 1816 self.index.insert(-1, e)
1808 1817 self.nodemap[node] = curr
1809 1818
1810 1819 entry = self._io.packentry(e, self.node, self.version, curr)
1811 1820 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1812 1821
1813 1822 if alwayscache and rawtext is None:
1814 1823 rawtext = buildtext()
1815 1824
1816 1825 if type(rawtext) == str: # only accept immutable objects
1817 1826 self._cache = (node, curr, rawtext)
1818 1827 self._chainbasecache[curr] = chainbase
1819 1828 return node
1820 1829
1821 1830 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1822 1831 # Files opened in a+ mode have inconsistent behavior on various
1823 1832 # platforms. Windows requires that a file positioning call be made
1824 1833 # when the file handle transitions between reads and writes. See
1825 1834 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
1826 1835 # platforms, Python or the platform itself can be buggy. Some versions
1827 1836 # of Solaris have been observed to not append at the end of the file
1828 1837 # if the file was seeked to before the end. See issue4943 for more.
1829 1838 #
1830 1839 # We work around this issue by inserting a seek() before writing.
1831 1840 # Note: This is likely not necessary on Python 3.
1832 1841 ifh.seek(0, os.SEEK_END)
1833 1842 if dfh:
1834 1843 dfh.seek(0, os.SEEK_END)
1835 1844
1836 1845 curr = len(self) - 1
1837 1846 if not self._inline:
1838 1847 transaction.add(self.datafile, offset)
1839 1848 transaction.add(self.indexfile, curr * len(entry))
1840 1849 if data[0]:
1841 1850 dfh.write(data[0])
1842 1851 dfh.write(data[1])
1843 1852 ifh.write(entry)
1844 1853 else:
1845 1854 offset += curr * self._io.size
1846 1855 transaction.add(self.indexfile, offset, curr)
1847 1856 ifh.write(entry)
1848 1857 ifh.write(data[0])
1849 1858 ifh.write(data[1])
1850 1859 self.checkinlinesize(transaction, ifh)
1851 1860
1852 1861 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1853 1862 """
1854 1863 add a delta group
1855 1864
1856 1865 given a set of deltas, add them to the revision log. the
1857 1866 first delta is against its parent, which should be in our
1858 1867 log, the rest are against the previous delta.
1859 1868
1860 1869 If ``addrevisioncb`` is defined, it will be called with arguments of
1861 1870 this revlog and the node that was added.
1862 1871 """
1863 1872
1864 1873 nodes = []
1865 1874
1866 1875 r = len(self)
1867 1876 end = 0
1868 1877 if r:
1869 1878 end = self.end(r - 1)
1870 1879 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1871 1880 isize = r * self._io.size
1872 1881 if self._inline:
1873 1882 transaction.add(self.indexfile, end + isize, r)
1874 1883 dfh = None
1875 1884 else:
1876 1885 transaction.add(self.indexfile, isize, r)
1877 1886 transaction.add(self.datafile, end)
1878 1887 dfh = self.opener(self.datafile, "a+")
1879 1888 def flush():
1880 1889 if dfh:
1881 1890 dfh.flush()
1882 1891 ifh.flush()
1883 1892 try:
1884 1893 # loop through our set of deltas
1885 1894 chain = None
1886 1895 for chunkdata in iter(lambda: cg.deltachunk(chain), {}):
1887 1896 node = chunkdata['node']
1888 1897 p1 = chunkdata['p1']
1889 1898 p2 = chunkdata['p2']
1890 1899 cs = chunkdata['cs']
1891 1900 deltabase = chunkdata['deltabase']
1892 1901 delta = chunkdata['delta']
1893 1902 flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS
1894 1903
1895 1904 nodes.append(node)
1896 1905
1897 1906 link = linkmapper(cs)
1898 1907 if node in self.nodemap:
1899 1908 # this can happen if two branches make the same change
1900 1909 chain = node
1901 1910 continue
1902 1911
1903 1912 for p in (p1, p2):
1904 1913 if p not in self.nodemap:
1905 1914 raise LookupError(p, self.indexfile,
1906 1915 _('unknown parent'))
1907 1916
1908 1917 if deltabase not in self.nodemap:
1909 1918 raise LookupError(deltabase, self.indexfile,
1910 1919 _('unknown delta base'))
1911 1920
1912 1921 baserev = self.rev(deltabase)
1913 1922
1914 1923 if baserev != nullrev and self.iscensored(baserev):
1915 1924 # if base is censored, delta must be full replacement in a
1916 1925 # single patch operation
1917 1926 hlen = struct.calcsize(">lll")
1918 1927 oldlen = self.rawsize(baserev)
1919 1928 newlen = len(delta) - hlen
1920 1929 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1921 1930 raise error.CensoredBaseError(self.indexfile,
1922 1931 self.node(baserev))
1923 1932
1924 1933 if not flags and self._peek_iscensored(baserev, delta, flush):
1925 1934 flags |= REVIDX_ISCENSORED
1926 1935
1927 1936 # We assume consumers of addrevisioncb will want to retrieve
1928 1937 # the added revision, which will require a call to
1929 1938 # revision(). revision() will fast path if there is a cache
1930 1939 # hit. So, we tell _addrevision() to always cache in this case.
1931 1940 # We're only using addgroup() in the context of changegroup
1932 1941 # generation so the revision data can always be handled as raw
1933 1942 # by the flagprocessor.
1934 1943 chain = self._addrevision(node, None, transaction, link,
1935 1944 p1, p2, flags, (baserev, delta),
1936 1945 ifh, dfh,
1937 1946 alwayscache=bool(addrevisioncb))
1938 1947
1939 1948 if addrevisioncb:
1940 1949 addrevisioncb(self, chain)
1941 1950
1942 1951 if not dfh and not self._inline:
1943 1952 # addrevision switched from inline to conventional
1944 1953 # reopen the index
1945 1954 ifh.close()
1946 1955 dfh = self.opener(self.datafile, "a+")
1947 1956 ifh = self.opener(self.indexfile, "a+",
1948 1957 checkambig=self._checkambig)
1949 1958 finally:
1950 1959 if dfh:
1951 1960 dfh.close()
1952 1961 ifh.close()
1953 1962
1954 1963 return nodes
1955 1964
1956 1965 def iscensored(self, rev):
1957 1966 """Check if a file revision is censored."""
1958 1967 return False
1959 1968
1960 1969 def _peek_iscensored(self, baserev, delta, flush):
1961 1970 """Quickly check if a delta produces a censored revision."""
1962 1971 return False
1963 1972
1964 1973 def getstrippoint(self, minlink):
1965 1974 """find the minimum rev that must be stripped to strip the linkrev
1966 1975
1967 1976 Returns a tuple containing the minimum rev and a set of all revs that
1968 1977 have linkrevs that will be broken by this strip.
1969 1978 """
1970 1979 brokenrevs = set()
1971 1980 strippoint = len(self)
1972 1981
1973 1982 heads = {}
1974 1983 futurelargelinkrevs = set()
1975 1984 for head in self.headrevs():
1976 1985 headlinkrev = self.linkrev(head)
1977 1986 heads[head] = headlinkrev
1978 1987 if headlinkrev >= minlink:
1979 1988 futurelargelinkrevs.add(headlinkrev)
1980 1989
1981 1990 # This algorithm involves walking down the rev graph, starting at the
1982 1991 # heads. Since the revs are topologically sorted according to linkrev,
1983 1992 # once all head linkrevs are below the minlink, we know there are
1984 1993 # no more revs that could have a linkrev greater than minlink.
1985 1994 # So we can stop walking.
1986 1995 while futurelargelinkrevs:
1987 1996 strippoint -= 1
1988 1997 linkrev = heads.pop(strippoint)
1989 1998
1990 1999 if linkrev < minlink:
1991 2000 brokenrevs.add(strippoint)
1992 2001 else:
1993 2002 futurelargelinkrevs.remove(linkrev)
1994 2003
1995 2004 for p in self.parentrevs(strippoint):
1996 2005 if p != nullrev:
1997 2006 plinkrev = self.linkrev(p)
1998 2007 heads[p] = plinkrev
1999 2008 if plinkrev >= minlink:
2000 2009 futurelargelinkrevs.add(plinkrev)
2001 2010
2002 2011 return strippoint, brokenrevs
2003 2012
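A toy run of the same walk, with hand-made parents and linkrevs (all values are assumptions for illustration):

    parents = {0: (-1,), 1: (0,), 2: (1,), 3: (1,)}   # rev -> parent revs
    linkrev = {0: 0, 1: 1, 2: 3, 3: 2}                # rev -> changelog rev

    def toystrippoint(minlink):
        heads = {r: linkrev[r] for r in (2, 3)}       # the two toy head revs
        futurelarge = {lr for lr in heads.values() if lr >= minlink}
        brokenrevs, strippoint = set(), 4             # 4 == length of the toy revlog
        while futurelarge:
            strippoint -= 1
            lr = heads.pop(strippoint)
            if lr < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelarge.remove(lr)
            for p in parents[strippoint]:
                if p != -1:
                    heads[p] = linkrev[p]
                    if linkrev[p] >= minlink:
                        futurelarge.add(linkrev[p])
        return strippoint, brokenrevs

    print(toystrippoint(2))   # (2, set()): revs 2 and 3 get stripped, none broken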
2004 2013 def strip(self, minlink, transaction):
2005 2014 """truncate the revlog on the first revision with a linkrev >= minlink
2006 2015
2007 2016 This function is called when we're stripping revision minlink and
2008 2017 its descendants from the repository.
2009 2018
2010 2019 We have to remove all revisions with linkrev >= minlink, because
2011 2020 the equivalent changelog revisions will be renumbered after the
2012 2021 strip.
2013 2022
2014 2023 So we truncate the revlog on the first of these revisions, and
2015 2024 trust that the caller has saved the revisions that shouldn't be
2016 2025 removed and that it'll re-add them after this truncation.
2017 2026 """
2018 2027 if len(self) == 0:
2019 2028 return
2020 2029
2021 2030 rev, _ = self.getstrippoint(minlink)
2022 2031 if rev == len(self):
2023 2032 return
2024 2033
2025 2034 # first truncate the files on disk
2026 2035 end = self.start(rev)
2027 2036 if not self._inline:
2028 2037 transaction.add(self.datafile, end)
2029 2038 end = rev * self._io.size
2030 2039 else:
2031 2040 end += rev * self._io.size
2032 2041
2033 2042 transaction.add(self.indexfile, end)
2034 2043
2035 2044 # then reset internal state in memory to forget those revisions
2036 2045 self._cache = None
2037 2046 self._chaininfocache = {}
2038 2047 self._chunkclear()
2039 2048 for x in xrange(rev, len(self)):
2040 2049 del self.nodemap[self.node(x)]
2041 2050
2042 2051 del self.index[rev:-1]
2043 2052
2044 2053 def checksize(self):
2045 2054 expected = 0
2046 2055 if len(self):
2047 2056 expected = max(0, self.end(len(self) - 1))
2048 2057
2049 2058 try:
2050 2059 f = self.opener(self.datafile)
2051 2060 f.seek(0, 2)
2052 2061 actual = f.tell()
2053 2062 f.close()
2054 2063 dd = actual - expected
2055 2064 except IOError as inst:
2056 2065 if inst.errno != errno.ENOENT:
2057 2066 raise
2058 2067 dd = 0
2059 2068
2060 2069 try:
2061 2070 f = self.opener(self.indexfile)
2062 2071 f.seek(0, 2)
2063 2072 actual = f.tell()
2064 2073 f.close()
2065 2074 s = self._io.size
2066 2075 i = max(0, actual // s)
2067 2076 di = actual - (i * s)
2068 2077 if self._inline:
2069 2078 databytes = 0
2070 2079 for r in self:
2071 2080 databytes += max(0, self.length(r))
2072 2081 dd = 0
2073 2082 di = actual - len(self) * s - databytes
2074 2083 except IOError as inst:
2075 2084 if inst.errno != errno.ENOENT:
2076 2085 raise
2077 2086 di = 0
2078 2087
2079 2088 return (dd, di)
2080 2089
2081 2090 def files(self):
2082 2091 res = [self.indexfile]
2083 2092 if not self._inline:
2084 2093 res.append(self.datafile)
2085 2094 return res
2086 2095
2087 2096 DELTAREUSEALWAYS = 'always'
2088 2097 DELTAREUSESAMEREVS = 'samerevs'
2089 2098 DELTAREUSENEVER = 'never'
2090 2099
2091 2100 DELTAREUSEALL = {'always', 'samerevs', 'never'}
2092 2101
2093 2102 def clone(self, tr, destrevlog, addrevisioncb=None,
2094 2103 deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
2095 2104 """Copy this revlog to another, possibly with format changes.
2096 2105
2097 2106 The destination revlog will contain the same revisions and nodes.
2098 2107 However, it may not be bit-for-bit identical due to e.g. delta encoding
2099 2108 differences.
2100 2109
2101 2110 The ``deltareuse`` argument controls how deltas from the existing revlog
2102 2111 are preserved in the destination revlog. The argument can have the
2103 2112 following values:
2104 2113
2105 2114 DELTAREUSEALWAYS
2106 2115 Deltas will always be reused (if possible), even if the destination
2107 2116 revlog would not select the same revisions for the delta. This is the
2108 2117 fastest mode of operation.
2109 2118 DELTAREUSESAMEREVS
2110 2119 Deltas will be reused if the destination revlog would pick the same
2111 2120 revisions for the delta. This mode strikes a balance between speed
2112 2121 and optimization.
2113 2122 DELTAREUSENEVER
2114 2123 Deltas will never be reused. This is the slowest mode of execution.
2115 2124 This mode can be used to recompute deltas (e.g. if the diff/delta
2116 2125 algorithm changes).
2117 2126
2118 2127 Delta computation can be slow, so the choice of delta reuse policy can
2119 2128 significantly affect run time.
2120 2129
2121 2130 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2122 2131 two extremes. Deltas will be reused if they are appropriate. But if the
2123 2132 delta could choose a better revision, it will do so. This means if you
2124 2133 are converting a non-generaldelta revlog to a generaldelta revlog,
2125 2134 deltas will be recomputed if the delta's parent isn't a parent of the
2126 2135 revision.
2127 2136
2128 2137 In addition to the delta policy, the ``aggressivemergedeltas`` argument
2129 2138 controls whether to compute deltas against both parents for merges.
2130 2139 If unspecified, the destination revlog's current setting is used.
2131 2140 """
2132 2141 if deltareuse not in self.DELTAREUSEALL:
2133 2142 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2134 2143
2135 2144 if len(destrevlog):
2136 2145 raise ValueError(_('destination revlog is not empty'))
2137 2146
2138 2147 if getattr(self, 'filteredrevs', None):
2139 2148 raise ValueError(_('source revlog has filtered revisions'))
2140 2149 if getattr(destrevlog, 'filteredrevs', None):
2141 2150 raise ValueError(_('destination revlog has filtered revisions'))
2142 2151
2143 2152 # lazydeltabase controls whether to reuse a cached delta, if possible.
2144 2153 oldlazydeltabase = destrevlog._lazydeltabase
2145 2154 oldamd = destrevlog._aggressivemergedeltas
2146 2155
2147 2156 try:
2148 2157 if deltareuse == self.DELTAREUSEALWAYS:
2149 2158 destrevlog._lazydeltabase = True
2150 2159 elif deltareuse == self.DELTAREUSESAMEREVS:
2151 2160 destrevlog._lazydeltabase = False
2152 2161
2153 2162 destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd
2154 2163
2155 2164 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2156 2165 self.DELTAREUSESAMEREVS)
2157 2166
2158 2167 index = self.index
2159 2168 for rev in self:
2160 2169 entry = index[rev]
2161 2170
2162 2171 # Some classes override linkrev to take filtered revs into
2163 2172 # account. Use raw entry from index.
2164 2173 flags = entry[0] & 0xffff
2165 2174 linkrev = entry[4]
2166 2175 p1 = index[entry[5]][7]
2167 2176 p2 = index[entry[6]][7]
2168 2177 node = entry[7]
2169 2178
2170 2179 # (Possibly) reuse the delta from the revlog if allowed and
2171 2180 # the revlog chunk is a delta.
2172 2181 cachedelta = None
2173 2182 rawtext = None
2174 2183 if populatecachedelta:
2175 2184 dp = self.deltaparent(rev)
2176 2185 if dp != nullrev:
2177 2186 cachedelta = (dp, str(self._chunk(rev)))
2178 2187
2179 2188 if not cachedelta:
2180 2189 rawtext = self.revision(rev, raw=True)
2181 2190
2182 2191 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2183 2192 checkambig=False)
2184 2193 dfh = None
2185 2194 if not destrevlog._inline:
2186 2195 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2187 2196 try:
2188 2197 destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
2189 2198 flags, cachedelta, ifh, dfh)
2190 2199 finally:
2191 2200 if dfh:
2192 2201 dfh.close()
2193 2202 ifh.close()
2194 2203
2195 2204 if addrevisioncb:
2196 2205 addrevisioncb(self, rev, node)
2197 2206 finally:
2198 2207 destrevlog._lazydeltabase = oldlazydeltabase
2199 2208 destrevlog._aggressivemergedeltas = oldamd
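The deltareuse policies documented on ``clone()`` could be driven roughly as below; how the source revlog, destination revlog and transaction are obtained is left as an assumption, since it depends on repository internals outside this change:

    def recomputedeltas(srclog, destlog, tr):
        # force every delta to be recomputed in the destination revlog,
        # considering both parents of merges as possible delta bases
        srclog.clone(tr, destlog,
                     deltareuse=srclog.DELTAREUSENEVER,
                     aggressivemergedeltas=True)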
@@ -1,163 +1,351 b''
1 1 Check whether the size of a generaldelta revlog is not bigger than its
2 2 regular equivalent. The test would fail if generaldelta were a naive
3 3 implementation of parentdelta: the third manifest revision would be fully
4 4 inserted due to the big distance from its parent revision (zero).
5 5
6 6 $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
7 7 $ cd repo
8 8 $ echo foo > foo
9 9 $ echo bar > bar
10 10 $ echo baz > baz
11 11 $ hg commit -q -Am boo
12 12 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
13 13 $ for r in 1 2 3; do
14 14 > echo $r > foo
15 15 > hg commit -q -m $r
16 16 > hg up -q -r 0
17 17 > hg pull . -q -r $r -R ../gdrepo
18 18 > done
19 19
20 20 $ cd ..
21 21 >>> from __future__ import print_function
22 22 >>> import os
23 23 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
24 24 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
25 25 >>> if regsize < gdsize:
26 26 ... print('generaldelta increased size of manifest')
27 27
28 28 Verify rev reordering doesn't create invalid bundles (issue4462)
29 29 This requires a commit tree that, when pulled, will reorder manifest revs such
30 30 that the second manifest to create a file rev will be ordered before the first
31 31 manifest to create that file rev. We also need to do a partial pull to ensure
32 32 reordering happens. At the end we verify the linkrev points at the earliest
33 33 commit.
34 34
35 35 $ hg init server --config format.generaldelta=True
36 36 $ cd server
37 37 $ touch a
38 38 $ hg commit -Aqm a
39 39 $ echo x > x
40 40 $ echo y > y
41 41 $ hg commit -Aqm xy
42 42 $ hg up -q '.^'
43 43 $ echo x > x
44 44 $ echo z > z
45 45 $ hg commit -Aqm xz
46 46 $ hg up -q 1
47 47 $ echo b > b
48 48 $ hg commit -Aqm b
49 49 $ hg merge -q 2
50 50 $ hg commit -Aqm merge
51 51 $ echo c > c
52 52 $ hg commit -Aqm c
53 53 $ hg log -G -T '{rev} {shortest(node)} {desc}'
54 54 @ 5 ebb8 c
55 55 |
56 56 o 4 baf7 merge
57 57 |\
58 58 | o 3 a129 b
59 59 | |
60 60 o | 2 958c xz
61 61 | |
62 62 | o 1 f00c xy
63 63 |/
64 64 o 0 3903 a
65 65
66 66 $ cd ..
67 67 $ hg init client --config format.generaldelta=false --config format.usegeneraldelta=false
68 68 $ cd client
69 69 $ hg pull -q ../server -r 4
70 70 $ hg debugindex x
71 71 rev offset length base linkrev nodeid p1 p2
72 72 0 0 3 0 1 1406e7411862 000000000000 000000000000
73 73
74 74 $ cd ..
75 75
76 76 Test "usegeneraldelta" config
77 77 (repos are generaldelta, but incoming bundles are not re-deltified)
78 78
79 79 Deltas coming from the server keep the server's delta base and are not recompressed.
80 80 (the aggressive version is also included for comparison)
81 81
82 82 $ hg clone repo --pull --config format.usegeneraldelta=1 usegd
83 83 requesting all changes
84 84 adding changesets
85 85 adding manifests
86 86 adding file changes
87 87 added 4 changesets with 6 changes to 3 files (+2 heads)
88 88 updating to branch default
89 89 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 90 $ hg clone repo --pull --config format.generaldelta=1 full
91 91 requesting all changes
92 92 adding changesets
93 93 adding manifests
94 94 adding file changes
95 95 added 4 changesets with 6 changes to 3 files (+2 heads)
96 96 updating to branch default
97 97 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 98 $ hg -R repo debugindex -m
99 99 rev offset length base linkrev nodeid p1 p2
100 100 0 0 104 0 0 cef96823c800 000000000000 000000000000
101 101 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
102 102 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
103 103 3 218 104 3 3 723508934dad cef96823c800 000000000000
104 104 $ hg -R usegd debugindex -m
105 105 rev offset length delta linkrev nodeid p1 p2
106 106 0 0 104 -1 0 cef96823c800 000000000000 000000000000
107 107 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
108 108 2 161 57 1 2 134fdc6fd680 cef96823c800 000000000000
109 109 3 218 57 0 3 723508934dad cef96823c800 000000000000
110 110 $ hg -R full debugindex -m
111 111 rev offset length delta linkrev nodeid p1 p2
112 112 0 0 104 -1 0 cef96823c800 000000000000 000000000000
113 113 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
114 114 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
115 115 3 218 57 0 3 723508934dad cef96823c800 000000000000
116 116
117 117 Test format.aggressivemergedeltas
118 118
119 119 $ hg init --config format.generaldelta=1 aggressive
120 120 $ cd aggressive
121 121 $ cat << EOF >> .hg/hgrc
122 122 > [format]
123 123 > generaldelta = 1
124 124 > EOF
125 125 $ touch a b c d e
126 126 $ hg commit -Aqm side1
127 127 $ hg up -q null
128 128 $ touch x y
129 129 $ hg commit -Aqm side2
130 130
131 131 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
132 132 $ hg merge -q 0
133 133 $ hg commit -q -m merge
134 134 $ hg debugindex -m
135 135 rev offset length delta linkrev nodeid p1 p2
136 136 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
137 137 1 59 61 0 1 315c023f341d 000000000000 000000000000
138 138 2 120 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
139 139
140 140 $ hg strip -q -r . --config extensions.strip=
141 141
142 142 - Verify aggressive merge uses p2 (commit 0) as delta parent
143 143 $ hg up -q -C 1
144 144 $ hg merge -q 0
145 145 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
146 146 $ hg debugindex -m
147 147 rev offset length delta linkrev nodeid p1 p2
148 148 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
149 149 1 59 61 0 1 315c023f341d 000000000000 000000000000
150 150 2 120 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
151 151
152 152 Test that strip bundles use bundle2
153 153 $ hg --config extensions.strip= strip .
154 154 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
155 155 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
156 156 $ hg debugbundle .hg/strip-backup/*
157 157 Stream params: sortdict([('Compression', 'BZ')])
158 158 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
159 159 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
160 160 phase-heads -- 'sortdict()'
161 161 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9 draft
162 162
163 163 $ cd ..
164
165 test maxdeltachainspan
166
167 $ hg init source-repo
168 $ cd source-repo
169 $ hg debugbuilddag --new-file '.+5:brancha$.+11:branchb$.+30:branchc<brancha+2<branchb+2'
170 $ cd ..
171 $ hg -R source-repo debugindex -m
172 rev offset length delta linkrev nodeid p1 p2
173 0 0 46 -1 0 19deeef41503 000000000000 000000000000
174 1 46 57 0 1 fffc37b38c40 19deeef41503 000000000000
175 2 103 57 1 2 5822d75c83d9 fffc37b38c40 000000000000
176 3 160 57 2 3 19cf2273e601 5822d75c83d9 000000000000
177 4 217 57 3 4 d45ead487afe 19cf2273e601 000000000000
178 5 274 57 4 5 96e0c2ce55ed d45ead487afe 000000000000
179 6 331 46 -1 6 0c2ea5222c74 000000000000 000000000000
180 7 377 57 6 7 4ca08a89134d 0c2ea5222c74 000000000000
181 8 434 57 7 8 c973dbfd30ac 4ca08a89134d 000000000000
182 9 491 57 8 9 d81d878ff2cd c973dbfd30ac 000000000000
183 10 548 58 9 10 dbee7f0dd760 d81d878ff2cd 000000000000
184 11 606 58 10 11 474be9f1fd4e dbee7f0dd760 000000000000
185 12 664 58 11 12 594a27502c85 474be9f1fd4e 000000000000
186 13 722 58 12 13 a7d25307d6a9 594a27502c85 000000000000
187 14 780 58 13 14 3eb53082272e a7d25307d6a9 000000000000
188 15 838 58 14 15 d1e94c85caf6 3eb53082272e 000000000000
189 16 896 58 15 16 8933d9629788 d1e94c85caf6 000000000000
190 17 954 58 16 17 a33416e52d91 8933d9629788 000000000000
191 18 1012 47 -1 18 4ccbf31021ed 000000000000 000000000000
192 19 1059 58 18 19 dcad7a25656c 4ccbf31021ed 000000000000
193 20 1117 58 19 20 617c4f8be75f dcad7a25656c 000000000000
194 21 1175 58 20 21 975b9c1d75bb 617c4f8be75f 000000000000
195 22 1233 58 21 22 74f09cd33b70 975b9c1d75bb 000000000000
196 23 1291 58 22 23 54e79bfa7ef1 74f09cd33b70 000000000000
197 24 1349 58 23 24 c556e7ff90af 54e79bfa7ef1 000000000000
198 25 1407 58 24 25 42daedfe9c6b c556e7ff90af 000000000000
199 26 1465 58 25 26 f302566947c7 42daedfe9c6b 000000000000
200 27 1523 58 26 27 2346959851cb f302566947c7 000000000000
201 28 1581 58 27 28 ca8d867106b4 2346959851cb 000000000000
202 29 1639 58 28 29 fd9152decab2 ca8d867106b4 000000000000
203 30 1697 58 29 30 3fe34080a79b fd9152decab2 000000000000
204 31 1755 58 30 31 bce61a95078e 3fe34080a79b 000000000000
205 32 1813 58 31 32 1dd9ba54ba15 bce61a95078e 000000000000
206 33 1871 58 32 33 3cd9b90a9972 1dd9ba54ba15 000000000000
207 34 1929 58 33 34 5db8c9754ef5 3cd9b90a9972 000000000000
208 35 1987 58 34 35 ee4a240cc16c 5db8c9754ef5 000000000000
209 36 2045 58 35 36 9e1d38725343 ee4a240cc16c 000000000000
210 37 2103 58 36 37 3463f73086a8 9e1d38725343 000000000000
211 38 2161 58 37 38 88af72fab449 3463f73086a8 000000000000
212 39 2219 58 38 39 472f5ce73785 88af72fab449 000000000000
213 40 2277 58 39 40 c91b8351e5b8 472f5ce73785 000000000000
214 41 2335 58 40 41 9c8289c5c5c0 c91b8351e5b8 000000000000
215 42 2393 58 41 42 a13fd4a09d76 9c8289c5c5c0 000000000000
216 43 2451 58 42 43 2ec2c81cafe0 a13fd4a09d76 000000000000
217 44 2509 58 43 44 f27fdd174392 2ec2c81cafe0 000000000000
218 45 2567 58 44 45 a539ec59fe41 f27fdd174392 000000000000
219 46 2625 58 45 46 5e98b9ecb738 a539ec59fe41 000000000000
220 47 2683 58 46 47 31e6b47899d0 5e98b9ecb738 000000000000
221 48 2741 58 47 48 2cf25d6636bd 31e6b47899d0 000000000000
222 49 2799 197 -1 49 9fff62ea0624 96e0c2ce55ed 000000000000
223 50 2996 58 49 50 467f8e30a066 9fff62ea0624 000000000000
224 51 3054 356 50 51 346db97283df a33416e52d91 000000000000
225 52 3410 58 51 52 4e003fd4d5cd 346db97283df 000000000000
226 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
227 requesting all changes
228 adding changesets
229 adding manifests
230 adding file changes
231 added 53 changesets with 53 changes to 53 files (+2 heads)
232 updating to branch default
233 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 $ hg -R relax-chain debugindex -m
235 rev offset length delta linkrev nodeid p1 p2
236 0 0 46 -1 0 19deeef41503 000000000000 000000000000
237 1 46 57 0 1 fffc37b38c40 19deeef41503 000000000000
238 2 103 57 1 2 5822d75c83d9 fffc37b38c40 000000000000
239 3 160 57 2 3 19cf2273e601 5822d75c83d9 000000000000
240 4 217 57 3 4 d45ead487afe 19cf2273e601 000000000000
241 5 274 57 4 5 96e0c2ce55ed d45ead487afe 000000000000
242 6 331 46 -1 6 0c2ea5222c74 000000000000 000000000000
243 7 377 57 6 7 4ca08a89134d 0c2ea5222c74 000000000000
244 8 434 57 7 8 c973dbfd30ac 4ca08a89134d 000000000000
245 9 491 57 8 9 d81d878ff2cd c973dbfd30ac 000000000000
246 10 548 58 9 10 dbee7f0dd760 d81d878ff2cd 000000000000
247 11 606 58 10 11 474be9f1fd4e dbee7f0dd760 000000000000
248 12 664 58 11 12 594a27502c85 474be9f1fd4e 000000000000
249 13 722 58 12 13 a7d25307d6a9 594a27502c85 000000000000
250 14 780 58 13 14 3eb53082272e a7d25307d6a9 000000000000
251 15 838 58 14 15 d1e94c85caf6 3eb53082272e 000000000000
252 16 896 58 15 16 8933d9629788 d1e94c85caf6 000000000000
253 17 954 58 16 17 a33416e52d91 8933d9629788 000000000000
254 18 1012 47 -1 18 4ccbf31021ed 000000000000 000000000000
255 19 1059 58 18 19 dcad7a25656c 4ccbf31021ed 000000000000
256 20 1117 58 19 20 617c4f8be75f dcad7a25656c 000000000000
257 21 1175 58 20 21 975b9c1d75bb 617c4f8be75f 000000000000
258 22 1233 58 21 22 74f09cd33b70 975b9c1d75bb 000000000000
259 23 1291 58 22 23 54e79bfa7ef1 74f09cd33b70 000000000000
260 24 1349 58 23 24 c556e7ff90af 54e79bfa7ef1 000000000000
261 25 1407 58 24 25 42daedfe9c6b c556e7ff90af 000000000000
262 26 1465 58 25 26 f302566947c7 42daedfe9c6b 000000000000
263 27 1523 58 26 27 2346959851cb f302566947c7 000000000000
264 28 1581 58 27 28 ca8d867106b4 2346959851cb 000000000000
265 29 1639 58 28 29 fd9152decab2 ca8d867106b4 000000000000
266 30 1697 58 29 30 3fe34080a79b fd9152decab2 000000000000
267 31 1755 58 30 31 bce61a95078e 3fe34080a79b 000000000000
268 32 1813 58 31 32 1dd9ba54ba15 bce61a95078e 000000000000
269 33 1871 58 32 33 3cd9b90a9972 1dd9ba54ba15 000000000000
270 34 1929 58 33 34 5db8c9754ef5 3cd9b90a9972 000000000000
271 35 1987 58 34 35 ee4a240cc16c 5db8c9754ef5 000000000000
272 36 2045 58 35 36 9e1d38725343 ee4a240cc16c 000000000000
273 37 2103 58 36 37 3463f73086a8 9e1d38725343 000000000000
274 38 2161 58 37 38 88af72fab449 3463f73086a8 000000000000
275 39 2219 58 38 39 472f5ce73785 88af72fab449 000000000000
276 40 2277 58 39 40 c91b8351e5b8 472f5ce73785 000000000000
277 41 2335 58 40 41 9c8289c5c5c0 c91b8351e5b8 000000000000
278 42 2393 58 41 42 a13fd4a09d76 9c8289c5c5c0 000000000000
279 43 2451 58 42 43 2ec2c81cafe0 a13fd4a09d76 000000000000
280 44 2509 58 43 44 f27fdd174392 2ec2c81cafe0 000000000000
281 45 2567 58 44 45 a539ec59fe41 f27fdd174392 000000000000
282 46 2625 58 45 46 5e98b9ecb738 a539ec59fe41 000000000000
283 47 2683 58 46 47 31e6b47899d0 5e98b9ecb738 000000000000
284 48 2741 58 47 48 2cf25d6636bd 31e6b47899d0 000000000000
285 49 2799 197 -1 49 9fff62ea0624 96e0c2ce55ed 000000000000
286 50 2996 58 49 50 467f8e30a066 9fff62ea0624 000000000000
287 51 3054 58 17 51 346db97283df a33416e52d91 000000000000
288 52 3112 369 -1 52 4e003fd4d5cd 346db97283df 000000000000
289 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes
290 requesting all changes
291 adding changesets
292 adding manifests
293 adding file changes
294 added 53 changesets with 53 changes to 53 files (+2 heads)
295 updating to branch default
296 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
297 $ hg -R noconst-chain debugindex -m
298 rev offset length delta linkrev nodeid p1 p2
299 0 0 46 -1 0 19deeef41503 000000000000 000000000000
300 1 46 57 0 1 fffc37b38c40 19deeef41503 000000000000
301 2 103 57 1 2 5822d75c83d9 fffc37b38c40 000000000000
302 3 160 57 2 3 19cf2273e601 5822d75c83d9 000000000000
303 4 217 57 3 4 d45ead487afe 19cf2273e601 000000000000
304 5 274 57 4 5 96e0c2ce55ed d45ead487afe 000000000000
305 6 331 46 -1 6 0c2ea5222c74 000000000000 000000000000
306 7 377 57 6 7 4ca08a89134d 0c2ea5222c74 000000000000
307 8 434 57 7 8 c973dbfd30ac 4ca08a89134d 000000000000
308 9 491 57 8 9 d81d878ff2cd c973dbfd30ac 000000000000
309 10 548 58 9 10 dbee7f0dd760 d81d878ff2cd 000000000000
310 11 606 58 10 11 474be9f1fd4e dbee7f0dd760 000000000000
311 12 664 58 11 12 594a27502c85 474be9f1fd4e 000000000000
312 13 722 58 12 13 a7d25307d6a9 594a27502c85 000000000000
313 14 780 58 13 14 3eb53082272e a7d25307d6a9 000000000000
314 15 838 58 14 15 d1e94c85caf6 3eb53082272e 000000000000
315 16 896 58 15 16 8933d9629788 d1e94c85caf6 000000000000
316 17 954 58 16 17 a33416e52d91 8933d9629788 000000000000
317 18 1012 47 -1 18 4ccbf31021ed 000000000000 000000000000
318 19 1059 58 18 19 dcad7a25656c 4ccbf31021ed 000000000000
319 20 1117 58 19 20 617c4f8be75f dcad7a25656c 000000000000
320 21 1175 58 20 21 975b9c1d75bb 617c4f8be75f 000000000000
321 22 1233 58 21 22 74f09cd33b70 975b9c1d75bb 000000000000
322 23 1291 58 22 23 54e79bfa7ef1 74f09cd33b70 000000000000
323 24 1349 58 23 24 c556e7ff90af 54e79bfa7ef1 000000000000
324 25 1407 58 24 25 42daedfe9c6b c556e7ff90af 000000000000
325 26 1465 58 25 26 f302566947c7 42daedfe9c6b 000000000000
326 27 1523 58 26 27 2346959851cb f302566947c7 000000000000
327 28 1581 58 27 28 ca8d867106b4 2346959851cb 000000000000
328 29 1639 58 28 29 fd9152decab2 ca8d867106b4 000000000000
329 30 1697 58 29 30 3fe34080a79b fd9152decab2 000000000000
330 31 1755 58 30 31 bce61a95078e 3fe34080a79b 000000000000
331 32 1813 58 31 32 1dd9ba54ba15 bce61a95078e 000000000000
332 33 1871 58 32 33 3cd9b90a9972 1dd9ba54ba15 000000000000
333 34 1929 58 33 34 5db8c9754ef5 3cd9b90a9972 000000000000
334 35 1987 58 34 35 ee4a240cc16c 5db8c9754ef5 000000000000
335 36 2045 58 35 36 9e1d38725343 ee4a240cc16c 000000000000
336 37 2103 58 36 37 3463f73086a8 9e1d38725343 000000000000
337 38 2161 58 37 38 88af72fab449 3463f73086a8 000000000000
338 39 2219 58 38 39 472f5ce73785 88af72fab449 000000000000
339 40 2277 58 39 40 c91b8351e5b8 472f5ce73785 000000000000
340 41 2335 58 40 41 9c8289c5c5c0 c91b8351e5b8 000000000000
341 42 2393 58 41 42 a13fd4a09d76 9c8289c5c5c0 000000000000
342 43 2451 58 42 43 2ec2c81cafe0 a13fd4a09d76 000000000000
343 44 2509 58 43 44 f27fdd174392 2ec2c81cafe0 000000000000
344 45 2567 58 44 45 a539ec59fe41 f27fdd174392 000000000000
345 46 2625 58 45 46 5e98b9ecb738 a539ec59fe41 000000000000
346 47 2683 58 46 47 31e6b47899d0 5e98b9ecb738 000000000000
347 48 2741 58 47 48 2cf25d6636bd 31e6b47899d0 000000000000
348 49 2799 58 5 49 9fff62ea0624 96e0c2ce55ed 000000000000
349 50 2857 58 49 50 467f8e30a066 9fff62ea0624 000000000000
350 51 2915 58 17 51 346db97283df a33416e52d91 000000000000
351 52 2973 58 51 52 4e003fd4d5cd 346db97283df 000000000000
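
Reading the two clones above: with the span capped at 2800 bytes, rev 51 is
re-based onto rev 17 and rev 52 falls back to a full revision, while a value of
0 removes the cap entirely and lets rev 49 delta against rev 5 much earlier in
the file. A minimal sketch of setting the cap in a repository's hgrc rather than
per command (option name taken from the --config flag above; it is experimental
and only affects revisions written after it is set):

  $ cat >> relax-chain/.hg/hgrc << EOF
  > [experimental]
  > maxdeltachainspan = 2800
  > EOF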