localrepo: drop the CamelCase name for `localrepo.revlogfilestorage`...
Matt Harbison
r52971:eb4f9028 default
@@ -1,4072 +1,4063
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import functools
12 12 import os
13 13 import random
14 14 import re
15 15 import sys
16 16 import time
17 17 import typing
18 18 import weakref
19 19
20 20 from concurrent import futures
21 21 from typing import (
22 22 Optional,
23 23 )
24 24
25 25 from .i18n import _
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 )
33 33 from . import (
34 34 bookmarks,
35 35 branchmap,
36 36 bundle2,
37 37 bundlecaches,
38 38 changegroup,
39 39 color,
40 40 commit,
41 41 context,
42 42 dirstate,
43 43 discovery,
44 44 encoding,
45 45 error,
46 46 exchange,
47 47 extensions,
48 48 filelog,
49 49 hook,
50 50 lock as lockmod,
51 51 match as matchmod,
52 52 mergestate as mergestatemod,
53 53 mergeutil,
54 54 namespaces,
55 55 narrowspec,
56 56 obsolete,
57 57 pathutil,
58 58 phases,
59 59 policy,
60 60 pushkey,
61 61 pycompat,
62 62 rcutil,
63 63 repoview,
64 64 requirements as requirementsmod,
65 65 revlog,
66 66 revset,
67 67 revsetlang,
68 68 scmutil,
69 69 sparse,
70 70 store as storemod,
71 71 subrepoutil,
72 72 tags as tagsmod,
73 73 transaction,
74 74 txnutil,
75 75 util,
76 76 vfs as vfsmod,
77 77 wireprototypes,
78 78 )
79 79
80 80 from .branching import (
81 81 rev_cache as rev_branch_cache,
82 82 )
83 83
84 84 from .interfaces import (
85 85 repository,
86 86 util as interfaceutil,
87 87 )
88 88
89 89 from .utils import (
90 90 hashutil,
91 91 procutil,
92 92 stringutil,
93 93 urlutil,
94 94 )
95 95
96 96 from .revlogutils import (
97 97 concurrency_checker as revlogchecker,
98 98 constants as revlogconst,
99 99 sidedata as sidedatamod,
100 100 )
101 101
102 102 release = lockmod.release
103 103 urlerr = util.urlerr
104 104 urlreq = util.urlreq
105 105
106 106 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
107 107 b"^((dirstate|narrowspec.dirstate).*|branch$)"
108 108 )
109 109
110 110 # set of (path, vfs-location) tuples. vfs-location is:
111 111 # - 'plain' for vfs relative paths
112 112 # - '' for svfs relative paths
113 113 _cachedfiles = set()
114 114
115 115
116 116 class _basefilecache(scmutil.filecache):
117 117 """All filecache usage on repo are done for logic that should be unfiltered"""
118 118
119 119 def __get__(self, repo, type=None):
120 120 if repo is None:
121 121 return self
122 122 # proxy to unfiltered __dict__ since filtered repo has no entry
123 123 unfi = repo.unfiltered()
124 124 try:
125 125 return unfi.__dict__[self.sname]
126 126 except KeyError:
127 127 pass
128 128 return super(_basefilecache, self).__get__(unfi, type)
129 129
130 130 def set(self, repo, value):
131 131 return super(_basefilecache, self).set(repo.unfiltered(), value)
132 132
133 133
134 134 class repofilecache(_basefilecache):
135 135 """filecache for files in .hg but outside of .hg/store"""
136 136
137 137 def __init__(self, *paths):
138 138 super(repofilecache, self).__init__(*paths)
139 139 for path in paths:
140 140 _cachedfiles.add((path, b'plain'))
141 141
142 142 def join(self, obj, fname):
143 143 return obj.vfs.join(fname)
144 144
145 145
146 146 class storecache(_basefilecache):
147 147 """filecache for files in the store"""
148 148
149 149 def __init__(self, *paths):
150 150 super(storecache, self).__init__(*paths)
151 151 for path in paths:
152 152 _cachedfiles.add((path, b''))
153 153
154 154 def join(self, obj, fname):
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 class changelogcache(storecache):
159 159 """filecache for the changelog"""
160 160
161 161 def __init__(self):
162 162 super(changelogcache, self).__init__()
163 163 _cachedfiles.add((b'00changelog.i', b''))
164 164 _cachedfiles.add((b'00changelog.n', b''))
165 165
166 166 def tracked_paths(self, obj):
167 167 paths = [self.join(obj, b'00changelog.i')]
168 168 if obj.store.opener.options.get(b'persistent-nodemap', False):
169 169 paths.append(self.join(obj, b'00changelog.n'))
170 170 return paths
171 171
172 172
173 173 class manifestlogcache(storecache):
174 174 """filecache for the manifestlog"""
175 175
176 176 def __init__(self):
177 177 super(manifestlogcache, self).__init__()
178 178 _cachedfiles.add((b'00manifest.i', b''))
179 179 _cachedfiles.add((b'00manifest.n', b''))
180 180
181 181 def tracked_paths(self, obj):
182 182 paths = [self.join(obj, b'00manifest.i')]
183 183 if obj.store.opener.options.get(b'persistent-nodemap', False):
184 184 paths.append(self.join(obj, b'00manifest.n'))
185 185 return paths
186 186
187 187
188 188 class mixedrepostorecache(_basefilecache):
189 189 """filecache for a mix files in .hg/store and outside"""
190 190
191 191 def __init__(self, *pathsandlocations):
192 192 # scmutil.filecache only uses the path for passing back into our
193 193 # join(), so we can safely pass a list of paths and locations
194 194 super(mixedrepostorecache, self).__init__(*pathsandlocations)
195 195 _cachedfiles.update(pathsandlocations)
196 196
197 197 def join(self, obj, fnameandlocation):
198 198 fname, location = fnameandlocation
199 199 if location == b'plain':
200 200 return obj.vfs.join(fname)
201 201 else:
202 202 if location != b'':
203 203 raise error.ProgrammingError(
204 204 b'unexpected location: %s' % location
205 205 )
206 206 return obj.sjoin(fname)
207 207
208 208
209 209 def isfilecached(repo, name):
210 210 """check if a repo has already cached "name" filecache-ed property
211 211
212 212 This returns a (cachedobj-or-None, iscached) tuple.
213 213 """
214 214 cacheentry = repo.unfiltered()._filecache.get(name, None)
215 215 if not cacheentry:
216 216 return None, False
217 217 return cacheentry.obj, True
218 218
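The decorator classes above are used later in this module to declare repository properties whose cached values are invalidated when a backing file changes. A minimal, illustrative sketch of the declaration pattern (the property names here are hypothetical, and real properties rely on the repository's _filecache bookkeeping, so this only shows the declaration shape):

    class _sketchrepo:
        @repofilecache(b'bookmarks')
        def _bookmarks_example(self):
            # recomputed only when .hg/bookmarks changes on disk
            ...

        @storecache(b'phaseroots')
        def _phaseroots_example(self):
            # recomputed only when .hg/store/phaseroots changes
            ...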
219 219
220 220 class unfilteredpropertycache(util.propertycache):
221 221 """propertycache that apply to unfiltered repo only"""
222 222
223 223 def __get__(self, repo, type=None):
224 224 unfi = repo.unfiltered()
225 225 if unfi is repo:
226 226 return super(unfilteredpropertycache, self).__get__(unfi)
227 227 return getattr(unfi, self.name)
228 228
229 229
230 230 class filteredpropertycache(util.propertycache):
231 231 """propertycache that must take filtering in account"""
232 232
233 233 def cachevalue(self, obj, value):
234 234 object.__setattr__(obj, self.name, value)
235 235
236 236
237 237 def hasunfilteredcache(repo, name):
238 238 """check if a repo has an unfilteredpropertycache value for <name>"""
239 239 return name in vars(repo.unfiltered())
240 240
241 241
242 242 def unfilteredmethod(orig):
243 243 """decorate method that always need to be run on unfiltered version"""
244 244
245 245 @functools.wraps(orig)
246 246 def wrapper(repo, *args, **kwargs):
247 247 return orig(repo.unfiltered(), *args, **kwargs)
248 248
249 249 return wrapper
250 250
251 251
252 252 moderncaps = {
253 253 b'lookup',
254 254 b'branchmap',
255 255 b'pushkey',
256 256 b'known',
257 257 b'getbundle',
258 258 b'unbundle',
259 259 }
260 260 legacycaps = moderncaps.union({b'changegroupsubset'})
261 261
262 262
263 263 class localcommandexecutor: # (repository.ipeercommandexecutor)
264 264 def __init__(self, peer):
265 265 self._peer = peer
266 266 self._sent = False
267 267 self._closed = False
268 268
269 269 def __enter__(self):
270 270 return self
271 271
272 272 def __exit__(self, exctype, excvalue, exctb):
273 273 self.close()
274 274
275 275 def callcommand(self, command, args):
276 276 if self._sent:
277 277 raise error.ProgrammingError(
278 278 b'callcommand() cannot be used after sendcommands()'
279 279 )
280 280
281 281 if self._closed:
282 282 raise error.ProgrammingError(
283 283 b'callcommand() cannot be used after close()'
284 284 )
285 285
286 286 # We don't need to support anything fancy. Just call the named
287 287 # method on the peer and return a resolved future.
288 288 fn = getattr(self._peer, pycompat.sysstr(command))
289 289
290 290 f = futures.Future()
291 291
292 292 try:
293 293 result = fn(**pycompat.strkwargs(args))
294 294 except Exception:
295 295 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
296 296 else:
297 297 f.set_result(result)
298 298
299 299 return f
300 300
301 301 def sendcommands(self):
302 302 self._sent = True
303 303
304 304 def close(self):
305 305 self._closed = True
306 306
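The executor above is deliberately synchronous: callcommand() invokes the named peer method immediately and returns an already-resolved future. A usage sketch of the executor protocol, assuming `peer` is a localpeer as defined below:

    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
        e.sendcommands()  # only marks the batch as sent for this executor
    node = f.result()  # already resolved; no further I/O happens here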
307 307
308 308 class localpeer(repository.peer): # (repository.ipeercommands)
309 309 '''peer for a local repo; reflects only the most recent API'''
310 310
311 311 def __init__(self, repo, caps=None, path=None, remotehidden=False):
312 312 super(localpeer, self).__init__(
313 313 repo.ui, path=path, remotehidden=remotehidden
314 314 )
315 315
316 316 if caps is None:
317 317 caps = moderncaps.copy()
318 318 if remotehidden:
319 319 self._repo = repo.filtered(b'served.hidden')
320 320 else:
321 321 self._repo = repo.filtered(b'served')
322 322 if repo._wanted_sidedata:
323 323 formatted = bundle2.format_remote_wanted_sidedata(repo)
324 324 caps.add(b'exp-wanted-sidedata=' + formatted)
325 325
326 326 self._caps = repo._restrictcapabilities(caps)
327 327
328 328 # Begin of _basepeer interface.
329 329
330 330 def url(self):
331 331 return self._repo.url()
332 332
333 333 def local(self):
334 334 return self._repo
335 335
336 336 def canpush(self):
337 337 return True
338 338
339 339 def close(self):
340 340 self._repo.close()
341 341
342 342 # End of _basepeer interface.
343 343
344 344 # Begin of _basewirecommands interface.
345 345
346 346 def branchmap(self):
347 347 return self._repo.branchmap()
348 348
349 349 def capabilities(self):
350 350 return self._caps
351 351
352 352 def get_cached_bundle_inline(self, path):
353 353 # not needed with local peer
354 354 raise NotImplementedError
355 355
356 356 def clonebundles(self):
357 357 return bundlecaches.get_manifest(self._repo)
358 358
359 359 def debugwireargs(self, one, two, three=None, four=None, five=None):
360 360 """Used to test argument passing over the wire"""
361 361 return b"%s %s %s %s %s" % (
362 362 one,
363 363 two,
364 364 pycompat.bytestr(three),
365 365 pycompat.bytestr(four),
366 366 pycompat.bytestr(five),
367 367 )
368 368
369 369 def getbundle(
370 370 self,
371 371 source,
372 372 heads=None,
373 373 common=None,
374 374 bundlecaps=None,
375 375 remote_sidedata=None,
376 376 **kwargs,
377 377 ):
378 378 chunks = exchange.getbundlechunks(
379 379 self._repo,
380 380 source,
381 381 heads=heads,
382 382 common=common,
383 383 bundlecaps=bundlecaps,
384 384 remote_sidedata=remote_sidedata,
385 385 **kwargs,
386 386 )[1]
387 387 cb = util.chunkbuffer(chunks)
388 388
389 389 if exchange.bundle2requested(bundlecaps):
390 390 # When requesting a bundle2, getbundle returns a stream to make the
391 391 # wire level function happier. We need to build a proper object
392 392 # from it in local peer.
393 393 return bundle2.getunbundler(self.ui, cb)
394 394 else:
395 395 return changegroup.getunbundler(b'01', cb, None)
396 396
397 397 def heads(self):
398 398 return self._repo.heads()
399 399
400 400 def known(self, nodes):
401 401 return self._repo.known(nodes)
402 402
403 403 def listkeys(self, namespace):
404 404 return self._repo.listkeys(namespace)
405 405
406 406 def lookup(self, key):
407 407 return self._repo.lookup(key)
408 408
409 409 def pushkey(self, namespace, key, old, new):
410 410 return self._repo.pushkey(namespace, key, old, new)
411 411
412 412 def stream_out(self):
413 413 raise error.Abort(_(b'cannot perform stream clone against local peer'))
414 414
415 415 def unbundle(self, bundle, heads, url):
416 416 """apply a bundle on a repo
417 417
418 418 This function handles the repo locking itself."""
419 419 try:
420 420 try:
421 421 bundle = exchange.readbundle(self.ui, bundle, None)
422 422 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
423 423 if hasattr(ret, 'getchunks'):
424 424 # This is a bundle20 object, turn it into an unbundler.
425 425 # This little dance should be dropped eventually when the
426 426 # API is finally improved.
427 427 stream = util.chunkbuffer(ret.getchunks())
428 428 ret = bundle2.getunbundler(self.ui, stream)
429 429 return ret
430 430 except Exception as exc:
431 431 # If the exception contains output salvaged from a bundle2
432 432 # reply, we need to make sure it is printed before continuing
433 433 # to fail. So we build a bundle2 with such output and consume
434 434 # it directly.
435 435 #
436 436 # This is not very elegant but allows a "simple" solution for
437 437 # issue4594
438 438 output = getattr(exc, '_bundle2salvagedoutput', ())
439 439 if output:
440 440 bundler = bundle2.bundle20(self._repo.ui)
441 441 for out in output:
442 442 bundler.addpart(out)
443 443 stream = util.chunkbuffer(bundler.getchunks())
444 444 b = bundle2.getunbundler(self.ui, stream)
445 445 bundle2.processbundle(self._repo, b)
446 446 raise
447 447 except error.PushRaced as exc:
448 448 raise error.ResponseError(
449 449 _(b'push failed:'), stringutil.forcebytestr(exc)
450 450 )
451 451
452 452 # End of _basewirecommands interface.
453 453
454 454 # Begin of peer interface.
455 455
456 456 def commandexecutor(self):
457 457 return localcommandexecutor(self)
458 458
459 459 # End of peer interface.
460 460
461 461
462 462 class locallegacypeer(localpeer): # (repository.ipeerlegacycommands)
463 463 """peer extension which implements legacy methods too; used for tests with
464 464 restricted capabilities"""
465 465
466 466 def __init__(self, repo, path=None, remotehidden=False):
467 467 super(locallegacypeer, self).__init__(
468 468 repo, caps=legacycaps, path=path, remotehidden=remotehidden
469 469 )
470 470
471 471 # Begin of baselegacywirecommands interface.
472 472
473 473 def between(self, pairs):
474 474 return self._repo.between(pairs)
475 475
476 476 def branches(self, nodes):
477 477 return self._repo.branches(nodes)
478 478
479 479 def changegroup(self, nodes, source):
480 480 outgoing = discovery.outgoing(
481 481 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
482 482 )
483 483 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
484 484
485 485 def changegroupsubset(self, bases, heads, source):
486 486 outgoing = discovery.outgoing(
487 487 self._repo, missingroots=bases, ancestorsof=heads
488 488 )
489 489 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
490 490
491 491 # End of baselegacywirecommands interface.
492 492
493 493
494 494 # Functions receiving (ui, features) that extensions can register to impact
495 495 # the ability to load repositories with custom requirements. Only
496 496 # functions defined in loaded extensions are called.
497 497 #
498 498 # The function receives a set of requirement strings that the repository
499 499 # is capable of opening. Functions will typically add elements to the
500 500 # set to reflect that the extension knows how to handle those requirements.
501 501 featuresetupfuncs = set()
502 502
503 503
504 504 def _getsharedvfs(hgvfs, requirements):
505 505 """returns the vfs object pointing to root of shared source
506 506 repo for a shared repository
507 507
508 508 hgvfs is vfs pointing at .hg/ of current repo (shared one)
509 509 requirements is a set of requirements of current repo (shared one)
510 510 """
511 511 # The ``shared`` or ``relshared`` requirements indicate the
512 512 # store lives in the path contained in the ``.hg/sharedpath`` file.
513 513 # This is an absolute path for ``shared`` and relative to
514 514 # ``.hg/`` for ``relshared``.
515 515 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
516 516 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
517 517 sharedpath = util.normpath(hgvfs.join(sharedpath))
518 518
519 519 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
520 520
521 521 if not sharedvfs.exists():
522 522 raise error.RepoError(
523 523 _(b'.hg/sharedpath points to nonexistent directory %s')
524 524 % sharedvfs.base
525 525 )
526 526 return sharedvfs
527 527
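To make the `shared` vs `relshared` distinction above concrete, here is a small standalone sketch of the path resolution (hypothetical paths; the real code goes through the vfs layer rather than os.path):

    import os

    def resolve_sharedpath(hg_dir, sharedpath, relshared):
        # 'relshared' stores a path relative to .hg/; 'shared' stores an
        # absolute path. Mirrors the hgvfs.join()/normpath logic above.
        if relshared:
            return os.path.normpath(os.path.join(hg_dir, sharedpath))
        return sharedpath

    print(resolve_sharedpath(b'/repo/.hg', b'../../main/.hg', True))
    # b'/main/.hg'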
528 528
529 529 def makelocalrepository(baseui, path: bytes, intents=None):
530 530 """Create a local repository object.
531 531
532 532 Given arguments needed to construct a local repository, this function
533 533 performs various early repository loading functionality (such as
534 534 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
535 535 the repository can be opened, derives a type suitable for representing
536 536 that repository, and returns an instance of it.
537 537
538 538 The returned object conforms to the ``repository.completelocalrepository``
539 539 interface.
540 540
541 541 The repository type is derived by calling a series of factory functions
542 542 for each aspect/interface of the final repository. These are defined by
543 543 ``REPO_INTERFACES``.
544 544
545 545 Each factory function is called to produce a type implementing a specific
546 546 interface. The cumulative list of returned types will be combined into a
547 547 new type and that type will be instantiated to represent the local
548 548 repository.
549 549
550 550 The factory functions each receive various state that may be consulted
551 551 as part of deriving a type.
552 552
553 553 Extensions should wrap these factory functions to customize repository type
554 554 creation. Note that an extension's wrapped function may be called even if
555 555 that extension is not loaded for the repo being constructed. Extensions
556 556 should check if their ``__name__`` appears in the
557 557 ``extensionmodulenames`` set passed to the factory function and no-op if
558 558 not.
559 559 """
560 560 ui = baseui.copy()
561 561 # Prevent copying repo configuration.
562 562 ui.copy = baseui.copy
563 563
564 564 # Working directory VFS rooted at repository root.
565 565 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
566 566
567 567 # Main VFS for .hg/ directory.
568 568 hgpath = wdirvfs.join(b'.hg')
569 569 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
570 570 # Whether this repository is a shared one or not
571 571 shared = False
572 572 # If this repository is shared, vfs pointing to shared repo
573 573 sharedvfs = None
574 574
575 575 # The .hg/ path should exist and should be a directory. All other
576 576 # cases are errors.
577 577 if not hgvfs.isdir():
578 578 try:
579 579 hgvfs.stat()
580 580 except FileNotFoundError:
581 581 pass
582 582 except ValueError as e:
583 583 # Can be raised on Python 3.8 when path is invalid.
584 584 raise error.Abort(
585 585 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
586 586 )
587 587
588 588 raise error.RepoError(_(b'repository %s not found') % path)
589 589
590 590 requirements = scmutil.readrequires(hgvfs, True)
591 591 shared = (
592 592 requirementsmod.SHARED_REQUIREMENT in requirements
593 593 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
594 594 )
595 595 storevfs = None
596 596 if shared:
597 597 # This is a shared repo
598 598 sharedvfs = _getsharedvfs(hgvfs, requirements)
599 599 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
600 600 else:
601 601 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
602 602
603 603 # if .hg/requires contains the sharesafe requirement, it means
604 604 # there exists a `.hg/store/requires` too and we should read it
605 605 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
606 606 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
607 607 # is not present; refer to checkrequirementscompat() for that
608 608 #
609 609 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
610 610 # repository was shared the old way. We check the share source .hg/requires
611 611 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
612 612 # to be reshared
613 613 hint = _(b"see `hg help config.format.use-share-safe` for more information")
614 614 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
615 615 if (
616 616 shared
617 617 and requirementsmod.SHARESAFE_REQUIREMENT
618 618 not in scmutil.readrequires(sharedvfs, True)
619 619 ):
620 620 mismatch_warn = ui.configbool(
621 621 b'share', b'safe-mismatch.source-not-safe.warn'
622 622 )
623 623 mismatch_config = ui.config(
624 624 b'share', b'safe-mismatch.source-not-safe'
625 625 )
626 626 mismatch_verbose_upgrade = ui.configbool(
627 627 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
628 628 )
629 629 if mismatch_config in (
630 630 b'downgrade-allow',
631 631 b'allow',
632 632 b'downgrade-abort',
633 633 ):
634 634 # prevent cyclic import localrepo -> upgrade -> localrepo
635 635 from . import upgrade
636 636
637 637 upgrade.downgrade_share_to_non_safe(
638 638 ui,
639 639 hgvfs,
640 640 sharedvfs,
641 641 requirements,
642 642 mismatch_config,
643 643 mismatch_warn,
644 644 mismatch_verbose_upgrade,
645 645 )
646 646 elif mismatch_config == b'abort':
647 647 raise error.Abort(
648 648 _(b"share source does not support share-safe requirement"),
649 649 hint=hint,
650 650 )
651 651 else:
652 652 raise error.Abort(
653 653 _(
654 654 b"share-safe mismatch with source.\nUnrecognized"
655 655 b" value '%s' of `share.safe-mismatch.source-not-safe`"
656 656 b" set."
657 657 )
658 658 % mismatch_config,
659 659 hint=hint,
660 660 )
661 661 else:
662 662 requirements |= scmutil.readrequires(storevfs, False)
663 663 elif shared:
664 664 sourcerequires = scmutil.readrequires(sharedvfs, False)
665 665 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
666 666 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
667 667 mismatch_warn = ui.configbool(
668 668 b'share', b'safe-mismatch.source-safe.warn'
669 669 )
670 670 mismatch_verbose_upgrade = ui.configbool(
671 671 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
672 672 )
673 673 if mismatch_config in (
674 674 b'upgrade-allow',
675 675 b'allow',
676 676 b'upgrade-abort',
677 677 ):
678 678 # prevent cyclic import localrepo -> upgrade -> localrepo
679 679 from . import upgrade
680 680
681 681 upgrade.upgrade_share_to_safe(
682 682 ui,
683 683 hgvfs,
684 684 storevfs,
685 685 requirements,
686 686 mismatch_config,
687 687 mismatch_warn,
688 688 mismatch_verbose_upgrade,
689 689 )
690 690 elif mismatch_config == b'abort':
691 691 raise error.Abort(
692 692 _(
693 693 b'version mismatch: source uses share-safe'
694 694 b' functionality while the current share does not'
695 695 ),
696 696 hint=hint,
697 697 )
698 698 else:
699 699 raise error.Abort(
700 700 _(
701 701 b"share-safe mismatch with source.\nUnrecognized"
702 702 b" value '%s' of `share.safe-mismatch.source-safe` set."
703 703 )
704 704 % mismatch_config,
705 705 hint=hint,
706 706 )
707 707
708 708 # The .hg/hgrc file may load extensions or contain config options
709 709 # that influence repository construction. Attempt to load it and
710 710 # process any new extensions that it may have pulled in.
711 711 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
712 712 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
713 713 extensions.loadall(ui)
714 714 extensions.populateui(ui)
715 715
716 716 # Set of module names of extensions loaded for this repository.
717 717 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
718 718
719 719 supportedrequirements = gathersupportedrequirements(ui)
720 720
721 721 # We first validate the requirements are known.
722 722 ensurerequirementsrecognized(requirements, supportedrequirements)
723 723
724 724 # Then we validate that the known set is reasonable to use together.
725 725 ensurerequirementscompatible(ui, requirements)
726 726
727 727 # TODO there are unhandled edge cases related to opening repositories with
728 728 # shared storage. If storage is shared, we should also test for requirements
729 729 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
730 730 # that repo, as that repo may load extensions needed to open it. This is a
731 731 # bit complicated because we don't want the other hgrc to overwrite settings
732 732 # in this hgrc.
733 733 #
734 734 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
735 735 # file when sharing repos. But if a requirement is added after the share is
736 736 # performed, thereby introducing a new requirement for the opener, we
737 737 # will not see that and could encounter a run-time error interacting with
738 738 # that shared store since it has an unknown-to-us requirement.
739 739
740 740 # At this point, we know we should be capable of opening the repository.
741 741 # Now get on with doing that.
742 742
743 743 features = set()
744 744
745 745 # The "store" part of the repository holds versioned data. How it is
746 746 # accessed is determined by various requirements. If `shared` or
747 747 # `relshared` requirements are present, this indicates the current repository
748 748 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
749 749 if shared:
750 750 storebasepath = sharedvfs.base
751 751 cachepath = sharedvfs.join(b'cache')
752 752 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
753 753 else:
754 754 storebasepath = hgvfs.base
755 755 cachepath = hgvfs.join(b'cache')
756 756 wcachepath = hgvfs.join(b'wcache')
757 757
758 758 # The store has changed over time and the exact layout is dictated by
759 759 # requirements. The store interface abstracts differences across all
760 760 # of them.
761 761 store = makestore(
762 762 requirements,
763 763 storebasepath,
764 764 lambda base: vfsmod.vfs(base, cacheaudited=True),
765 765 )
766 766 hgvfs.createmode = store.createmode
767 767
768 768 storevfs = store.vfs
769 769 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
770 770
771 771 if (
772 772 requirementsmod.REVLOGV2_REQUIREMENT in requirements
773 773 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
774 774 ):
775 775 features.add(repository.REPO_FEATURE_SIDE_DATA)
776 776 # the revlogv2 docket introduced a race condition that we need to fix
777 777 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
778 778
779 779 # The cache vfs is used to manage cache files.
780 780 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
781 781 cachevfs.createmode = store.createmode
782 782 # The cache vfs is used to manage cache files related to the working copy
783 783 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
784 784 wcachevfs.createmode = store.createmode
785 785
786 786 # Now resolve the type for the repository object. We do this by repeatedly
787 787 # calling a factory function to produce types for specific aspects of the
788 788 # repo's operation. The aggregate returned types are used as base classes
789 789 # for a dynamically-derived type, which will represent our new repository.
790 790
791 791 bases = []
792 792 extrastate = {}
793 793
794 794 for iface, fn in REPO_INTERFACES:
795 795 # We pass all potentially useful state to give extensions tons of
796 796 # flexibility.
797 797 typ = fn()(
798 798 ui=ui,
799 799 intents=intents,
800 800 requirements=requirements,
801 801 features=features,
802 802 wdirvfs=wdirvfs,
803 803 hgvfs=hgvfs,
804 804 store=store,
805 805 storevfs=storevfs,
806 806 storeoptions=storevfs.options,
807 807 cachevfs=cachevfs,
808 808 wcachevfs=wcachevfs,
809 809 extensionmodulenames=extensionmodulenames,
810 810 extrastate=extrastate,
811 811 baseclasses=bases,
812 812 )
813 813
814 814 if not isinstance(typ, type):
815 815 raise error.ProgrammingError(
816 816 b'unable to construct type for %s' % iface
817 817 )
818 818
819 819 bases.append(typ)
820 820
821 821 # type() allows you to use characters in type names that wouldn't be
822 822 # recognized as Python symbols in source code. We abuse that to add
823 823 # rich information about our constructed repo.
824 824 name = pycompat.sysstr(
825 825 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
826 826 )
827 827
828 828 cls = type(name, tuple(bases), {})
829 829
830 830 return cls(
831 831 baseui=baseui,
832 832 ui=ui,
833 833 origroot=path,
834 834 wdirvfs=wdirvfs,
835 835 hgvfs=hgvfs,
836 836 requirements=requirements,
837 837 supportedrequirements=supportedrequirements,
838 838 sharedpath=storebasepath,
839 839 store=store,
840 840 cachevfs=cachevfs,
841 841 wcachevfs=wcachevfs,
842 842 features=features,
843 843 intents=intents,
844 844 )
845 845
846 846
847 847 def loadhgrc(
848 848 ui,
849 849 wdirvfs: vfsmod.vfs,
850 850 hgvfs: vfsmod.vfs,
851 851 requirements,
852 852 sharedvfs: Optional[vfsmod.vfs] = None,
853 853 ):
854 854 """Load hgrc files/content into a ui instance.
855 855
856 856 This is called during repository opening to load any additional
857 857 config files or settings relevant to the current repository.
858 858
859 859 Returns a bool indicating whether any additional configs were loaded.
860 860
861 861 Extensions should monkeypatch this function to modify how per-repo
862 862 configs are loaded. For example, an extension may wish to pull in
863 863 configs from alternate files or sources.
864 864
865 865 sharedvfs is a vfs object pointing to the source repo if the current one
866 866 is a shared one
867 867 """
868 868 if not rcutil.use_repo_hgrc():
869 869 return False
870 870
871 871 ret = False
872 872 # first load config from the shared source if we have to
873 873 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
874 874 try:
875 875 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
876 876 ret = True
877 877 except IOError:
878 878 pass
879 879
880 880 try:
881 881 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
882 882 ret = True
883 883 except IOError:
884 884 pass
885 885
886 886 try:
887 887 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
888 888 ret = True
889 889 except IOError:
890 890 pass
891 891
892 892 return ret
893 893
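Since the docstring above invites extensions to monkeypatch loadhgrc(), here is a minimal sketch of such an extension (the extra file name `hgrc-extra` is hypothetical):

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
        ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
        try:
            # pull in one additional per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)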
894 894
895 895 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
896 896 """Perform additional actions after .hg/hgrc is loaded.
897 897
898 898 This function is called during repository loading immediately after
899 899 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
900 900
901 901 The function can be used to validate configs, automatically add
902 902 options (including extensions) based on requirements, etc.
903 903 """
904 904
905 905 # Map of requirements to list of extensions to load automatically when
906 906 # requirement is present.
907 907 autoextensions = {
908 908 b'git': [b'git'],
909 909 b'largefiles': [b'largefiles'],
910 910 b'lfs': [b'lfs'],
911 911 }
912 912
913 913 for requirement, names in sorted(autoextensions.items()):
914 914 if requirement not in requirements:
915 915 continue
916 916
917 917 for name in names:
918 918 if not ui.hasconfig(b'extensions', name):
919 919 ui.setconfig(b'extensions', name, b'', source=b'autoload')
920 920
921 921
922 922 def gathersupportedrequirements(ui):
923 923 """Determine the complete set of recognized requirements."""
924 924 # Start with all requirements supported by this file.
925 925 supported = set(localrepository._basesupported)
926 926
927 927 # Execute ``featuresetupfuncs`` entries if they belong to an extension
928 928 # relevant to this ui instance.
929 929 modules = {m.__name__ for n, m in extensions.extensions(ui)}
930 930
931 931 for fn in featuresetupfuncs:
932 932 if fn.__module__ in modules:
933 933 fn(ui, supported)
934 934
935 935 # Add derived requirements from registered compression engines.
936 936 for name in util.compengines:
937 937 engine = util.compengines[name]
938 938 if engine.available() and engine.revlogheader():
939 939 supported.add(b'exp-compression-%s' % name)
940 940 if engine.name() == b'zstd':
941 941 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
942 942
943 943 return supported
944 944
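For reference, the featuresetupfuncs hook consulted above is fed by extensions along these lines (a minimal sketch; the requirement name is hypothetical):

    from mercurial import localrepo

    def featuresetup(ui, supported):
        # declare that this extension can open repos with this requirement
        supported.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)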
945 945
946 946 def ensurerequirementsrecognized(requirements, supported):
947 947 """Validate that a set of local requirements is recognized.
948 948
949 949 Receives a set of requirements. Raises an ``error.RepoError`` if there
950 950 exists any requirement in that set that currently loaded code doesn't
951 951 recognize.
952 952
953 953 If this function returns without raising, all requirements are recognized.
954 954 """
955 955 missing = set()
956 956
957 957 for requirement in requirements:
958 958 if requirement in supported:
959 959 continue
960 960
961 961 if not requirement or not requirement[0:1].isalnum():
962 962 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
963 963
964 964 missing.add(requirement)
965 965
966 966 if missing:
967 967 raise error.RequirementError(
968 968 _(b'repository requires features unknown to this Mercurial: %s')
969 969 % b' '.join(sorted(missing)),
970 970 hint=_(
971 971 b'see https://mercurial-scm.org/wiki/MissingRequirement '
972 972 b'for more information'
973 973 ),
974 974 )
975 975
976 976
977 977 def ensurerequirementscompatible(ui, requirements):
978 978 """Validates that a set of recognized requirements is mutually compatible.
979 979
980 980 Some requirements may not be compatible with others or require
981 981 config options that aren't enabled. This function is called during
982 982 repository opening to ensure that the set of requirements needed
983 983 to open a repository is sane and compatible with config options.
984 984
985 985 Extensions can monkeypatch this function to perform additional
986 986 checking.
987 987
988 988 ``error.RepoError`` should be raised on failure.
989 989 """
990 990 if (
991 991 requirementsmod.SPARSE_REQUIREMENT in requirements
992 992 and not sparse.enabled
993 993 ):
994 994 raise error.RepoError(
995 995 _(
996 996 b'repository is using sparse feature but '
997 997 b'sparse is not enabled; enable the '
998 998 b'"sparse" extensions to access'
999 999 )
1000 1000 )
1001 1001
1002 1002
1003 1003 def makestore(requirements, path, vfstype):
1004 1004 """Construct a storage object for a repository."""
1005 1005 if requirementsmod.STORE_REQUIREMENT in requirements:
1006 1006 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1007 1007 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1008 1008 return storemod.fncachestore(path, vfstype, dotencode)
1009 1009
1010 1010 return storemod.encodedstore(path, vfstype)
1011 1011
1012 1012 return storemod.basicstore(path, vfstype)
1013 1013
1014 1014
1015 1015 def resolvestorevfsoptions(ui, requirements, features):
1016 1016 """Resolve the options to pass to the store vfs opener.
1017 1017
1018 1018 The returned dict is used to influence behavior of the storage layer.
1019 1019 """
1020 1020 options = {}
1021 1021
1022 1022 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1023 1023 options[b'treemanifest'] = True
1024 1024
1025 1025 # experimental config: format.manifestcachesize
1026 1026 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1027 1027 if manifestcachesize is not None:
1028 1028 options[b'manifestcachesize'] = manifestcachesize
1029 1029
1030 1030 # In the absence of another requirement superseding a revlog-related
1031 1031 # requirement, we have to assume the repo is using revlog version 0.
1032 1032 # This revlog format is super old and we don't bother trying to parse
1033 1033 # opener options for it because those options wouldn't do anything
1034 1034 # meaningful on such old repos.
1035 1035 if (
1036 1036 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1037 1037 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1038 1038 ):
1039 1039 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1040 1040 else: # explicitly mark repo as using revlogv0
1041 1041 options[b'revlogv0'] = True
1042 1042
1043 1043 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1044 1044 options[b'copies-storage'] = b'changeset-sidedata'
1045 1045 else:
1046 1046 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1047 1047 copiesextramode = (b'changeset-only', b'compatibility')
1048 1048 if writecopiesto in copiesextramode:
1049 1049 options[b'copies-storage'] = b'extra'
1050 1050
1051 1051 return options
1052 1052
1053 1053
1054 1054 def resolverevlogstorevfsoptions(ui, requirements, features):
1055 1055 """Resolve opener options specific to revlogs."""
1056 1056
1057 1057 options = {}
1058 1058 options[b'flagprocessors'] = {}
1059 1059
1060 1060 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1061 1061 data_config = options[b'data-config'] = revlog.DataConfig()
1062 1062 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1063 1063
1064 1064 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1065 1065 options[b'revlogv1'] = True
1066 1066 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1067 1067 options[b'revlogv2'] = True
1068 1068 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1069 1069 options[b'changelogv2'] = True
1070 1070 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1071 1071 options[b'changelogv2.compute-rank'] = cmp_rank
1072 1072
1073 1073 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1074 1074 options[b'generaldelta'] = True
1075 1075
1076 1076 # experimental config: format.chunkcachesize
1077 1077 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1078 1078 if chunkcachesize is not None:
1079 1079 data_config.chunk_cache_size = chunkcachesize
1080 1080
1081 1081 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1082 1082 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1083 1083 data_config.uncompressed_cache_count = 10_000
1084 1084 data_config.uncompressed_cache_factor = 4
1085 1085 if memory_profile >= scmutil.RESOURCE_HIGH:
1086 1086 data_config.uncompressed_cache_factor = 10
1087 1087
1088 1088 delta_config.delta_both_parents = ui.configbool(
1089 1089 b'storage', b'revlog.optimize-delta-parent-choice'
1090 1090 )
1091 1091 delta_config.candidate_group_chunk_size = ui.configint(
1092 1092 b'storage',
1093 1093 b'revlog.delta-parent-search.candidate-group-chunk-size',
1094 1094 )
1095 1095 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1096 1096
1097 1097 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1098 1098 options[b'issue6528.fix-incoming'] = issue6528
1099 1099
1100 1100 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1101 1101 lazydeltabase = False
1102 1102 if lazydelta:
1103 1103 lazydeltabase = ui.configbool(
1104 1104 b'storage', b'revlog.reuse-external-delta-parent'
1105 1105 )
1106 1106 if lazydeltabase is None:
1107 1107 lazydeltabase = not scmutil.gddeltaconfig(ui)
1108 1108 delta_config.lazy_delta = lazydelta
1109 1109 delta_config.lazy_delta_base = lazydeltabase
1110 1110
1111 1111 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1112 1112 if 0 <= chainspan:
1113 1113 delta_config.max_deltachain_span = chainspan
1114 1114
1115 1115 has_populate = util.has_mmap_populate()
1116 1116 if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
1117 1117 data_config.mmap_index_threshold = ui.configbytes(
1118 1118 b'storage',
1119 1119 b'revlog.mmap.index:size-threshold',
1120 1120 )
1121 1121
1122 1122 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1123 1123 srdensitythres = float(
1124 1124 ui.config(b'experimental', b'sparse-read.density-threshold')
1125 1125 )
1126 1126 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1127 1127 data_config.with_sparse_read = withsparseread
1128 1128 data_config.sr_density_threshold = srdensitythres
1129 1129 data_config.sr_min_gap_size = srmingapsize
1130 1130
1131 1131 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1132 1132 delta_config.sparse_revlog = sparserevlog
1133 1133 if sparserevlog:
1134 1134 options[b'generaldelta'] = True
1135 1135 data_config.with_sparse_read = True
1136 1136
1137 1137 maxchainlen = None
1138 1138 if sparserevlog:
1139 1139 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1140 1140 # experimental config: format.maxchainlen
1141 1141 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1142 1142 if maxchainlen is not None:
1143 1143 delta_config.max_chain_len = maxchainlen
1144 1144
1145 1145 for r in requirements:
1146 1146 # we allow multiple compression engine requirements to co-exist because,
1147 1147 # strictly speaking, revlog seems to support mixed compression styles.
1148 1148 #
1149 1149 # The compression used for new entries will be "the last one"
1150 1150 prefix = r.startswith
1151 1151 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1152 1152 feature_config.compression_engine = r.split(b'-', 2)[2]
1153 1153
1154 1154 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1155 1155 if zlib_level is not None:
1156 1156 if not (0 <= zlib_level <= 9):
1157 1157 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1158 1158 raise error.Abort(msg % zlib_level)
1159 1159 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1160 1160 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1161 1161 if zstd_level is not None:
1162 1162 if not (0 <= zstd_level <= 22):
1163 1163 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1164 1164 raise error.Abort(msg % zstd_level)
1165 1165 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1166 1166
1167 1167 if requirementsmod.NARROW_REQUIREMENT in requirements:
1168 1168 feature_config.enable_ellipsis = True
1169 1169
1170 1170 if ui.configbool(b'experimental', b'rust.index'):
1171 1171 options[b'rust.index'] = True
1172 1172 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1173 1173 slow_path = ui.config(
1174 1174 b'storage', b'revlog.persistent-nodemap.slow-path'
1175 1175 )
1176 1176 if slow_path not in (b'allow', b'warn', b'abort'):
1177 1177 default = ui.config_default(
1178 1178 b'storage', b'revlog.persistent-nodemap.slow-path'
1179 1179 )
1180 1180 msg = _(
1181 1181 b'unknown value for config '
1182 1182 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1183 1183 )
1184 1184 ui.warn(msg % slow_path)
1185 1185 if not ui.quiet:
1186 1186 ui.warn(_(b'falling back to default value: %s\n') % default)
1187 1187 slow_path = default
1188 1188
1189 1189 msg = _(
1190 1190 b"accessing `persistent-nodemap` repository without associated "
1191 1191 b"fast implementation."
1192 1192 )
1193 1193 hint = _(
1194 1194 b"check `hg help config.format.use-persistent-nodemap` "
1195 1195 b"for details"
1196 1196 )
1197 1197 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1198 1198 if slow_path == b'warn':
1199 1199 msg = b"warning: " + msg + b'\n'
1200 1200 ui.warn(msg)
1201 1201 if not ui.quiet:
1202 1202 hint = b'(' + hint + b')\n'
1203 1203 ui.warn(hint)
1204 1204 if slow_path == b'abort':
1205 1205 raise error.Abort(msg, hint=hint)
1206 1206 options[b'persistent-nodemap'] = True
1207 1207 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1208 1208 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1209 1209 if slow_path not in (b'allow', b'warn', b'abort'):
1210 1210 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1211 1211 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1212 1212 ui.warn(msg % slow_path)
1213 1213 if not ui.quiet:
1214 1214 ui.warn(_(b'falling back to default value: %s\n') % default)
1215 1215 slow_path = default
1216 1216
1217 1217 msg = _(
1218 1218 b"accessing `dirstate-v2` repository without associated "
1219 1219 b"fast implementation."
1220 1220 )
1221 1221 hint = _(
1222 1222 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1223 1223 )
1224 1224 if not dirstate.HAS_FAST_DIRSTATE_V2:
1225 1225 if slow_path == b'warn':
1226 1226 msg = b"warning: " + msg + b'\n'
1227 1227 ui.warn(msg)
1228 1228 if not ui.quiet:
1229 1229 hint = b'(' + hint + b')\n'
1230 1230 ui.warn(hint)
1231 1231 if slow_path == b'abort':
1232 1232 raise error.Abort(msg, hint=hint)
1233 1233 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1234 1234 options[b'persistent-nodemap.mmap'] = True
1235 1235 if ui.configbool(b'devel', b'persistent-nodemap'):
1236 1236 options[b'devel-force-nodemap'] = True
1237 1237
1238 1238 return options
1239 1239
1240 1240
1241 1241 def makemain(**kwargs):
1242 1242 """Produce a type conforming to ``ilocalrepositorymain``."""
1243 1243 return localrepository
1244 1244
1245 1245
1246 class RevlogFileStorage:
1246 class revlogfilestorage: # (repository.ilocalrepositoryfilestorage)
1247 1247 """File storage when using revlogs."""
1248 1248
1249 1249 def file(self, path):
1250 1250 if path.startswith(b'/'):
1251 1251 path = path[1:]
1252 1252
1253 1253 try_split = (
1254 1254 self.currenttransaction() is not None
1255 1255 or txnutil.mayhavepending(self.root)
1256 1256 )
1257 1257
1258 1258 return filelog.filelog(self.svfs, path, try_split=try_split)
1259 1259
1260 1260
1261 revlogfilestorage = interfaceutil.implementer(
1262 repository.ilocalrepositoryfilestorage
1263 )(RevlogFileStorage)
1264
1265 if typing.TYPE_CHECKING:
1266 # Help pytype by hiding the interface stuff that confuses it.
1267 revlogfilestorage = RevlogFileStorage
1268
1269
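The hunk above is the whole point of this commit: previously the module defined a CamelCase class, produced the historical lowercase name through the interface wrapper, and overrode it again under TYPE_CHECKING to help pytype. After the change the lowercase class is defined directly. Condensed before/after, using only names from the diff:

    # before
    class RevlogFileStorage:
        """File storage when using revlogs."""

    revlogfilestorage = interfaceutil.implementer(
        repository.ilocalrepositoryfilestorage
    )(RevlogFileStorage)

    if typing.TYPE_CHECKING:
        revlogfilestorage = RevlogFileStorage

    # after
    class revlogfilestorage:  # (repository.ilocalrepositoryfilestorage)
        """File storage when using revlogs."""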
1270 1261 class RevlogNarrowFileStorage:
1271 1262 """File storage when using revlogs and narrow files."""
1272 1263
1273 1264 def file(self, path):
1274 1265 if path.startswith(b'/'):
1275 1266 path = path[1:]
1276 1267
1277 1268 try_split = (
1278 1269 self.currenttransaction() is not None
1279 1270 or txnutil.mayhavepending(self.root)
1280 1271 )
1281 1272 return filelog.narrowfilelog(
1282 1273 self.svfs, path, self._storenarrowmatch, try_split=try_split
1283 1274 )
1284 1275
1285 1276
1286 1277 revlognarrowfilestorage = interfaceutil.implementer(
1287 1278 repository.ilocalrepositoryfilestorage
1288 1279 )(RevlogNarrowFileStorage)
1289 1280
1290 1281 if typing.TYPE_CHECKING:
1291 1282 # Help pytype by hiding the interface stuff that confuses it.
1292 1283 revlognarrowfilestorage = RevlogNarrowFileStorage
1293 1284
1294 1285
1295 1286 def makefilestorage(requirements, features, **kwargs):
1296 1287 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1297 1288 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1298 1289 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1299 1290
1300 1291 if requirementsmod.NARROW_REQUIREMENT in requirements:
1301 1292 return revlognarrowfilestorage
1302 1293 else:
1303 1294 return revlogfilestorage
1304 1295
1305 1296
1306 1297 # List of repository interfaces and factory functions for them. Each
1307 1298 # will be called in order during ``makelocalrepository()`` to iteratively
1308 1299 # derive the final type for a local repository instance. We capture the
1309 1300 # function as a lambda so we don't hold a reference and the module-level
1310 1301 # functions can be wrapped.
1311 1302 REPO_INTERFACES = [
1312 1303 (repository.ilocalrepositorymain, lambda: makemain),
1313 1304 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1314 1305 ]
1315 1306
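The comment above notes that the factory functions are captured via lambdas precisely so extensions can wrap them. A minimal sketch of wrapping makefilestorage to mix extra behavior into the derived repo type (hypothetical extension code):

    from mercurial import extensions, localrepo

    def _makefilestorage(orig, requirements, features, **kwargs):
        cls = orig(requirements, features, **kwargs)

        class extrafilestorage(cls):
            def file(self, path):
                fl = super().file(path)
                # ... decorate or observe the returned filelog here ...
                return fl

        return extrafilestorage

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)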
1316 1307 _localrepo_base_classes = object
1317 1308
1318 1309 if typing.TYPE_CHECKING:
1319 1310 _localrepo_base_classes = [
1320 1311 repository.ilocalrepositorymain,
1321 1312 repository.ilocalrepositoryfilestorage,
1322 1313 ]
1323 1314
1324 1315
1325 1316 class LocalRepository(_localrepo_base_classes):
1326 1317 """Main class for representing local repositories.
1327 1318
1328 1319 All local repositories are instances of this class.
1329 1320
1330 1321 Constructed on its own, instances of this class are not usable as
1331 1322 repository objects. To obtain a usable repository object, call
1332 1323 ``hg.repository()``, ``localrepo.instance()``, or
1333 1324 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1334 1325 ``instance()`` adds support for creating new repositories.
1335 1326 ``hg.repository()`` adds more extension integration, including calling
1336 1327 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1337 1328 used.
1338 1329 """
1339 1330
1340 1331 _basesupported = {
1341 1332 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1342 1333 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1343 1334 requirementsmod.CHANGELOGV2_REQUIREMENT,
1344 1335 requirementsmod.COPIESSDC_REQUIREMENT,
1345 1336 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1346 1337 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1347 1338 requirementsmod.DOTENCODE_REQUIREMENT,
1348 1339 requirementsmod.FNCACHE_REQUIREMENT,
1349 1340 requirementsmod.GENERALDELTA_REQUIREMENT,
1350 1341 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1351 1342 requirementsmod.NODEMAP_REQUIREMENT,
1352 1343 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1353 1344 requirementsmod.REVLOGV1_REQUIREMENT,
1354 1345 requirementsmod.REVLOGV2_REQUIREMENT,
1355 1346 requirementsmod.SHARED_REQUIREMENT,
1356 1347 requirementsmod.SHARESAFE_REQUIREMENT,
1357 1348 requirementsmod.SPARSE_REQUIREMENT,
1358 1349 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1359 1350 requirementsmod.STORE_REQUIREMENT,
1360 1351 requirementsmod.TREEMANIFEST_REQUIREMENT,
1361 1352 }
1362 1353
1363 1354 # list of prefix for file which can be written without 'wlock'
1364 1355 # Extensions should extend this list when needed
1365 1356 _wlockfreeprefix = {
1366 1357 # We might consider requiring 'wlock' for the next
1367 1358 # two, but pretty much all the existing code assumes
1368 1359 # wlock is not needed, so we keep them excluded for
1369 1360 # now.
1370 1361 b'hgrc',
1371 1362 b'requires',
1372 1363 # XXX cache is a complicated business; someone
1373 1364 # should investigate this in depth at some point
1374 1365 b'cache/',
1375 1366 # XXX bisect was still a bit too messy at the time
1376 1367 # this changeset was introduced. Someone should fix
1377 1368 # the remaining bit and drop this line
1378 1369 b'bisect.state',
1379 1370 }
1380 1371
1381 1372 def __init__(
1382 1373 self,
1383 1374 baseui,
1384 1375 ui,
1385 1376 origroot: bytes,
1386 1377 wdirvfs: vfsmod.vfs,
1387 1378 hgvfs: vfsmod.vfs,
1388 1379 requirements,
1389 1380 supportedrequirements,
1390 1381 sharedpath: bytes,
1391 1382 store,
1392 1383 cachevfs: vfsmod.vfs,
1393 1384 wcachevfs: vfsmod.vfs,
1394 1385 features,
1395 1386 intents=None,
1396 1387 ):
1397 1388 """Create a new local repository instance.
1398 1389
1399 1390 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1400 1391 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1401 1392 object.
1402 1393
1403 1394 Arguments:
1404 1395
1405 1396 baseui
1406 1397 ``ui.ui`` instance that ``ui`` argument was based off of.
1407 1398
1408 1399 ui
1409 1400 ``ui.ui`` instance for use by the repository.
1410 1401
1411 1402 origroot
1412 1403 ``bytes`` path to working directory root of this repository.
1413 1404
1414 1405 wdirvfs
1415 1406 ``vfs.vfs`` rooted at the working directory.
1416 1407
1417 1408 hgvfs
1418 1409 ``vfs.vfs`` rooted at .hg/
1419 1410
1420 1411 requirements
1421 1412 ``set`` of bytestrings representing repository opening requirements.
1422 1413
1423 1414 supportedrequirements
1424 1415 ``set`` of bytestrings representing repository requirements that we
1425 1416 know how to open. May be a superset of ``requirements``.
1426 1417
1427 1418 sharedpath
1428 1419 ``bytes`` Defining path to storage base directory. Points to a
1429 1420 ``.hg/`` directory somewhere.
1430 1421
1431 1422 store
1432 1423 ``store.basicstore`` (or derived) instance providing access to
1433 1424 versioned storage.
1434 1425
1435 1426 cachevfs
1436 1427 ``vfs.vfs`` used for cache files.
1437 1428
1438 1429 wcachevfs
1439 1430 ``vfs.vfs`` used for cache files related to the working copy.
1440 1431
1441 1432 features
1442 1433 ``set`` of bytestrings defining features/capabilities of this
1443 1434 instance.
1444 1435
1445 1436 intents
1446 1437 ``set`` of system strings indicating what this repo will be used
1447 1438 for.
1448 1439 """
1449 1440 self.baseui = baseui
1450 1441 self.ui = ui
1451 1442 self.origroot = origroot
1452 1443 # vfs rooted at working directory.
1453 1444 self.wvfs = wdirvfs
1454 1445 self.root = wdirvfs.base
1455 1446 # vfs rooted at .hg/. Used to access most non-store paths.
1456 1447 self.vfs = hgvfs
1457 1448 self.path = hgvfs.base
1458 1449 self.requirements = requirements
1459 1450 self.nodeconstants = sha1nodeconstants
1460 1451 self.nullid = self.nodeconstants.nullid
1461 1452 self.supported = supportedrequirements
1462 1453 self.sharedpath = sharedpath
1463 1454 self.store = store
1464 1455 self.cachevfs = cachevfs
1465 1456 self.wcachevfs = wcachevfs
1466 1457 self.features = features
1467 1458
1468 1459 self.filtername = None
1469 1460
1470 1461 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1471 1462 b'devel', b'check-locks'
1472 1463 ):
1473 1464 self.vfs.audit = self._getvfsward(self.vfs.audit)
1474 1465 # A list of callbacks to shape the phase if no data were found.
1475 1466 # Callbacks are in the form: func(repo, roots) --> processed root.
1476 1467 # This list is to be filled by extensions during repo setup
1477 1468 self._phasedefaults = []
1478 1469
1479 1470 color.setup(self.ui)
1480 1471
1481 1472 self.spath = self.store.path
1482 1473 self.svfs = self.store.vfs
1483 1474 self.sjoin = self.store.join
1484 1475 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1485 1476 b'devel', b'check-locks'
1486 1477 ):
1487 1478 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1488 1479 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1489 1480 else: # standard vfs
1490 1481 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1491 1482
1492 1483 self._dirstatevalidatewarned = False
1493 1484
1494 1485 self._branchcaches = branchmap.BranchMapCache()
1495 1486 self._revbranchcache = None
1496 1487 self._filterpats = {}
1497 1488 self._datafilters = {}
1498 1489 self._transref = self._lockref = self._wlockref = None
1499 1490
1500 1491 # A cache for various files under .hg/ that tracks file changes
1501 1492 # (used by the filecache decorator)
1502 1493 #
1503 1494 # Maps a property name to its util.filecacheentry
1504 1495 self._filecache = {}
1505 1496
1506 1497 # holds sets of revisions to be filtered;
1507 1498 # should be cleared when something might have changed the filter value:
1508 1499 # - new changesets,
1509 1500 # - phase change,
1510 1501 # - new obsolescence marker,
1511 1502 # - working directory parent change,
1512 1503 # - bookmark changes
1513 1504 self.filteredrevcache = {}
1514 1505
1515 1506 self._dirstate = None
1516 1507 # post-dirstate-status hooks
1517 1508 self._postdsstatus = []
1518 1509
1519 1510 self._pending_narrow_pats = None
1520 1511 self._pending_narrow_pats_dirstate = None
1521 1512
1522 1513 # generic mapping between names and nodes
1523 1514 self.names = namespaces.namespaces()
1524 1515
1525 1516 # Key to signature value.
1526 1517 self._sparsesignaturecache = {}
1527 1518 # Signature to cached matcher instance.
1528 1519 self._sparsematchercache = {}
1529 1520
1530 1521 self._extrafilterid = repoview.extrafilter(ui)
1531 1522
1532 1523 self.filecopiesmode = None
1533 1524 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1534 1525 self.filecopiesmode = b'changeset-sidedata'
1535 1526
1536 1527 self._wanted_sidedata = set()
1537 1528 self._sidedata_computers = {}
1538 1529 sidedatamod.set_sidedata_spec_for_repo(self)
1539 1530
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not hasattr(repo, '_wlockref')
                or not hasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not hasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
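
    # A minimal usage sketch (filter names come from repoview.filtertable;
    # the ones below are referenced elsewhere in this module):
    #
    #     served = repo.filtered(b'served')           # view suitable for serving
    #     hidden_too = repo.filtered(b'served.hidden')
    #
    # Each call builds a fresh repoview wrapping the unfiltered repository.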

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with the current repository format), there is a
        # race condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race, see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the lock,
            # the invalidation at lock time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognize right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
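
    # For reference, a sketch of the changeid forms accepted above (the
    # values are illustrative):
    #
    #     repo[None]       # working directory context
    #     repo[0]          # integer revision number
    #     repo[b'.']       # working directory parent
    #     repo[b'tip']     # current tip
    #     repo[node]       # 20-byte binary node
    #     repo[hexnode]    # 40-byte hex node
    #     repo[0:5]        # slice -> list of changectx, skipping filtered revs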

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]
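
    # A hedged usage sketch; the %d/%s escapes follow revsetlang.formatspec
    # (`somerev` is a hypothetical integer revision supplied by the caller):
    #
    #     for rev in repo.revs(b'ancestors(%d) and draft()', somerev):
    #         ...
    #     for ctx in repo.set(b'heads(branch(%s))', b'default'):
    #         ...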

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
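
    # A minimal sketch with a local alias override (the alias name and
    # definition are illustrative):
    #
    #     revs = repo.anyrevs(
    #         [b'mine'],
    #         user=True,
    #         localalias={b'mine': b'draft() and user("alice")'},
    #     )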

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
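
    # A hedged sketch of a direct hook invocation; keyword arguments are
    # exposed to external hooks as HG_* environment variables (`hexnode` is a
    # hypothetical hex changeset id supplied by the caller):
    #
    #     repo.hook(b'preupdate', throw=True, parent1=hexnode, parent2=b'')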

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            unfi = self.unfiltered()
            self._revbranchcache = rev_branch_cache.revbranchcache(unfi)
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
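
    # For reference, a minimal hgrc sketch of the [encode]/[decode] sections
    # parsed above (patterns and commands are illustrative; a command of "!"
    # disables a pattern):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip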

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs,
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
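
    # Note: ``flags`` mirrors manifest flags; b'l' writes ``data`` as the
    # target of a symlink, b'x' marks the file executable, and b'' produces a
    # plain, non-executable file.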

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (e.g.: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with the need of rollback to have viable
        # parents at the end of the operation. So we back up viable parents at
        # the time of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parent in a dedicated file and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are
                    # append-only and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr
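
    # A hedged usage sketch: transactions must be opened while holding the
    # appropriate locks (see the ProgrammingError check above) and are usually
    # managed through higher-level APIs:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # write store data, register backups/journals with tr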

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this
        case the cache warming is done after a clone, and some of the slower
        caches might be skipped, namely the `.fnodetags` one. This argument is
        5.8 specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                self.ui.debug(b'updating the branch cache\n')
                dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
                served = self.filtered(b'served')
                self._branchcaches.update_disk(served, detect_pure_topo=dpt)
                served_hidden = self.filtered(b'served.hidden')
                self._branchcaches.update_disk(
                    served_hidden, detect_pure_topo=dpt
                )

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.warm_cache(self)

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches

            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)

        # flush all possibly delayed writes.
        self._branchcaches.write_dirty(self)
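
    # A minimal sketch of targeted cache warming (the CACHE_* constants come
    # from interfaces.repository, imported above as `repository`):
    #
    #     repo.updatecaches(caches={repository.CACHE_TAGS_DEFAULT})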
3000 2991
3001 2992 def invalidatecaches(self):
3002 2993 if '_tagscache' in vars(self):
3003 2994 # can't use delattr on proxy
3004 2995 del self.__dict__['_tagscache']
3005 2996
3006 2997 self._branchcaches.clear()
3007 2998 self.invalidatevolatilesets()
3008 2999 self._sparsesignaturecache.clear()
3009 3000
3010 3001 def invalidatevolatilesets(self):
3011 3002 self.filteredrevcache.clear()
3012 3003 obsolete.clearobscaches(self)
3013 3004 self._quick_access_changeid_invalidate()
3014 3005
    def invalidatedirstate(self):
        """Invalidate the dirstate, causing the next access to check
        whether it was modified since it was last read, rereading it if
        it was.

        This differs from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previously known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

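    # A sketch of the distinction drawn in the docstring above (names
    # assumed):
    #
    #     repo.invalidatedirstate()   # drop the cached object; the next
    #                                 # access rereads it only if it changed
    #     repo.dirstate.invalidate()  # unconditionally discard any
    #                                 # in-memory dirstate state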
    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but a
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog.is_delaying
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact it refers to an attribute name. However, changing this
                # was a bit of scope creep compared to the series cleaning up
                # del/set/getattr, so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
        sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
        if not sync_file:
            sync_file = None

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
            devel_wait_sync_file=sync_file,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

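    # A minimal sketch of registering a post-unlock callback through
    # _afterlock() (the callback takes a single success flag, as the
    # commithook() helper further down illustrates; `repo` is assumed):
    #
    #     def notify(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)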
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        self.hook(b'prelock', throw=True)
        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        self.hook(b'prewlock', throw=True)
        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

                unfi = self.unfiltered()
                if 'dirstate' in unfi.__dict__:
                    del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

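    # The lock-ordering rule from the docstrings above, as a sketch
    # (assuming a `repo` object): always take 'wlock' before 'lock' so that
    # two tasks can never deadlock by acquiring them in opposite orders.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to touch both the store and the working copy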
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

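    # A hypothetical caller of the commit() API above (the method takes its
    # own wlock/lock, so no locking is needed here; names are assumed):
    #
    #     node = repo.commit(
    #         text=b'fix the frobnicator',
    #         user=b'Alice <alice@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing to commit\n')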
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write(self)

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

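    # A sketch of a post-dirstate-status callback as described above
    # (hypothetical extension code; the callback receives the working
    # context and the status object):
    #
    #     def fixup(wctx, status):
    #         # always go through wctx.repo().dirstate, never a cached copy
    #         dirstate = wctx.repo().dirstate
    #         ...
    #
    #     repo.addpostdsstatus(fixup)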
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

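    # For example (assuming a `repo` with a 'default' branch), the call
    #
    #     repo.branchheads(b'default', closed=True)
    #
    # returns that branch's head nodes, newest first, including any heads
    # that were closed with --close-branch.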
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

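    # between() samples the first-parent chain from `top` down to `bottom`
    # at exponentially growing distances. For a linear history r10..r0, a
    # hypothetical call such as
    #
    #     repo.between([(node10, node0)])
    #
    # collects the nodes 1, 2, 4 and 8 first-parent steps below node10,
    # giving callers a logarithmic-size summary of the range.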
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called before pushing
        changesets; each hook receives a pushop carrying the repo, remote and
        outgoing information.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

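    # A hedged usage sketch for the pushkey API above (namespaces such as
    # b'bookmarks' are registered in pushkey.py; `repo` and `newnode` are
    # assumed):
    #
    #     ok = repo.pushkey(b'bookmarks', b'stable', b'', hex(newnode))
    #
    # An empty `old` value asks for the key to be created; the hooks fired
    # above can still veto the change.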
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)

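    # A sketch of registering a sidedata computer (hypothetical category and
    # computer; the stored tuple layout matches the assignment above, and
    # computers conventionally return the new sidedata entries plus a pair
    # of flags to add/drop):
    #
    #     def compute(repo, store, rev, sidedata):
    #         return {sidedatamod.SD_TEST1: b'...'}, (0, 0)
    #
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,
    #         b'exp-test',
    #         (sidedatamod.SD_TEST1,),
    #         compute,
    #         0,
    #     )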

localrepository = interfaceutil.implementer(repository.ilocalrepositorymain)(
    LocalRepository
)

if typing.TYPE_CHECKING:
    # Help pytype by hiding the interface stuff that confuses it.
    localrepository = LocalRepository


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository; we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns the user about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

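# A sketch of the extension hook described in the docstring above: an
# extension teaching repo creation about a hypothetical b'myopt' option
# could wrap the function like this (typical extension boilerplate):
#
#     from mercurial import extensions, localrepo
#
#     def _filtered(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myopt', None)  # we know how to handle this one
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filtered)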

def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the store's requires file.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

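# A hypothetical direct caller of createrepository() (the path and `myui`
# are made up; most callers go through instance() instead):
#
#     localrepo.createrepository(
#         myui, b'/tmp/newrepo', createopts={b'lfs': True}
#     )
#
# Note the function only writes the on-disk skeleton and requirements;
# makelocalrepository() is what actually opens the new repo.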

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)