typing: make the localrepo classes known to pytype...
Matt Harbison
r52788:ee7e106b default
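The diff below applies one pattern throughout: each class decorated with @interfaceutil.implementer(...) is renamed to a CamelCase plain class, the historical lowercase name is rebound by calling interfaceutil.implementer(...) as an ordinary function, and a typing.TYPE_CHECKING block then points the lowercase name back at the undecorated class, so pytype analyzes the real class instead of the opaque decorated alias. A minimal, runnable sketch of the idea; the implementer() stand-in and the IExample/Example names are illustrative, not from this file:

import typing


def implementer(iface):
    # Stand-in for interfaceutil.implementer(): the real helper attaches
    # zope.interface metadata to the class and returns it.
    def decorate(cls):
        return cls

    return decorate


class IExample:
    # Hypothetical interface, for illustration only.
    pass


class Example:
    """The real implementation; pytype sees this class directly."""

    def run(self) -> int:
        return 42


# Runtime: keep the historical lowercase name bound to the decorated class.
example = implementer(IExample)(Example)

if typing.TYPE_CHECKING:
    # Help pytype by hiding the interface stuff that confuses it.
    example = Example

Nothing changes at runtime (the lowercase alias still carries the interface declaration); only the static view differs, because pytype evaluates the TYPE_CHECKING branch and resolves the alias to the plain class.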
@@ -1,4037 +1,4091
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import functools
12 12 import os
13 13 import random
14 14 import re
15 15 import sys
16 16 import time
17 import typing
17 18 import weakref
18 19
19 20 from concurrent import futures
20 21 from typing import (
21 22 Optional,
22 23 )
23 24
24 25 from .i18n import _
25 26 from .node import (
26 27 bin,
27 28 hex,
28 29 nullrev,
29 30 sha1nodeconstants,
30 31 short,
31 32 )
32 33 from . import (
33 34 bookmarks,
34 35 branchmap,
35 36 bundle2,
36 37 bundlecaches,
37 38 changegroup,
38 39 color,
39 40 commit,
40 41 context,
41 42 dirstate,
42 43 discovery,
43 44 encoding,
44 45 error,
45 46 exchange,
46 47 extensions,
47 48 filelog,
48 49 hook,
49 50 lock as lockmod,
50 51 match as matchmod,
51 52 mergestate as mergestatemod,
52 53 mergeutil,
53 54 namespaces,
54 55 narrowspec,
55 56 obsolete,
56 57 pathutil,
57 58 phases,
58 59 policy,
59 60 pushkey,
60 61 pycompat,
61 62 rcutil,
62 63 repoview,
63 64 requirements as requirementsmod,
64 65 revlog,
65 66 revset,
66 67 revsetlang,
67 68 scmutil,
68 69 sparse,
69 70 store as storemod,
70 71 subrepoutil,
71 72 tags as tagsmod,
72 73 transaction,
73 74 txnutil,
74 75 util,
75 76 vfs as vfsmod,
76 77 wireprototypes,
77 78 )
78 79
79 80 from .interfaces import (
80 81 repository,
81 82 util as interfaceutil,
82 83 )
83 84
84 85 from .utils import (
85 86 hashutil,
86 87 procutil,
87 88 stringutil,
88 89 urlutil,
89 90 )
90 91
91 92 from .revlogutils import (
92 93 concurrency_checker as revlogchecker,
93 94 constants as revlogconst,
94 95 sidedata as sidedatamod,
95 96 )
96 97
97 98 release = lockmod.release
98 99 urlerr = util.urlerr
99 100 urlreq = util.urlreq
100 101
101 102 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
102 103 b"^((dirstate|narrowspec.dirstate).*|branch$)"
103 104 )
104 105
105 106 # set of (path, vfs-location) tuples. vfs-location is:
106 107 # - 'plain' for vfs relative paths
107 108 # - '' for svfs relative paths
108 109 _cachedfiles = set()
109 110
110 111
111 112 class _basefilecache(scmutil.filecache):
112 113 """All filecache usage on repo are done for logic that should be unfiltered"""
113 114
114 115 def __get__(self, repo, type=None):
115 116 if repo is None:
116 117 return self
117 118 # proxy to unfiltered __dict__ since filtered repo has no entry
118 119 unfi = repo.unfiltered()
119 120 try:
120 121 return unfi.__dict__[self.sname]
121 122 except KeyError:
122 123 pass
123 124 return super(_basefilecache, self).__get__(unfi, type)
124 125
125 126 def set(self, repo, value):
126 127 return super(_basefilecache, self).set(repo.unfiltered(), value)
127 128
128 129
129 130 class repofilecache(_basefilecache):
130 131 """filecache for files in .hg but outside of .hg/store"""
131 132
132 133 def __init__(self, *paths):
133 134 super(repofilecache, self).__init__(*paths)
134 135 for path in paths:
135 136 _cachedfiles.add((path, b'plain'))
136 137
137 138 def join(self, obj, fname):
138 139 return obj.vfs.join(fname)
139 140
140 141
141 142 class storecache(_basefilecache):
142 143 """filecache for files in the store"""
143 144
144 145 def __init__(self, *paths):
145 146 super(storecache, self).__init__(*paths)
146 147 for path in paths:
147 148 _cachedfiles.add((path, b''))
148 149
149 150 def join(self, obj, fname):
150 151 return obj.sjoin(fname)
151 152
152 153
153 154 class changelogcache(storecache):
154 155 """filecache for the changelog"""
155 156
156 157 def __init__(self):
157 158 super(changelogcache, self).__init__()
158 159 _cachedfiles.add((b'00changelog.i', b''))
159 160 _cachedfiles.add((b'00changelog.n', b''))
160 161
161 162 def tracked_paths(self, obj):
162 163 paths = [self.join(obj, b'00changelog.i')]
163 164 if obj.store.opener.options.get(b'persistent-nodemap', False):
164 165 paths.append(self.join(obj, b'00changelog.n'))
165 166 return paths
166 167
167 168
168 169 class manifestlogcache(storecache):
169 170 """filecache for the manifestlog"""
170 171
171 172 def __init__(self):
172 173 super(manifestlogcache, self).__init__()
173 174 _cachedfiles.add((b'00manifest.i', b''))
174 175 _cachedfiles.add((b'00manifest.n', b''))
175 176
176 177 def tracked_paths(self, obj):
177 178 paths = [self.join(obj, b'00manifest.i')]
178 179 if obj.store.opener.options.get(b'persistent-nodemap', False):
179 180 paths.append(self.join(obj, b'00manifest.n'))
180 181 return paths
181 182
182 183
183 184 class mixedrepostorecache(_basefilecache):
184 185 """filecache for a mix files in .hg/store and outside"""
185 186
186 187 def __init__(self, *pathsandlocations):
187 188 # scmutil.filecache only uses the path for passing back into our
188 189 # join(), so we can safely pass a list of paths and locations
189 190 super(mixedrepostorecache, self).__init__(*pathsandlocations)
190 191 _cachedfiles.update(pathsandlocations)
191 192
192 193 def join(self, obj, fnameandlocation):
193 194 fname, location = fnameandlocation
194 195 if location == b'plain':
195 196 return obj.vfs.join(fname)
196 197 else:
197 198 if location != b'':
198 199 raise error.ProgrammingError(
199 200 b'unexpected location: %s' % location
200 201 )
201 202 return obj.sjoin(fname)
202 203
203 204
204 205 def isfilecached(repo, name):
205 206 """check if a repo has already cached "name" filecache-ed property
206 207
207 208 This returns (cachedobj-or-None, iscached) tuple.
208 209 """
209 210 cacheentry = repo.unfiltered()._filecache.get(name, None)
210 211 if not cacheentry:
211 212 return None, False
212 213 return cacheentry.obj, True
213 214
214 215
215 216 class unfilteredpropertycache(util.propertycache):
216 217 """propertycache that apply to unfiltered repo only"""
217 218
218 219 def __get__(self, repo, type=None):
219 220 unfi = repo.unfiltered()
220 221 if unfi is repo:
221 222 return super(unfilteredpropertycache, self).__get__(unfi)
222 223 return getattr(unfi, self.name)
223 224
224 225
225 226 class filteredpropertycache(util.propertycache):
226 227 """propertycache that must take filtering in account"""
227 228
228 229 def cachevalue(self, obj, value):
229 230 object.__setattr__(obj, self.name, value)
230 231
231 232
232 233 def hasunfilteredcache(repo, name):
233 234 """check if a repo has an unfilteredpropertycache value for <name>"""
234 235 return name in vars(repo.unfiltered())
235 236
236 237
237 238 def unfilteredmethod(orig):
238 239 """decorate method that always need to be run on unfiltered version"""
239 240
240 241 @functools.wraps(orig)
241 242 def wrapper(repo, *args, **kwargs):
242 243 return orig(repo.unfiltered(), *args, **kwargs)
243 244
244 245 return wrapper
245 246
246 247
247 248 moderncaps = {
248 249 b'lookup',
249 250 b'branchmap',
250 251 b'pushkey',
251 252 b'known',
252 253 b'getbundle',
253 254 b'unbundle',
254 255 }
255 256 legacycaps = moderncaps.union({b'changegroupsubset'})
256 257
257 258
258 @interfaceutil.implementer(repository.ipeercommandexecutor)
259 class localcommandexecutor:
259 class LocalCommandExecutor:
260 260 def __init__(self, peer):
261 261 self._peer = peer
262 262 self._sent = False
263 263 self._closed = False
264 264
265 265 def __enter__(self):
266 266 return self
267 267
268 268 def __exit__(self, exctype, excvalue, exctb):
269 269 self.close()
270 270
271 271 def callcommand(self, command, args):
272 272 if self._sent:
273 273 raise error.ProgrammingError(
274 274 b'callcommand() cannot be used after sendcommands()'
275 275 )
276 276
277 277 if self._closed:
278 278 raise error.ProgrammingError(
279 279 b'callcommand() cannot be used after close()'
280 280 )
281 281
282 282 # We don't need to support anything fancy. Just call the named
283 283 # method on the peer and return a resolved future.
284 284 fn = getattr(self._peer, pycompat.sysstr(command))
285 285
286 286 f = futures.Future()
287 287
288 288 try:
289 289 result = fn(**pycompat.strkwargs(args))
290 290 except Exception:
291 291 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
292 292 else:
293 293 f.set_result(result)
294 294
295 295 return f
296 296
297 297 def sendcommands(self):
298 298 self._sent = True
299 299
300 300 def close(self):
301 301 self._closed = True
302 302
303 303
304 @interfaceutil.implementer(repository.ipeercommands)
305 class localpeer(repository.peer):
304 localcommandexecutor = interfaceutil.implementer(
305 repository.ipeercommandexecutor
306 )(LocalCommandExecutor)
307
308 if typing.TYPE_CHECKING:
309 # Help pytype by hiding the interface stuff that confuses it.
310 localcommandexecutor = LocalCommandExecutor
311
312
313 class LocalPeer(repository.peer):
306 314 '''peer for a local repo; reflects only the most recent API'''
307 315
308 316 def __init__(self, repo, caps=None, path=None, remotehidden=False):
309 super(localpeer, self).__init__(
317 super(LocalPeer, self).__init__(
310 318 repo.ui, path=path, remotehidden=remotehidden
311 319 )
312 320
313 321 if caps is None:
314 322 caps = moderncaps.copy()
315 323 if remotehidden:
316 324 self._repo = repo.filtered(b'served.hidden')
317 325 else:
318 326 self._repo = repo.filtered(b'served')
319 327 if repo._wanted_sidedata:
320 328 formatted = bundle2.format_remote_wanted_sidedata(repo)
321 329 caps.add(b'exp-wanted-sidedata=' + formatted)
322 330
323 331 self._caps = repo._restrictcapabilities(caps)
324 332
325 333 # Begin of _basepeer interface.
326 334
327 335 def url(self):
328 336 return self._repo.url()
329 337
330 338 def local(self):
331 339 return self._repo
332 340
333 341 def canpush(self):
334 342 return True
335 343
336 344 def close(self):
337 345 self._repo.close()
338 346
339 347 # End of _basepeer interface.
340 348
341 349 # Begin of _basewirecommands interface.
342 350
343 351 def branchmap(self):
344 352 return self._repo.branchmap()
345 353
346 354 def capabilities(self):
347 355 return self._caps
348 356
349 357 def get_cached_bundle_inline(self, path):
350 358 # not needed with local peer
351 359 raise NotImplementedError
352 360
353 361 def clonebundles(self):
354 362 return bundlecaches.get_manifest(self._repo)
355 363
356 364 def debugwireargs(self, one, two, three=None, four=None, five=None):
357 365 """Used to test argument passing over the wire"""
358 366 return b"%s %s %s %s %s" % (
359 367 one,
360 368 two,
361 369 pycompat.bytestr(three),
362 370 pycompat.bytestr(four),
363 371 pycompat.bytestr(five),
364 372 )
365 373
366 374 def getbundle(
367 375 self,
368 376 source,
369 377 heads=None,
370 378 common=None,
371 379 bundlecaps=None,
372 380 remote_sidedata=None,
373 381 **kwargs,
374 382 ):
375 383 chunks = exchange.getbundlechunks(
376 384 self._repo,
377 385 source,
378 386 heads=heads,
379 387 common=common,
380 388 bundlecaps=bundlecaps,
381 389 remote_sidedata=remote_sidedata,
382 390 **kwargs,
383 391 )[1]
384 392 cb = util.chunkbuffer(chunks)
385 393
386 394 if exchange.bundle2requested(bundlecaps):
387 395 # When requesting a bundle2, getbundle returns a stream to make the
388 396 # wire level function happier. We need to build a proper object
389 397 # from it in local peer.
390 398 return bundle2.getunbundler(self.ui, cb)
391 399 else:
392 400 return changegroup.getunbundler(b'01', cb, None)
393 401
394 402 def heads(self):
395 403 return self._repo.heads()
396 404
397 405 def known(self, nodes):
398 406 return self._repo.known(nodes)
399 407
400 408 def listkeys(self, namespace):
401 409 return self._repo.listkeys(namespace)
402 410
403 411 def lookup(self, key):
404 412 return self._repo.lookup(key)
405 413
406 414 def pushkey(self, namespace, key, old, new):
407 415 return self._repo.pushkey(namespace, key, old, new)
408 416
409 417 def stream_out(self):
410 418 raise error.Abort(_(b'cannot perform stream clone against local peer'))
411 419
412 420 def unbundle(self, bundle, heads, url):
413 421 """apply a bundle on a repo
414 422
415 423 This function handles the repo locking itself."""
416 424 try:
417 425 try:
418 426 bundle = exchange.readbundle(self.ui, bundle, None)
419 427 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
420 428 if hasattr(ret, 'getchunks'):
421 429 # This is a bundle20 object, turn it into an unbundler.
422 430 # This little dance should be dropped eventually when the
423 431 # API is finally improved.
424 432 stream = util.chunkbuffer(ret.getchunks())
425 433 ret = bundle2.getunbundler(self.ui, stream)
426 434 return ret
427 435 except Exception as exc:
428 436 # If the exception contains output salvaged from a bundle2
429 437 # reply, we need to make sure it is printed before continuing
430 438 # to fail. So we build a bundle2 with such output and consume
431 439 # it directly.
432 440 #
433 441 # This is not very elegant but allows a "simple" solution for
434 442 # issue4594
435 443 output = getattr(exc, '_bundle2salvagedoutput', ())
436 444 if output:
437 445 bundler = bundle2.bundle20(self._repo.ui)
438 446 for out in output:
439 447 bundler.addpart(out)
440 448 stream = util.chunkbuffer(bundler.getchunks())
441 449 b = bundle2.getunbundler(self.ui, stream)
442 450 bundle2.processbundle(self._repo, b)
443 451 raise
444 452 except error.PushRaced as exc:
445 453 raise error.ResponseError(
446 454 _(b'push failed:'), stringutil.forcebytestr(exc)
447 455 )
448 456
449 457 # End of _basewirecommands interface.
450 458
451 459 # Begin of peer interface.
452 460
453 461 def commandexecutor(self):
454 462 return localcommandexecutor(self)
455 463
456 464 # End of peer interface.
457 465
458 466
459 @interfaceutil.implementer(repository.ipeerlegacycommands)
460 class locallegacypeer(localpeer):
467 localpeer = interfaceutil.implementer(repository.ipeercommands)(LocalPeer)
468
469 if typing.TYPE_CHECKING:
470 # Help pytype by hiding the interface stuff that confuses it.
471 localpeer = LocalPeer
472
473
474 class LocalLegacyPeer(localpeer):
461 475 """peer extension which implements legacy methods too; used for tests with
462 476 restricted capabilities"""
463 477
464 478 def __init__(self, repo, path=None, remotehidden=False):
465 super(locallegacypeer, self).__init__(
479 super(LocalLegacyPeer, self).__init__(
466 480 repo, caps=legacycaps, path=path, remotehidden=remotehidden
467 481 )
468 482
469 483 # Begin of baselegacywirecommands interface.
470 484
471 485 def between(self, pairs):
472 486 return self._repo.between(pairs)
473 487
474 488 def branches(self, nodes):
475 489 return self._repo.branches(nodes)
476 490
477 491 def changegroup(self, nodes, source):
478 492 outgoing = discovery.outgoing(
479 493 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
480 494 )
481 495 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
482 496
483 497 def changegroupsubset(self, bases, heads, source):
484 498 outgoing = discovery.outgoing(
485 499 self._repo, missingroots=bases, ancestorsof=heads
486 500 )
487 501 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
488 502
489 503 # End of baselegacywirecommands interface.
490 504
491 505
506 locallegacypeer = interfaceutil.implementer(repository.ipeerlegacycommands)(
507 LocalLegacyPeer
508 )
509
510 if typing.TYPE_CHECKING:
511 # Help pytype by hiding the interface stuff that confuses it.
512 locallegacypeer = LocalLegacyPeer
513
492 514 # Functions receiving (ui, features) that extensions can register to impact
493 515 # the ability to load repositories with custom requirements. Only
494 516 # functions defined in loaded extensions are called.
495 517 #
496 518 # The function receives a set of requirement strings that the repository
497 519 # is capable of opening. Functions will typically add elements to the
498 520 # set to reflect that the extension knows how to handle those requirements.
499 521 featuresetupfuncs = set()
500 522
501 523
502 524 def _getsharedvfs(hgvfs, requirements):
503 525 """returns the vfs object pointing to root of shared source
504 526 repo for a shared repository
505 527
506 528 hgvfs is vfs pointing at .hg/ of current repo (shared one)
507 529 requirements is a set of requirements of current repo (shared one)
508 530 """
509 531 # The ``shared`` or ``relshared`` requirements indicate the
510 532 # store lives in the path contained in the ``.hg/sharedpath`` file.
511 533 # This is an absolute path for ``shared`` and relative to
512 534 # ``.hg/`` for ``relshared``.
513 535 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
514 536 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
515 537 sharedpath = util.normpath(hgvfs.join(sharedpath))
516 538
517 539 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
518 540
519 541 if not sharedvfs.exists():
520 542 raise error.RepoError(
521 543 _(b'.hg/sharedpath points to nonexistent directory %s')
522 544 % sharedvfs.base
523 545 )
524 546 return sharedvfs
525 547
526 548
527 549 def makelocalrepository(baseui, path: bytes, intents=None):
528 550 """Create a local repository object.
529 551
530 552 Given arguments needed to construct a local repository, this function
531 553 performs various early repository loading functionality (such as
532 554 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
533 555 the repository can be opened, derives a type suitable for representing
534 556 that repository, and returns an instance of it.
535 557
536 558 The returned object conforms to the ``repository.completelocalrepository``
537 559 interface.
538 560
539 561 The repository type is derived by calling a series of factory functions
540 562 for each aspect/interface of the final repository. These are defined by
541 563 ``REPO_INTERFACES``.
542 564
543 565 Each factory function is called to produce a type implementing a specific
544 566 interface. The cumulative list of returned types will be combined into a
545 567 new type and that type will be instantiated to represent the local
546 568 repository.
547 569
548 570 The factory functions each receive various state that may be consulted
549 571 as part of deriving a type.
550 572
551 573 Extensions should wrap these factory functions to customize repository type
552 574 creation. Note that an extension's wrapped function may be called even if
553 575 that extension is not loaded for the repo being constructed. Extensions
554 576 should check if their ``__name__`` appears in the
555 577 ``extensionmodulenames`` set passed to the factory function and no-op if
556 578 not.
557 579 """
558 580 ui = baseui.copy()
559 581 # Prevent copying repo configuration.
560 582 ui.copy = baseui.copy
561 583
562 584 # Working directory VFS rooted at repository root.
563 585 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
564 586
565 587 # Main VFS for .hg/ directory.
566 588 hgpath = wdirvfs.join(b'.hg')
567 589 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
568 590 # Whether this repository is a shared one or not
569 591 shared = False
570 592 # If this repository is shared, vfs pointing to shared repo
571 593 sharedvfs = None
572 594
573 595 # The .hg/ path should exist and should be a directory. All other
574 596 # cases are errors.
575 597 if not hgvfs.isdir():
576 598 try:
577 599 hgvfs.stat()
578 600 except FileNotFoundError:
579 601 pass
580 602 except ValueError as e:
581 603 # Can be raised on Python 3.8 when path is invalid.
582 604 raise error.Abort(
583 605 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
584 606 )
585 607
586 608 raise error.RepoError(_(b'repository %s not found') % path)
587 609
588 610 requirements = scmutil.readrequires(hgvfs, True)
589 611 shared = (
590 612 requirementsmod.SHARED_REQUIREMENT in requirements
591 613 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
592 614 )
593 615 storevfs = None
594 616 if shared:
595 617 # This is a shared repo
596 618 sharedvfs = _getsharedvfs(hgvfs, requirements)
597 619 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
598 620 else:
599 621 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
600 622
601 623 # if .hg/requires contains the sharesafe requirement, it means
602 624 # there exists a `.hg/store/requires` too and we should read it
603 625 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
604 626 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
605 627 # is not present; refer to checkrequirementscompat() for that
606 628 #
607 629 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
608 630 # repository was shared the old way. We check the share source .hg/requires
609 631 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
610 632 # to be reshared
611 633 hint = _(b"see `hg help config.format.use-share-safe` for more information")
612 634 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
613 635 if (
614 636 shared
615 637 and requirementsmod.SHARESAFE_REQUIREMENT
616 638 not in scmutil.readrequires(sharedvfs, True)
617 639 ):
618 640 mismatch_warn = ui.configbool(
619 641 b'share', b'safe-mismatch.source-not-safe.warn'
620 642 )
621 643 mismatch_config = ui.config(
622 644 b'share', b'safe-mismatch.source-not-safe'
623 645 )
624 646 mismatch_verbose_upgrade = ui.configbool(
625 647 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
626 648 )
627 649 if mismatch_config in (
628 650 b'downgrade-allow',
629 651 b'allow',
630 652 b'downgrade-abort',
631 653 ):
632 654 # prevent cyclic import localrepo -> upgrade -> localrepo
633 655 from . import upgrade
634 656
635 657 upgrade.downgrade_share_to_non_safe(
636 658 ui,
637 659 hgvfs,
638 660 sharedvfs,
639 661 requirements,
640 662 mismatch_config,
641 663 mismatch_warn,
642 664 mismatch_verbose_upgrade,
643 665 )
644 666 elif mismatch_config == b'abort':
645 667 raise error.Abort(
646 668 _(b"share source does not support share-safe requirement"),
647 669 hint=hint,
648 670 )
649 671 else:
650 672 raise error.Abort(
651 673 _(
652 674 b"share-safe mismatch with source.\nUnrecognized"
653 675 b" value '%s' of `share.safe-mismatch.source-not-safe`"
654 676 b" set."
655 677 )
656 678 % mismatch_config,
657 679 hint=hint,
658 680 )
659 681 else:
660 682 requirements |= scmutil.readrequires(storevfs, False)
661 683 elif shared:
662 684 sourcerequires = scmutil.readrequires(sharedvfs, False)
663 685 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
664 686 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
665 687 mismatch_warn = ui.configbool(
666 688 b'share', b'safe-mismatch.source-safe.warn'
667 689 )
668 690 mismatch_verbose_upgrade = ui.configbool(
669 691 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
670 692 )
671 693 if mismatch_config in (
672 694 b'upgrade-allow',
673 695 b'allow',
674 696 b'upgrade-abort',
675 697 ):
676 698 # prevent cyclic import localrepo -> upgrade -> localrepo
677 699 from . import upgrade
678 700
679 701 upgrade.upgrade_share_to_safe(
680 702 ui,
681 703 hgvfs,
682 704 storevfs,
683 705 requirements,
684 706 mismatch_config,
685 707 mismatch_warn,
686 708 mismatch_verbose_upgrade,
687 709 )
688 710 elif mismatch_config == b'abort':
689 711 raise error.Abort(
690 712 _(
691 713 b'version mismatch: source uses share-safe'
692 714 b' functionality while the current share does not'
693 715 ),
694 716 hint=hint,
695 717 )
696 718 else:
697 719 raise error.Abort(
698 720 _(
699 721 b"share-safe mismatch with source.\nUnrecognized"
700 722 b" value '%s' of `share.safe-mismatch.source-safe` set."
701 723 )
702 724 % mismatch_config,
703 725 hint=hint,
704 726 )
705 727
706 728 # The .hg/hgrc file may load extensions or contain config options
707 729 # that influence repository construction. Attempt to load it and
708 730 # process any new extensions that it may have pulled in.
709 731 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
710 732 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
711 733 extensions.loadall(ui)
712 734 extensions.populateui(ui)
713 735
714 736 # Set of module names of extensions loaded for this repository.
715 737 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
716 738
717 739 supportedrequirements = gathersupportedrequirements(ui)
718 740
719 741 # We first validate the requirements are known.
720 742 ensurerequirementsrecognized(requirements, supportedrequirements)
721 743
722 744 # Then we validate that the known set is reasonable to use together.
723 745 ensurerequirementscompatible(ui, requirements)
724 746
725 747 # TODO there are unhandled edge cases related to opening repositories with
726 748 # shared storage. If storage is shared, we should also test for requirements
727 749 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
728 750 # that repo, as that repo may load extensions needed to open it. This is a
729 751 # bit complicated because we don't want the other hgrc to overwrite settings
730 752 # in this hgrc.
731 753 #
732 754 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
733 755 # file when sharing repos. But if a requirement is added after the share is
734 756 # performed, thereby introducing a new requirement for the opener, we may
735 757 # not see that and could encounter a run-time error interacting with
736 758 # that shared store since it has an unknown-to-us requirement.
737 759
738 760 # At this point, we know we should be capable of opening the repository.
739 761 # Now get on with doing that.
740 762
741 763 features = set()
742 764
743 765 # The "store" part of the repository holds versioned data. How it is
744 766 # accessed is determined by various requirements. If `shared` or
745 767 # `relshared` requirements are present, this indicates the current repository
746 768 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
747 769 if shared:
748 770 storebasepath = sharedvfs.base
749 771 cachepath = sharedvfs.join(b'cache')
750 772 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
751 773 else:
752 774 storebasepath = hgvfs.base
753 775 cachepath = hgvfs.join(b'cache')
754 776 wcachepath = hgvfs.join(b'wcache')
755 777
756 778 # The store has changed over time and the exact layout is dictated by
757 779 # requirements. The store interface abstracts differences across all
758 780 # of them.
759 781 store = makestore(
760 782 requirements,
761 783 storebasepath,
762 784 lambda base: vfsmod.vfs(base, cacheaudited=True),
763 785 )
764 786 hgvfs.createmode = store.createmode
765 787
766 788 storevfs = store.vfs
767 789 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
768 790
769 791 if (
770 792 requirementsmod.REVLOGV2_REQUIREMENT in requirements
771 793 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
772 794 ):
773 795 features.add(repository.REPO_FEATURE_SIDE_DATA)
774 796 # the revlogv2 docket introduced race condition that we need to fix
775 797 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
776 798
777 799 # The cache vfs is used to manage cache files.
778 800 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
779 801 cachevfs.createmode = store.createmode
780 802 # The cache vfs is used to manage cache files related to the working copy
781 803 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
782 804 wcachevfs.createmode = store.createmode
783 805
784 806 # Now resolve the type for the repository object. We do this by repeatedly
785 807 # calling a factory function to produce types for specific aspects of the
786 808 # repo's operation. The aggregate returned types are used as base classes
787 809 # for a dynamically-derived type, which will represent our new repository.
788 810
789 811 bases = []
790 812 extrastate = {}
791 813
792 814 for iface, fn in REPO_INTERFACES:
793 815 # We pass all potentially useful state to give extensions tons of
794 816 # flexibility.
795 817 typ = fn()(
796 818 ui=ui,
797 819 intents=intents,
798 820 requirements=requirements,
799 821 features=features,
800 822 wdirvfs=wdirvfs,
801 823 hgvfs=hgvfs,
802 824 store=store,
803 825 storevfs=storevfs,
804 826 storeoptions=storevfs.options,
805 827 cachevfs=cachevfs,
806 828 wcachevfs=wcachevfs,
807 829 extensionmodulenames=extensionmodulenames,
808 830 extrastate=extrastate,
809 831 baseclasses=bases,
810 832 )
811 833
812 834 if not isinstance(typ, type):
813 835 raise error.ProgrammingError(
814 836 b'unable to construct type for %s' % iface
815 837 )
816 838
817 839 bases.append(typ)
818 840
819 841 # type() allows you to use characters in type names that wouldn't be
820 842 # recognized as Python symbols in source code. We abuse that to add
821 843 # rich information about our constructed repo.
822 844 name = pycompat.sysstr(
823 845 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
824 846 )
825 847
826 848 cls = type(name, tuple(bases), {})
827 849
828 850 return cls(
829 851 baseui=baseui,
830 852 ui=ui,
831 853 origroot=path,
832 854 wdirvfs=wdirvfs,
833 855 hgvfs=hgvfs,
834 856 requirements=requirements,
835 857 supportedrequirements=supportedrequirements,
836 858 sharedpath=storebasepath,
837 859 store=store,
838 860 cachevfs=cachevfs,
839 861 wcachevfs=wcachevfs,
840 862 features=features,
841 863 intents=intents,
842 864 )
843 865
844 866
845 867 def loadhgrc(
846 868 ui,
847 869 wdirvfs: vfsmod.vfs,
848 870 hgvfs: vfsmod.vfs,
849 871 requirements,
850 872 sharedvfs: Optional[vfsmod.vfs] = None,
851 873 ):
852 874 """Load hgrc files/content into a ui instance.
853 875
854 876 This is called during repository opening to load any additional
855 877 config files or settings relevant to the current repository.
856 878
857 879 Returns a bool indicating whether any additional configs were loaded.
858 880
859 881 Extensions should monkeypatch this function to modify how per-repo
860 882 configs are loaded. For example, an extension may wish to pull in
861 883 configs from alternate files or sources.
862 884
863 885 sharedvfs is a vfs object pointing to the source repo if the current one is a
864 886 shared one
865 887 """
866 888 if not rcutil.use_repo_hgrc():
867 889 return False
868 890
869 891 ret = False
870 892 # first load config from the shared source if we have to
871 893 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
872 894 try:
873 895 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
874 896 ret = True
875 897 except IOError:
876 898 pass
877 899
878 900 try:
879 901 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
880 902 ret = True
881 903 except IOError:
882 904 pass
883 905
884 906 try:
885 907 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
886 908 ret = True
887 909 except IOError:
888 910 pass
889 911
890 912 return ret
891 913
892 914
893 915 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
894 916 """Perform additional actions after .hg/hgrc is loaded.
895 917
896 918 This function is called during repository loading immediately after
897 919 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
898 920
899 921 The function can be used to validate configs, automatically add
900 922 options (including extensions) based on requirements, etc.
901 923 """
902 924
903 925 # Map of requirements to list of extensions to load automatically when
904 926 # requirement is present.
905 927 autoextensions = {
906 928 b'git': [b'git'],
907 929 b'largefiles': [b'largefiles'],
908 930 b'lfs': [b'lfs'],
909 931 }
910 932
911 933 for requirement, names in sorted(autoextensions.items()):
912 934 if requirement not in requirements:
913 935 continue
914 936
915 937 for name in names:
916 938 if not ui.hasconfig(b'extensions', name):
917 939 ui.setconfig(b'extensions', name, b'', source=b'autoload')
918 940
919 941
920 942 def gathersupportedrequirements(ui):
921 943 """Determine the complete set of recognized requirements."""
922 944 # Start with all requirements supported by this file.
923 945 supported = set(localrepository._basesupported)
924 946
925 947 # Execute ``featuresetupfuncs`` entries if they belong to an extension
926 948 # relevant to this ui instance.
927 949 modules = {m.__name__ for n, m in extensions.extensions(ui)}
928 950
929 951 for fn in featuresetupfuncs:
930 952 if fn.__module__ in modules:
931 953 fn(ui, supported)
932 954
933 955 # Add derived requirements from registered compression engines.
934 956 for name in util.compengines:
935 957 engine = util.compengines[name]
936 958 if engine.available() and engine.revlogheader():
937 959 supported.add(b'exp-compression-%s' % name)
938 960 if engine.name() == b'zstd':
939 961 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
940 962
941 963 return supported
942 964
943 965
944 966 def ensurerequirementsrecognized(requirements, supported):
945 967 """Validate that a set of local requirements is recognized.
946 968
947 969 Receives a set of requirements. Raises an ``error.RepoError`` if there
948 970 exists any requirement in that set that currently loaded code doesn't
949 971 recognize.
950 972
951 973 Returns a set of supported requirements.
952 974 """
953 975 missing = set()
954 976
955 977 for requirement in requirements:
956 978 if requirement in supported:
957 979 continue
958 980
959 981 if not requirement or not requirement[0:1].isalnum():
960 982 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
961 983
962 984 missing.add(requirement)
963 985
964 986 if missing:
965 987 raise error.RequirementError(
966 988 _(b'repository requires features unknown to this Mercurial: %s')
967 989 % b' '.join(sorted(missing)),
968 990 hint=_(
969 991 b'see https://mercurial-scm.org/wiki/MissingRequirement '
970 992 b'for more information'
971 993 ),
972 994 )
973 995
974 996
975 997 def ensurerequirementscompatible(ui, requirements):
976 998 """Validates that a set of recognized requirements is mutually compatible.
977 999
978 1000 Some requirements may not be compatible with others or require
979 1001 config options that aren't enabled. This function is called during
980 1002 repository opening to ensure that the set of requirements needed
981 1003 to open a repository is sane and compatible with config options.
982 1004
983 1005 Extensions can monkeypatch this function to perform additional
984 1006 checking.
985 1007
986 1008 ``error.RepoError`` should be raised on failure.
987 1009 """
988 1010 if (
989 1011 requirementsmod.SPARSE_REQUIREMENT in requirements
990 1012 and not sparse.enabled
991 1013 ):
992 1014 raise error.RepoError(
993 1015 _(
994 1016 b'repository is using sparse feature but '
995 1017 b'sparse is not enabled; enable the '
996 1018 b'"sparse" extensions to access'
997 1019 )
998 1020 )
999 1021
1000 1022
1001 1023 def makestore(requirements, path, vfstype):
1002 1024 """Construct a storage object for a repository."""
1003 1025 if requirementsmod.STORE_REQUIREMENT in requirements:
1004 1026 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1005 1027 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1006 1028 return storemod.fncachestore(path, vfstype, dotencode)
1007 1029
1008 1030 return storemod.encodedstore(path, vfstype)
1009 1031
1010 1032 return storemod.basicstore(path, vfstype)
1011 1033
1012 1034
1013 1035 def resolvestorevfsoptions(ui, requirements, features):
1014 1036 """Resolve the options to pass to the store vfs opener.
1015 1037
1016 1038 The returned dict is used to influence behavior of the storage layer.
1017 1039 """
1018 1040 options = {}
1019 1041
1020 1042 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1021 1043 options[b'treemanifest'] = True
1022 1044
1023 1045 # experimental config: format.manifestcachesize
1024 1046 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1025 1047 if manifestcachesize is not None:
1026 1048 options[b'manifestcachesize'] = manifestcachesize
1027 1049
1028 1050 # In the absence of another requirement superseding a revlog-related
1029 1051 # requirement, we have to assume the repo is using revlog version 0.
1030 1052 # This revlog format is super old and we don't bother trying to parse
1031 1053 # opener options for it because those options wouldn't do anything
1032 1054 # meaningful on such old repos.
1033 1055 if (
1034 1056 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1035 1057 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1036 1058 ):
1037 1059 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1038 1060 else: # explicitly mark repo as using revlogv0
1039 1061 options[b'revlogv0'] = True
1040 1062
1041 1063 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1042 1064 options[b'copies-storage'] = b'changeset-sidedata'
1043 1065 else:
1044 1066 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1045 1067 copiesextramode = (b'changeset-only', b'compatibility')
1046 1068 if writecopiesto in copiesextramode:
1047 1069 options[b'copies-storage'] = b'extra'
1048 1070
1049 1071 return options
1050 1072
1051 1073
1052 1074 def resolverevlogstorevfsoptions(ui, requirements, features):
1053 1075 """Resolve opener options specific to revlogs."""
1054 1076
1055 1077 options = {}
1056 1078 options[b'flagprocessors'] = {}
1057 1079
1058 1080 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1059 1081 data_config = options[b'data-config'] = revlog.DataConfig()
1060 1082 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1061 1083
1062 1084 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1063 1085 options[b'revlogv1'] = True
1064 1086 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1065 1087 options[b'revlogv2'] = True
1066 1088 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1067 1089 options[b'changelogv2'] = True
1068 1090 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1069 1091 options[b'changelogv2.compute-rank'] = cmp_rank
1070 1092
1071 1093 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1072 1094 options[b'generaldelta'] = True
1073 1095
1074 1096 # experimental config: format.chunkcachesize
1075 1097 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1076 1098 if chunkcachesize is not None:
1077 1099 data_config.chunk_cache_size = chunkcachesize
1078 1100
1079 1101 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1080 1102 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1081 1103 data_config.uncompressed_cache_count = 10_000
1082 1104 data_config.uncompressed_cache_factor = 4
1083 1105 if memory_profile >= scmutil.RESOURCE_HIGH:
1084 1106 data_config.uncompressed_cache_factor = 10
1085 1107
1086 1108 delta_config.delta_both_parents = ui.configbool(
1087 1109 b'storage', b'revlog.optimize-delta-parent-choice'
1088 1110 )
1089 1111 delta_config.candidate_group_chunk_size = ui.configint(
1090 1112 b'storage',
1091 1113 b'revlog.delta-parent-search.candidate-group-chunk-size',
1092 1114 )
1093 1115 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1094 1116
1095 1117 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1096 1118 options[b'issue6528.fix-incoming'] = issue6528
1097 1119
1098 1120 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1099 1121 lazydeltabase = False
1100 1122 if lazydelta:
1101 1123 lazydeltabase = ui.configbool(
1102 1124 b'storage', b'revlog.reuse-external-delta-parent'
1103 1125 )
1104 1126 if lazydeltabase is None:
1105 1127 lazydeltabase = not scmutil.gddeltaconfig(ui)
1106 1128 delta_config.lazy_delta = lazydelta
1107 1129 delta_config.lazy_delta_base = lazydeltabase
1108 1130
1109 1131 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1110 1132 if 0 <= chainspan:
1111 1133 delta_config.max_deltachain_span = chainspan
1112 1134
1113 1135 has_populate = util.has_mmap_populate()
1114 1136 if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
1115 1137 data_config.mmap_index_threshold = ui.configbytes(
1116 1138 b'storage',
1117 1139 b'revlog.mmap.index:size-threshold',
1118 1140 )
1119 1141
1120 1142 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1121 1143 srdensitythres = float(
1122 1144 ui.config(b'experimental', b'sparse-read.density-threshold')
1123 1145 )
1124 1146 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1125 1147 data_config.with_sparse_read = withsparseread
1126 1148 data_config.sr_density_threshold = srdensitythres
1127 1149 data_config.sr_min_gap_size = srmingapsize
1128 1150
1129 1151 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1130 1152 delta_config.sparse_revlog = sparserevlog
1131 1153 if sparserevlog:
1132 1154 options[b'generaldelta'] = True
1133 1155 data_config.with_sparse_read = True
1134 1156
1135 1157 maxchainlen = None
1136 1158 if sparserevlog:
1137 1159 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1138 1160 # experimental config: format.maxchainlen
1139 1161 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1140 1162 if maxchainlen is not None:
1141 1163 delta_config.max_chain_len = maxchainlen
1142 1164
1143 1165 for r in requirements:
1144 1166 # we allow multiple compression engine requirements to co-exist because,
1145 1167 # strictly speaking, revlog seems to support mixed compression styles.
1146 1168 #
1147 1169 # The compression used for new entries will be "the last one"
1148 1170 prefix = r.startswith
1149 1171 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1150 1172 feature_config.compression_engine = r.split(b'-', 2)[2]
1151 1173
1152 1174 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1153 1175 if zlib_level is not None:
1154 1176 if not (0 <= zlib_level <= 9):
1155 1177 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1156 1178 raise error.Abort(msg % zlib_level)
1157 1179 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1158 1180 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1159 1181 if zstd_level is not None:
1160 1182 if not (0 <= zstd_level <= 22):
1161 1183 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1162 1184 raise error.Abort(msg % zstd_level)
1163 1185 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1164 1186
1165 1187 if requirementsmod.NARROW_REQUIREMENT in requirements:
1166 1188 feature_config.enable_ellipsis = True
1167 1189
1168 1190 if ui.configbool(b'experimental', b'rust.index'):
1169 1191 options[b'rust.index'] = True
1170 1192 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1171 1193 slow_path = ui.config(
1172 1194 b'storage', b'revlog.persistent-nodemap.slow-path'
1173 1195 )
1174 1196 if slow_path not in (b'allow', b'warn', b'abort'):
1175 1197 default = ui.config_default(
1176 1198 b'storage', b'revlog.persistent-nodemap.slow-path'
1177 1199 )
1178 1200 msg = _(
1179 1201 b'unknown value for config '
1180 1202 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1181 1203 )
1182 1204 ui.warn(msg % slow_path)
1183 1205 if not ui.quiet:
1184 1206 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 1207 slow_path = default
1186 1208
1187 1209 msg = _(
1188 1210 b"accessing `persistent-nodemap` repository without associated "
1189 1211 b"fast implementation."
1190 1212 )
1191 1213 hint = _(
1192 1214 b"check `hg help config.format.use-persistent-nodemap` "
1193 1215 b"for details"
1194 1216 )
1195 1217 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1196 1218 if slow_path == b'warn':
1197 1219 msg = b"warning: " + msg + b'\n'
1198 1220 ui.warn(msg)
1199 1221 if not ui.quiet:
1200 1222 hint = b'(' + hint + b')\n'
1201 1223 ui.warn(hint)
1202 1224 if slow_path == b'abort':
1203 1225 raise error.Abort(msg, hint=hint)
1204 1226 options[b'persistent-nodemap'] = True
1205 1227 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1206 1228 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1207 1229 if slow_path not in (b'allow', b'warn', b'abort'):
1208 1230 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1209 1231 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1210 1232 ui.warn(msg % slow_path)
1211 1233 if not ui.quiet:
1212 1234 ui.warn(_(b'falling back to default value: %s\n') % default)
1213 1235 slow_path = default
1214 1236
1215 1237 msg = _(
1216 1238 b"accessing `dirstate-v2` repository without associated "
1217 1239 b"fast implementation."
1218 1240 )
1219 1241 hint = _(
1220 1242 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1221 1243 )
1222 1244 if not dirstate.HAS_FAST_DIRSTATE_V2:
1223 1245 if slow_path == b'warn':
1224 1246 msg = b"warning: " + msg + b'\n'
1225 1247 ui.warn(msg)
1226 1248 if not ui.quiet:
1227 1249 hint = b'(' + hint + b')\n'
1228 1250 ui.warn(hint)
1229 1251 if slow_path == b'abort':
1230 1252 raise error.Abort(msg, hint=hint)
1231 1253 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1232 1254 options[b'persistent-nodemap.mmap'] = True
1233 1255 if ui.configbool(b'devel', b'persistent-nodemap'):
1234 1256 options[b'devel-force-nodemap'] = True
1235 1257
1236 1258 return options
1237 1259
1238 1260
1239 1261 def makemain(**kwargs):
1240 1262 """Produce a type conforming to ``ilocalrepositorymain``."""
1241 1263 return localrepository
1242 1264
1243 1265
1244 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1245 class revlogfilestorage:
1266 class RevlogFileStorage:
1246 1267 """File storage when using revlogs."""
1247 1268
1248 1269 def file(self, path):
1249 1270 if path.startswith(b'/'):
1250 1271 path = path[1:]
1251 1272
1252 1273 try_split = (
1253 1274 self.currenttransaction() is not None
1254 1275 or txnutil.mayhavepending(self.root)
1255 1276 )
1256 1277
1257 1278 return filelog.filelog(self.svfs, path, try_split=try_split)
1258 1279
1259 1280
1260 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1261 class revlognarrowfilestorage:
1281 revlogfilestorage = interfaceutil.implementer(
1282 repository.ilocalrepositoryfilestorage
1283 )(RevlogFileStorage)
1284
1285 if typing.TYPE_CHECKING:
1286 # Help pytype by hiding the interface stuff that confuses it.
1287 revlogfilestorage = RevlogFileStorage
1288
1289
1290 class RevlogNarrowFileStorage:
1262 1291 """File storage when using revlogs and narrow files."""
1263 1292
1264 1293 def file(self, path):
1265 1294 if path.startswith(b'/'):
1266 1295 path = path[1:]
1267 1296
1268 1297 try_split = (
1269 1298 self.currenttransaction() is not None
1270 1299 or txnutil.mayhavepending(self.root)
1271 1300 )
1272 1301 return filelog.narrowfilelog(
1273 1302 self.svfs, path, self._storenarrowmatch, try_split=try_split
1274 1303 )
1275 1304
1276 1305
1306 revlognarrowfilestorage = interfaceutil.implementer(
1307 repository.ilocalrepositoryfilestorage
1308 )(RevlogNarrowFileStorage)
1309
1310 if typing.TYPE_CHECKING:
1311 # Help pytype by hiding the interface stuff that confuses it.
1312 revlognarrowfilestorage = RevlogNarrowFileStorage
1313
1314
1277 1315 def makefilestorage(requirements, features, **kwargs):
1278 1316 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1279 1317 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1280 1318 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1281 1319
1282 1320 if requirementsmod.NARROW_REQUIREMENT in requirements:
1283 1321 return revlognarrowfilestorage
1284 1322 else:
1285 1323 return revlogfilestorage
1286 1324
1287 1325
1288 1326 # List of repository interfaces and factory functions for them. Each
1289 1327 # will be called in order during ``makelocalrepository()`` to iteratively
1290 1328 # derive the final type for a local repository instance. We capture the
1291 1329 # function as a lambda so we don't hold a reference and the module-level
1292 1330 # functions can be wrapped.
1293 1331 REPO_INTERFACES = [
1294 1332 (repository.ilocalrepositorymain, lambda: makemain),
1295 1333 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1296 1334 ]
1297 1335
1298
1299 @interfaceutil.implementer(repository.ilocalrepositorymain)
1300 class localrepository:
1336 _localrepo_base_classes = object
1337
1338 if typing.TYPE_CHECKING:
1339 _localrepo_base_classes = [
1340 repository.ilocalrepositorymain,
1341 repository.ilocalrepositoryfilestorage,
1342 ]
1343
1344
1345 class LocalRepository(_localrepo_base_classes):
1301 1346 """Main class for representing local repositories.
1302 1347
1303 1348 All local repositories are instances of this class.
1304 1349
1305 1350 Constructed on its own, instances of this class are not usable as
1306 1351 repository objects. To obtain a usable repository object, call
1307 1352 ``hg.repository()``, ``localrepo.instance()``, or
1308 1353 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1309 1354 ``instance()`` adds support for creating new repositories.
1310 1355 ``hg.repository()`` adds more extension integration, including calling
1311 1356 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1312 1357 used.
1313 1358 """
1314 1359
1315 1360 _basesupported = {
1316 1361 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1317 1362 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1318 1363 requirementsmod.CHANGELOGV2_REQUIREMENT,
1319 1364 requirementsmod.COPIESSDC_REQUIREMENT,
1320 1365 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1321 1366 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1322 1367 requirementsmod.DOTENCODE_REQUIREMENT,
1323 1368 requirementsmod.FNCACHE_REQUIREMENT,
1324 1369 requirementsmod.GENERALDELTA_REQUIREMENT,
1325 1370 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1326 1371 requirementsmod.NODEMAP_REQUIREMENT,
1327 1372 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1328 1373 requirementsmod.REVLOGV1_REQUIREMENT,
1329 1374 requirementsmod.REVLOGV2_REQUIREMENT,
1330 1375 requirementsmod.SHARED_REQUIREMENT,
1331 1376 requirementsmod.SHARESAFE_REQUIREMENT,
1332 1377 requirementsmod.SPARSE_REQUIREMENT,
1333 1378 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1334 1379 requirementsmod.STORE_REQUIREMENT,
1335 1380 requirementsmod.TREEMANIFEST_REQUIREMENT,
1336 1381 }
1337 1382
1338 1383 # list of prefixes for files which can be written without 'wlock'
1339 1384 # Extensions should extend this list when needed
1340 1385 _wlockfreeprefix = {
1341 1386 # We might consider requiring 'wlock' for the next
1342 1387 # two, but pretty much all the existing code assumes
1343 1388 # wlock is not needed, so we keep them excluded for
1344 1389 # now.
1345 1390 b'hgrc',
1346 1391 b'requires',
1347 1392 # XXX cache is a complicated business; someone
1348 1393 # should investigate this in depth at some point
1349 1394 b'cache/',
1350 1395 # XXX bisect was still a bit too messy at the time
1351 1396 # this changeset was introduced. Someone should fix
1352 1397 # the remaining bit and drop this line
1353 1398 b'bisect.state',
1354 1399 }
1355 1400
1356 1401 def __init__(
1357 1402 self,
1358 1403 baseui,
1359 1404 ui,
1360 1405 origroot: bytes,
1361 1406 wdirvfs: vfsmod.vfs,
1362 1407 hgvfs: vfsmod.vfs,
1363 1408 requirements,
1364 1409 supportedrequirements,
1365 1410 sharedpath: bytes,
1366 1411 store,
1367 1412 cachevfs: vfsmod.vfs,
1368 1413 wcachevfs: vfsmod.vfs,
1369 1414 features,
1370 1415 intents=None,
1371 1416 ):
1372 1417 """Create a new local repository instance.
1373 1418
1374 1419 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1375 1420 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1376 1421 object.
1377 1422
1378 1423 Arguments:
1379 1424
1380 1425 baseui
1381 1426 ``ui.ui`` instance that ``ui`` argument was based off of.
1382 1427
1383 1428 ui
1384 1429 ``ui.ui`` instance for use by the repository.
1385 1430
1386 1431 origroot
1387 1432 ``bytes`` path to working directory root of this repository.
1388 1433
1389 1434 wdirvfs
1390 1435 ``vfs.vfs`` rooted at the working directory.
1391 1436
1392 1437 hgvfs
1393 1438 ``vfs.vfs`` rooted at .hg/
1394 1439
1395 1440 requirements
1396 1441 ``set`` of bytestrings representing repository opening requirements.
1397 1442
1398 1443 supportedrequirements
1399 1444 ``set`` of bytestrings representing repository requirements that we
1400 1445 know how to open. May be a superset of ``requirements``.
1401 1446
1402 1447 sharedpath
1403 1448 ``bytes`` defining the path to the storage base directory. Points to a
1404 1449 ``.hg/`` directory somewhere.
1405 1450
1406 1451 store
1407 1452 ``store.basicstore`` (or derived) instance providing access to
1408 1453 versioned storage.
1409 1454
1410 1455 cachevfs
1411 1456 ``vfs.vfs`` used for cache files.
1412 1457
1413 1458 wcachevfs
1414 1459 ``vfs.vfs`` used for cache files related to the working copy.
1415 1460
1416 1461 features
1417 1462 ``set`` of bytestrings defining features/capabilities of this
1418 1463 instance.
1419 1464
1420 1465 intents
1421 1466 ``set`` of system strings indicating what this repo will be used
1422 1467 for.
1423 1468 """
1424 1469 self.baseui = baseui
1425 1470 self.ui = ui
1426 1471 self.origroot = origroot
1427 1472 # vfs rooted at working directory.
1428 1473 self.wvfs = wdirvfs
1429 1474 self.root = wdirvfs.base
1430 1475 # vfs rooted at .hg/. Used to access most non-store paths.
1431 1476 self.vfs = hgvfs
1432 1477 self.path = hgvfs.base
1433 1478 self.requirements = requirements
1434 1479 self.nodeconstants = sha1nodeconstants
1435 1480 self.nullid = self.nodeconstants.nullid
1436 1481 self.supported = supportedrequirements
1437 1482 self.sharedpath = sharedpath
1438 1483 self.store = store
1439 1484 self.cachevfs = cachevfs
1440 1485 self.wcachevfs = wcachevfs
1441 1486 self.features = features
1442 1487
1443 1488 self.filtername = None
1444 1489
1445 1490 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1446 1491 b'devel', b'check-locks'
1447 1492 ):
1448 1493 self.vfs.audit = self._getvfsward(self.vfs.audit)
1449 1494 # A list of callbacks to shape the phase if no data were found.
1450 1495 # Callbacks are in the form: func(repo, roots) --> processed root.
1451 1496 # This list is to be filled by extensions during repo setup
1452 1497 self._phasedefaults = []
1453 1498
1454 1499 color.setup(self.ui)
1455 1500
1456 1501 self.spath = self.store.path
1457 1502 self.svfs = self.store.vfs
1458 1503 self.sjoin = self.store.join
1459 1504 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1460 1505 b'devel', b'check-locks'
1461 1506 ):
1462 1507 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1463 1508 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1464 1509 else: # standard vfs
1465 1510 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1466 1511
1467 1512 self._dirstatevalidatewarned = False
1468 1513
1469 1514 self._branchcaches = branchmap.BranchMapCache()
1470 1515 self._revbranchcache = None
1471 1516 self._filterpats = {}
1472 1517 self._datafilters = {}
1473 1518 self._transref = self._lockref = self._wlockref = None
1474 1519
1475 1520 # A cache for various files under .hg/ that tracks file changes,
1476 1521 # (used by the filecache decorator)
1477 1522 #
1478 1523 # Maps a property name to its util.filecacheentry
1479 1524 self._filecache = {}
1480 1525
1481 1526 # holds sets of revisions to be filtered
1482 1527 # should be cleared when something might have changed the filter value:
1483 1528 # - new changesets,
1484 1529 # - phase change,
1485 1530 # - new obsolescence marker,
1486 1531 # - working directory parent change,
1487 1532 # - bookmark changes
1488 1533 self.filteredrevcache = {}
1489 1534
1490 1535 self._dirstate = None
1491 1536 # post-dirstate-status hooks
1492 1537 self._postdsstatus = []
1493 1538
1494 1539 self._pending_narrow_pats = None
1495 1540 self._pending_narrow_pats_dirstate = None
1496 1541
1497 1542 # generic mapping between names and nodes
1498 1543 self.names = namespaces.namespaces()
1499 1544
1500 1545 # Key to signature value.
1501 1546 self._sparsesignaturecache = {}
1502 1547 # Signature to cached matcher instance.
1503 1548 self._sparsematchercache = {}
1504 1549
1505 1550 self._extrafilterid = repoview.extrafilter(ui)
1506 1551
1507 1552 self.filecopiesmode = None
1508 1553 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1509 1554 self.filecopiesmode = b'changeset-sidedata'
1510 1555
1511 1556 self._wanted_sidedata = set()
1512 1557 self._sidedata_computers = {}
1513 1558 sidedatamod.set_sidedata_spec_for_repo(self)
1514 1559
1515 1560 def _getvfsward(self, origfunc):
1516 1561 """build a ward for self.vfs"""
1517 1562 rref = weakref.ref(self)
1518 1563
1519 1564 def checkvfs(path, mode=None):
1520 1565 ret = origfunc(path, mode=mode)
1521 1566 repo = rref()
1522 1567 if (
1523 1568 repo is None
1524 1569 or not hasattr(repo, '_wlockref')
1525 1570 or not hasattr(repo, '_lockref')
1526 1571 ):
1527 1572 return
1528 1573 if mode in (None, b'r', b'rb'):
1529 1574 return
1530 1575 if path.startswith(repo.path):
1531 1576 # truncate name relative to the repository (.hg)
1532 1577 path = path[len(repo.path) + 1 :]
1533 1578 if path.startswith(b'cache/'):
1534 1579 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1535 1580 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1536 1581 # path prefixes covered by 'lock'
1537 1582 vfs_path_prefixes = (
1538 1583 b'journal.',
1539 1584 b'undo.',
1540 1585 b'strip-backup/',
1541 1586 b'cache/',
1542 1587 )
1543 1588 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1544 1589 if repo._currentlock(repo._lockref) is None:
1545 1590 repo.ui.develwarn(
1546 1591 b'write with no lock: "%s"' % path,
1547 1592 stacklevel=3,
1548 1593 config=b'check-locks',
1549 1594 )
1550 1595 elif repo._currentlock(repo._wlockref) is None:
1551 1596 # rest of vfs files are covered by 'wlock'
1552 1597 #
1553 1598 # exclude special files
1554 1599 for prefix in self._wlockfreeprefix:
1555 1600 if path.startswith(prefix):
1556 1601 return
1557 1602 repo.ui.develwarn(
1558 1603 b'write with no wlock: "%s"' % path,
1559 1604 stacklevel=3,
1560 1605 config=b'check-locks',
1561 1606 )
1562 1607 return ret
1563 1608
1564 1609 return checkvfs
1565 1610
1566 1611 def _getsvfsward(self, origfunc):
1567 1612 """build a ward for self.svfs"""
1568 1613 rref = weakref.ref(self)
1569 1614
1570 1615 def checksvfs(path, mode=None):
1571 1616 ret = origfunc(path, mode=mode)
1572 1617 repo = rref()
1573 1618 if repo is None or not hasattr(repo, '_lockref'):
1574 1619 return
1575 1620 if mode in (None, b'r', b'rb'):
1576 1621 return
1577 1622 if path.startswith(repo.sharedpath):
1578 1623 # truncate name relative to the repository (.hg)
1579 1624 path = path[len(repo.sharedpath) + 1 :]
1580 1625 if repo._currentlock(repo._lockref) is None:
1581 1626 repo.ui.develwarn(
1582 1627 b'write with no lock: "%s"' % path, stacklevel=4
1583 1628 )
1584 1629 return ret
1585 1630
1586 1631 return checksvfs
1587 1632
1588 1633 @property
1589 1634 def vfs_map(self):
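# Descriptive note (added): maps the "location" keys used by
# transaction/journal entries to vfs objects; b'plain' is the .hg/ vfs,
# while b'' and b'store' both resolve to the store vfs.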
1590 1635 return {
1591 1636 b'': self.svfs,
1592 1637 b'plain': self.vfs,
1593 1638 b'store': self.svfs,
1594 1639 }
1595 1640
1596 1641 def close(self):
1597 1642 self._writecaches()
1598 1643
1599 1644 def _writecaches(self):
1600 1645 if self._revbranchcache:
1601 1646 self._revbranchcache.write()
1602 1647
1603 1648 def _restrictcapabilities(self, caps):
1604 1649 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1605 1650 caps = set(caps)
1606 1651 capsblob = bundle2.encodecaps(
1607 1652 bundle2.getrepocaps(self, role=b'client')
1608 1653 )
1609 1654 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1610 1655 if self.ui.configbool(b'experimental', b'narrow'):
1611 1656 caps.add(wireprototypes.NARROWCAP)
1612 1657 return caps
1613 1658
1614 1659 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1615 1660 # self -> auditor -> self._checknested -> self
1616 1661
1617 1662 @property
1618 1663 def auditor(self):
1619 1664 # This is only used by context.workingctx.match in order to
1620 1665 # detect files in subrepos.
1621 1666 return pathutil.pathauditor(self.root, callback=self._checknested)
1622 1667
1623 1668 @property
1624 1669 def nofsauditor(self):
1625 1670 # This is only used by context.basectx.match in order to detect
1626 1671 # files in subrepos.
1627 1672 return pathutil.pathauditor(
1628 1673 self.root, callback=self._checknested, realfs=False, cached=True
1629 1674 )
1630 1675
1631 1676 def _checknested(self, path):
1632 1677 """Determine if path is a legal nested repository."""
1633 1678 if not path.startswith(self.root):
1634 1679 return False
1635 1680 subpath = path[len(self.root) + 1 :]
1636 1681 normsubpath = util.pconvert(subpath)
1637 1682
1638 1683 # XXX: Checking against the current working copy is wrong in
1639 1684 # the sense that it can reject things like
1640 1685 #
1641 1686 # $ hg cat -r 10 sub/x.txt
1642 1687 #
1643 1688 # if sub/ is no longer a subrepository in the working copy
1644 1689 # parent revision.
1645 1690 #
1646 1691 # However, it can of course also allow things that would have
1647 1692 # been rejected before, such as the above cat command if sub/
1648 1693 # is a subrepository now, but was a normal directory before.
1649 1694 # The old path auditor would have rejected by mistake since it
1650 1695 # panics when it sees sub/.hg/.
1651 1696 #
1652 1697 # All in all, checking against the working copy seems sensible
1653 1698 # since we want to prevent access to nested repositories on
1654 1699 # the filesystem *now*.
1655 1700 ctx = self[None]
1656 1701 parts = util.splitpath(subpath)
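# Walk prefixes from deepest to shallowest: the first prefix present in
# the substate decides, and deeper nesting is delegated to that subrepo
# via checknested() (descriptive note, not from the original source).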
1657 1702 while parts:
1658 1703 prefix = b'/'.join(parts)
1659 1704 if prefix in ctx.substate:
1660 1705 if prefix == normsubpath:
1661 1706 return True
1662 1707 else:
1663 1708 sub = ctx.sub(prefix)
1664 1709 return sub.checknested(subpath[len(prefix) + 1 :])
1665 1710 else:
1666 1711 parts.pop()
1667 1712 return False
1668 1713
1669 1714 def peer(self, path=None, remotehidden=False):
1670 1715 return localpeer(
1671 1716 self, path=path, remotehidden=remotehidden
1672 1717 ) # not cached to avoid reference cycle
1673 1718
1674 1719 def unfiltered(self):
1675 1720 """Return unfiltered version of the repository
1676 1721
1677 1722 Intended to be overwritten by filtered repo."""
1678 1723 return self
1679 1724
1680 1725 def filtered(self, name, visibilityexceptions=None):
1681 1726 """Return a filtered version of a repository
1682 1727
1683 1728 The `name` parameter is the identifier of the requested view. This
1684 1729 will return a repoview object set "exactly" to the specified view.
1685 1730
1686 1731 This function does not apply recursive filtering to a repository. For
1687 1732 example calling `repo.filtered("served")` will return a repoview using
1688 1733 the "served" view, regardless of the initial view used by `repo`.
1689 1734
1690 1735 In other words, there is always only one level of `repoview` "filtering".
1691 1736 """
1692 1737 if self._extrafilterid is not None and b'%' not in name:
1693 1738 name = name + b'%' + self._extrafilterid
1694 1739
1695 1740 cls = repoview.newtype(self.unfiltered().__class__)
1696 1741 return cls(self, name, visibilityexceptions)
1697 1742
1698 1743 @mixedrepostorecache(
1699 1744 (b'bookmarks', b'plain'),
1700 1745 (b'bookmarks.current', b'plain'),
1701 1746 (b'bookmarks', b''),
1702 1747 (b'00changelog.i', b''),
1703 1748 )
1704 1749 def _bookmarks(self):
1705 1750 # Since the multiple files involved in the transaction cannot be
1706 1751 # written atomically (with the current repository format), there is a race
1707 1752 # condition here.
1708 1753 #
1709 1754 # 1) changelog content A is read
1710 1755 # 2) outside transaction update changelog to content B
1711 1756 # 3) outside transaction update bookmark file referring to content B
1712 1757 # 4) bookmarks file content is read and filtered against changelog-A
1713 1758 #
1714 1759 # When this happens, bookmarks against nodes missing from A are dropped.
1715 1760 #
1716 1761 # Having this happen during a read is not great, but it becomes worse
1717 1762 # when it happens during a write, because the bookmarks pointing to the
1718 1763 # "unknown" nodes will be dropped for good. However, writes happen within locks.
1719 1764 # This locking makes it possible to have a race free consistent read.
1720 1765 # For this purpose, data read from disk before locking is
1721 1766 # "invalidated" right after the locks are taken. These invalidations are
1722 1767 # "light": the `filecache` mechanism keeps the data in memory and will
1723 1768 # reuse it if the underlying files did not change. Not parsing the
1724 1769 # same data multiple times helps performance.
1725 1770 #
1726 1771 # Unfortunately, in the case described above, the files tracked by the
1727 1772 # bookmarks file cache might not have changed, but the in-memory
1728 1773 # content is still "wrong" because we used an older changelog content
1729 1774 # to process the on-disk data. So after locking, the changelog would be
1730 1775 # refreshed but `_bookmarks` would be preserved.
1731 1776 # Adding `00changelog.i` to the list of tracked files is not
1732 1777 # enough, because at the time we build the content for `_bookmarks` in
1733 1778 # (4), the changelog file has already diverged from the content used
1734 1779 # for loading `changelog` in (1)
1735 1780 #
1736 1781 # To prevent the issue, we force the changelog to be explicitly
1737 1782 # reloaded while computing `_bookmarks`. The data race can still happen
1738 1783 # without the lock (with a narrower window), but it would no longer go
1739 1784 # undetected during the lock time refresh.
1740 1785 #
1741 1786 # The new schedule is as follows:
1742 1787 #
1743 1788 # 1) filecache logic detects that `_bookmarks` needs to be computed
1744 1789 # 2) cachestats for `bookmarks` and `changelog` are captured (for book)
1745 1790 # 3) we force the `changelog` filecache to be tested
1746 1791 # 4) cachestats for `changelog` are captured (for changelog)
1747 1792 # 5) `_bookmarks` is computed and cached
1748 1793 #
1749 1794 # The step in (3) ensures we have a changelog at least as recent as the
1750 1795 # cache stat computed in (1). As a result, at locking time:
1751 1796 # * if the changelog did not change since (1) -> we can reuse the data
1752 1797 # * otherwise -> the bookmarks get refreshed.
1753 1798 self._refreshchangelog()
1754 1799 return bookmarks.bmstore(self)
1755 1800
1756 1801 def _refreshchangelog(self):
1757 1802 """make sure the in memory changelog match the on-disk one"""
1758 1803 if 'changelog' in vars(self) and self.currenttransaction() is None:
1759 1804 del self.changelog
1760 1805
1761 1806 @property
1762 1807 def _activebookmark(self):
1763 1808 return self._bookmarks.active
1764 1809
1765 1810 # _phasesets depend on the changelog. What we need is to call
1766 1811 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1767 1812 # can't be easily expressed in the filecache mechanism.
1768 1813 @storecache(b'phaseroots', b'00changelog.i')
1769 1814 def _phasecache(self):
1770 1815 return phases.phasecache(self, self._phasedefaults)
1771 1816
1772 1817 @storecache(b'obsstore')
1773 1818 def obsstore(self):
1774 1819 return obsolete.makestore(self.ui, self)
1775 1820
1776 1821 @changelogcache()
1777 1822 def changelog(repo):
1778 1823 # load dirstate before changelog to avoid a race; see issue6303
1779 1824 repo.dirstate.prefetch_parents()
1780 1825 return repo.store.changelog(
1781 1826 txnutil.mayhavepending(repo.root),
1782 1827 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1783 1828 )
1784 1829
1785 1830 @manifestlogcache()
1786 1831 def manifestlog(self):
1787 1832 return self.store.manifestlog(self, self._storenarrowmatch)
1788 1833
1789 1834 @unfilteredpropertycache
1790 1835 def dirstate(self):
1791 1836 if self._dirstate is None:
1792 1837 self._dirstate = self._makedirstate()
1793 1838 else:
1794 1839 self._dirstate.refresh()
1795 1840 return self._dirstate
1796 1841
1797 1842 def _makedirstate(self):
1798 1843 """Extension point for wrapping the dirstate per-repo."""
1799 1844 sparsematchfn = None
1800 1845 if sparse.use_sparse(self):
1801 1846 sparsematchfn = lambda: sparse.matcher(self)
1802 1847 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1803 1848 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1804 1849 use_dirstate_v2 = v2_req in self.requirements
1805 1850 use_tracked_hint = th in self.requirements
1806 1851
1807 1852 return dirstate.dirstate(
1808 1853 self.vfs,
1809 1854 self.ui,
1810 1855 self.root,
1811 1856 self._dirstatevalidate,
1812 1857 sparsematchfn,
1813 1858 self.nodeconstants,
1814 1859 use_dirstate_v2,
1815 1860 use_tracked_hint=use_tracked_hint,
1816 1861 )
1817 1862
1818 1863 def _dirstatevalidate(self, node):
1819 1864 okay = True
1820 1865 try:
1821 1866 self.changelog.rev(node)
1822 1867 except error.LookupError:
1823 1868 # If the parents are unknown, it might just be because the changelog
1824 1869 # in memory is lagging behind the dirstate in memory. So try to
1825 1870 # refresh the changelog first.
1826 1871 #
1827 1872 # We only do so if we don't hold the lock; if we do hold the lock,
1828 1873 # the invalidation at that time should have taken care of this and
1829 1874 # something is very fishy.
1830 1875 if self.currentlock() is None:
1831 1876 self.invalidate()
1832 1877 try:
1833 1878 self.changelog.rev(node)
1834 1879 except error.LookupError:
1835 1880 okay = False
1836 1881 else:
1837 1882 # XXX we should consider raising an error here.
1838 1883 okay = False
1839 1884 if okay:
1840 1885 return node
1841 1886 else:
1842 1887 if not self._dirstatevalidatewarned:
1843 1888 self._dirstatevalidatewarned = True
1844 1889 self.ui.warn(
1845 1890 _(b"warning: ignoring unknown working parent %s!\n")
1846 1891 % short(node)
1847 1892 )
1848 1893 return self.nullid
1849 1894
1850 1895 @storecache(narrowspec.FILENAME)
1851 1896 def narrowpats(self):
1852 1897 """matcher patterns for this repository's narrowspec
1853 1898
1854 1899 A tuple of (includes, excludes).
1855 1900 """
1856 1901 # the narrow management should probably move into its own object
1857 1902 val = self._pending_narrow_pats
1858 1903 if val is None:
1859 1904 val = narrowspec.load(self)
1860 1905 return val
1861 1906
1862 1907 @storecache(narrowspec.FILENAME)
1863 1908 def _storenarrowmatch(self):
1864 1909 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1865 1910 return matchmod.always()
1866 1911 include, exclude = self.narrowpats
1867 1912 return narrowspec.match(self.root, include=include, exclude=exclude)
1868 1913
1869 1914 @storecache(narrowspec.FILENAME)
1870 1915 def _narrowmatch(self):
1871 1916 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1872 1917 return matchmod.always()
1873 1918 narrowspec.checkworkingcopynarrowspec(self)
1874 1919 include, exclude = self.narrowpats
1875 1920 return narrowspec.match(self.root, include=include, exclude=exclude)
1876 1921
1877 1922 def narrowmatch(self, match=None, includeexact=False):
1878 1923 """matcher corresponding the the repo's narrowspec
1879 1924
1880 1925 If `match` is given, then that will be intersected with the narrow
1881 1926 matcher.
1882 1927
1883 1928 If `includeexact` is True, then any exact matches from `match` will
1884 1929 be included even if they're outside the narrowspec.
1885 1930 """
1886 1931 if match:
1887 1932 if includeexact and not self._narrowmatch.always():
1888 1933 # do not exclude explicitly-specified paths so that they can
1889 1934 # be warned later on
1890 1935 em = matchmod.exact(match.files())
1891 1936 nm = matchmod.unionmatcher([self._narrowmatch, em])
1892 1937 return matchmod.intersectmatchers(match, nm)
1893 1938 return matchmod.intersectmatchers(match, self._narrowmatch)
1894 1939 return self._narrowmatch
1895 1940
1896 1941 def setnarrowpats(self, newincludes, newexcludes):
1897 1942 narrowspec.save(self, newincludes, newexcludes)
1898 1943 self.invalidate(clearfilecache=True)
1899 1944
1900 1945 @unfilteredpropertycache
1901 1946 def _quick_access_changeid_null(self):
1902 1947 return {
1903 1948 b'null': (nullrev, self.nodeconstants.nullid),
1904 1949 nullrev: (nullrev, self.nodeconstants.nullid),
1905 1950 self.nullid: (nullrev, self.nullid),
1906 1951 }
1907 1952
1908 1953 @unfilteredpropertycache
1909 1954 def _quick_access_changeid_wc(self):
1910 1955 # also fast path access to the working copy parents
1911 1956 # however, only do it for filters that ensure the wc is visible.
1912 1957 quick = self._quick_access_changeid_null.copy()
1913 1958 cl = self.unfiltered().changelog
1914 1959 for node in self.dirstate.parents():
1915 1960 if node == self.nullid:
1916 1961 continue
1917 1962 rev = cl.index.get_rev(node)
1918 1963 if rev is None:
1919 1964 # unknown working copy parent case:
1920 1965 #
1921 1966 # skip the fast path and let higher code deal with it
1922 1967 continue
1923 1968 pair = (rev, node)
1924 1969 quick[rev] = pair
1925 1970 quick[node] = pair
1926 1971 # also add the parents of the parents
1927 1972 for r in cl.parentrevs(rev):
1928 1973 if r == nullrev:
1929 1974 continue
1930 1975 n = cl.node(r)
1931 1976 pair = (r, n)
1932 1977 quick[r] = pair
1933 1978 quick[n] = pair
1934 1979 p1node = self.dirstate.p1()
1935 1980 if p1node != self.nullid:
1936 1981 quick[b'.'] = quick[p1node]
1937 1982 return quick
1938 1983
1939 1984 @unfilteredmethod
1940 1985 def _quick_access_changeid_invalidate(self):
1941 1986 if '_quick_access_changeid_wc' in vars(self):
1942 1987 del self.__dict__['_quick_access_changeid_wc']
1943 1988
1944 1989 @property
1945 1990 def _quick_access_changeid(self):
1946 1991 """an helper dictionnary for __getitem__ calls
1947 1992
1948 1993 This contains a list of symbols we can recognise right away without
1949 1994 further processing.
1950 1995 """
1951 1996 if self.filtername in repoview.filter_has_wc:
1952 1997 return self._quick_access_changeid_wc
1953 1998 return self._quick_access_changeid_null
1954 1999
1955 2000 def __getitem__(self, changeid):
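# Overview of accepted changeid forms (summary of the branches below):
# None (working context), a context object, a slice, an integer rev,
# b'.' or b'tip', a binary node, or a full hex node string.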
1956 2001 # dealing with special cases
1957 2002 if changeid is None:
1958 2003 return context.workingctx(self)
1959 2004 if isinstance(changeid, context.basectx):
1960 2005 return changeid
1961 2006
1962 2007 # dealing with multiple revisions
1963 2008 if isinstance(changeid, slice):
1964 2009 # wdirrev isn't contiguous so the slice shouldn't include it
1965 2010 return [
1966 2011 self[i]
1967 2012 for i in range(*changeid.indices(len(self)))
1968 2013 if i not in self.changelog.filteredrevs
1969 2014 ]
1970 2015
1971 2016 # dealing with some special values
1972 2017 quick_access = self._quick_access_changeid.get(changeid)
1973 2018 if quick_access is not None:
1974 2019 rev, node = quick_access
1975 2020 return context.changectx(self, rev, node, maybe_filtered=False)
1976 2021 if changeid == b'tip':
1977 2022 node = self.changelog.tip()
1978 2023 rev = self.changelog.rev(node)
1979 2024 return context.changectx(self, rev, node)
1980 2025
1981 2026 # dealing with arbitrary values
1982 2027 try:
1983 2028 if isinstance(changeid, int):
1984 2029 node = self.changelog.node(changeid)
1985 2030 rev = changeid
1986 2031 elif changeid == b'.':
1987 2032 # this is a hack to delay/avoid loading obsmarkers
1988 2033 # when we know that '.' won't be hidden
1989 2034 node = self.dirstate.p1()
1990 2035 rev = self.unfiltered().changelog.rev(node)
1991 2036 elif len(changeid) == self.nodeconstants.nodelen:
1992 2037 try:
1993 2038 node = changeid
1994 2039 rev = self.changelog.rev(changeid)
1995 2040 except error.FilteredLookupError:
1996 2041 changeid = hex(changeid) # for the error message
1997 2042 raise
1998 2043 except LookupError:
1999 2044 # check if it might have come from damaged dirstate
2000 2045 #
2001 2046 # XXX we could avoid the unfiltered if we had a recognizable
2002 2047 # exception for filtered changeset access
2003 2048 if (
2004 2049 self.local()
2005 2050 and changeid in self.unfiltered().dirstate.parents()
2006 2051 ):
2007 2052 msg = _(b"working directory has unknown parent '%s'!")
2008 2053 raise error.Abort(msg % short(changeid))
2009 2054 changeid = hex(changeid) # for the error message
2010 2055 raise
2011 2056
2012 2057 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2013 2058 node = bin(changeid)
2014 2059 rev = self.changelog.rev(node)
2015 2060 else:
2016 2061 raise error.ProgrammingError(
2017 2062 b"unsupported changeid '%s' of type %s"
2018 2063 % (changeid, pycompat.bytestr(type(changeid)))
2019 2064 )
2020 2065
2021 2066 return context.changectx(self, rev, node)
2022 2067
2023 2068 except (error.FilteredIndexError, error.FilteredLookupError):
2024 2069 raise error.FilteredRepoLookupError(
2025 2070 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2026 2071 )
2027 2072 except (IndexError, LookupError):
2028 2073 raise error.RepoLookupError(
2029 2074 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2030 2075 )
2031 2076 except error.WdirUnsupported:
2032 2077 return context.workingctx(self)
2033 2078
2034 2079 def __contains__(self, changeid):
2035 2080 """True if the given changeid exists"""
2036 2081 try:
2037 2082 self[changeid]
2038 2083 return True
2039 2084 except error.RepoLookupError:
2040 2085 return False
2041 2086
2042 2087 def __nonzero__(self):
2043 2088 return True
2044 2089
2045 2090 __bool__ = __nonzero__
2046 2091
2047 2092 def __len__(self):
2048 2093 # no need to pay the cost of repoview.changelog
2049 2094 unfi = self.unfiltered()
2050 2095 return len(unfi.changelog)
2051 2096
2052 2097 def __iter__(self):
2053 2098 return iter(self.changelog)
2054 2099
2055 2100 def revs(self, expr: bytes, *args):
2056 2101 """Find revisions matching a revset.
2057 2102
2058 2103 The revset is specified as a string ``expr`` that may contain
2059 2104 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2060 2105
2061 2106 Revset aliases from the configuration are not expanded. To expand
2062 2107 user aliases, consider calling ``scmutil.revrange()`` or
2063 2108 ``repo.anyrevs([expr], user=True)``.
2064 2109
2065 2110 Returns a smartset.abstractsmartset, which is a list-like interface
2066 2111 that contains integer revisions.
2067 2112 """
2068 2113 tree = revsetlang.spectree(expr, *args)
2069 2114 return revset.makematcher(tree)(self)
2070 2115
2071 2116 def set(self, expr: bytes, *args):
2072 2117 """Find revisions matching a revset and emit changectx instances.
2073 2118
2074 2119 This is a convenience wrapper around ``revs()`` that iterates the
2075 2120 result and is a generator of changectx instances.
2076 2121
2077 2122 Revset aliases from the configuration are not expanded. To expand
2078 2123 user aliases, consider calling ``scmutil.revrange()``.
2079 2124 """
2080 2125 for r in self.revs(expr, *args):
2081 2126 yield self[r]
2082 2127
2083 2128 def anyrevs(self, specs: bytes, user=False, localalias=None):
2084 2129 """Find revisions matching one of the given revsets.
2085 2130
2086 2131 Revset aliases from the configuration are not expanded by default. To
2087 2132 expand user aliases, specify ``user=True``. To provide some local
2088 2133 definitions overriding user aliases, set ``localalias`` to
2089 2134 ``{name: definitionstring}``.
2090 2135 """
2091 2136 if specs == [b'null']:
2092 2137 return revset.baseset([nullrev])
2093 2138 if specs == [b'.']:
2094 2139 quick_data = self._quick_access_changeid.get(b'.')
2095 2140 if quick_data is not None:
2096 2141 return revset.baseset([quick_data[0]])
2097 2142 if user:
2098 2143 m = revset.matchany(
2099 2144 self.ui,
2100 2145 specs,
2101 2146 lookup=revset.lookupfn(self),
2102 2147 localalias=localalias,
2103 2148 )
2104 2149 else:
2105 2150 m = revset.matchany(None, specs, localalias=localalias)
2106 2151 return m(self)
2107 2152
2108 2153 def url(self) -> bytes:
2109 2154 return b'file:' + self.root
2110 2155
2111 2156 def hook(self, name, throw=False, **args):
2112 2157 """Call a hook, passing this repo instance.
2113 2158
2114 2159 This is a convenience method to aid invoking hooks. Extensions likely
2115 2160 won't call this unless they have registered a custom hook or are
2116 2161 replacing code that is expected to call a hook.
2117 2162 """
2118 2163 return hook.hook(self.ui, self, name, throw, **args)
2119 2164
2120 2165 @filteredpropertycache
2121 2166 def _tagscache(self):
2122 2167 """Returns a tagscache object that contains various tags related
2123 2168 caches."""
2124 2169
2125 2170 # This simplifies its cache management by having one decorated
2126 2171 # function (this one) and the rest simply fetch things from it.
2127 2172 class tagscache:
2128 2173 def __init__(self):
2129 2174 # These two define the set of tags for this repository. tags
2130 2175 # maps tag name to node; tagtypes maps tag name to 'global' or
2131 2176 # 'local'. (Global tags are defined by .hgtags across all
2132 2177 # heads, and local tags are defined in .hg/localtags.)
2133 2178 # They constitute the in-memory cache of tags.
2134 2179 self.tags = self.tagtypes = None
2135 2180
2136 2181 self.nodetagscache = self.tagslist = None
2137 2182
2138 2183 cache = tagscache()
2139 2184 cache.tags, cache.tagtypes = self._findtags()
2140 2185
2141 2186 return cache
2142 2187
2143 2188 def tags(self):
2144 2189 '''return a mapping of tag to node'''
2145 2190 t = {}
2146 2191 if self.changelog.filteredrevs:
2147 2192 tags, tt = self._findtags()
2148 2193 else:
2149 2194 tags = self._tagscache.tags
2150 2195 rev = self.changelog.rev
2151 2196 for k, v in tags.items():
2152 2197 try:
2153 2198 # ignore tags to unknown nodes
2154 2199 rev(v)
2155 2200 t[k] = v
2156 2201 except (error.LookupError, ValueError):
2157 2202 pass
2158 2203 return t
2159 2204
2160 2205 def _findtags(self):
2161 2206 """Do the hard work of finding tags. Return a pair of dicts
2162 2207 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2163 2208 maps tag name to a string like \'global\' or \'local\'.
2164 2209 Subclasses or extensions are free to add their own tags, but
2165 2210 should be aware that the returned dicts will be retained for the
2166 2211 duration of the localrepo object."""
2167 2212
2168 2213 # XXX what tagtype should subclasses/extensions use? Currently
2169 2214 # mq and bookmarks add tags, but do not set the tagtype at all.
2170 2215 # Should each extension invent its own tag type? Should there
2171 2216 # be one tagtype for all such "virtual" tags? Or is the status
2172 2217 # quo fine?
2173 2218
2174 2219 # map tag name to (node, hist)
2175 2220 alltags = tagsmod.findglobaltags(self.ui, self)
2176 2221 # map tag name to tag type
2177 2222 tagtypes = {tag: b'global' for tag in alltags}
2178 2223
2179 2224 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2180 2225
2181 2226 # Build the return dicts. Have to re-encode tag names because
2182 2227 # the tags module always uses UTF-8 (in order not to lose info
2183 2228 # writing to the cache), but the rest of Mercurial wants them in
2184 2229 # local encoding.
2185 2230 tags = {}
2186 2231 for name, (node, hist) in alltags.items():
2187 2232 if node != self.nullid:
2188 2233 tags[encoding.tolocal(name)] = node
2189 2234 tags[b'tip'] = self.changelog.tip()
2190 2235 tagtypes = {
2191 2236 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2192 2237 }
2193 2238 return (tags, tagtypes)
2194 2239
2195 2240 def tagtype(self, tagname):
2196 2241 """
2197 2242 return the type of the given tag. result can be:
2198 2243
2199 2244 'local' : a local tag
2200 2245 'global' : a global tag
2201 2246 None : tag does not exist
2202 2247 """
2203 2248
2204 2249 return self._tagscache.tagtypes.get(tagname)
2205 2250
2206 2251 def tagslist(self):
2207 2252 '''return a list of tags ordered by revision'''
2208 2253 if not self._tagscache.tagslist:
2209 2254 l = []
2210 2255 for t, n in self.tags().items():
2211 2256 l.append((self.changelog.rev(n), t, n))
2212 2257 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2213 2258
2214 2259 return self._tagscache.tagslist
2215 2260
2216 2261 def nodetags(self, node):
2217 2262 '''return the tags associated with a node'''
2218 2263 if not self._tagscache.nodetagscache:
2219 2264 nodetagscache = {}
2220 2265 for t, n in self._tagscache.tags.items():
2221 2266 nodetagscache.setdefault(n, []).append(t)
2222 2267 for tags in nodetagscache.values():
2223 2268 tags.sort()
2224 2269 self._tagscache.nodetagscache = nodetagscache
2225 2270 return self._tagscache.nodetagscache.get(node, [])
2226 2271
2227 2272 def nodebookmarks(self, node):
2228 2273 """return the list of bookmarks pointing to the specified node"""
2229 2274 return self._bookmarks.names(node)
2230 2275
2231 2276 def branchmap(self):
2232 2277 """returns a dictionary {branch: [branchheads]} with branchheads
2233 2278 ordered by increasing revision number"""
2234 2279 return self._branchcaches[self]
2235 2280
2236 2281 @unfilteredmethod
2237 2282 def revbranchcache(self):
2238 2283 if not self._revbranchcache:
2239 2284 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2240 2285 return self._revbranchcache
2241 2286
2242 2287 def register_changeset(self, rev, changelogrevision):
2243 2288 self.revbranchcache().setdata(rev, changelogrevision)
2244 2289
2245 2290 def branchtip(self, branch, ignoremissing=False):
2246 2291 """return the tip node for a given branch
2247 2292
2248 2293 If ignoremissing is True, then this method will not raise an error.
2249 2294 This is helpful for callers that only expect None for a missing branch
2250 2295 (e.g. namespace).
2251 2296
2252 2297 """
2253 2298 try:
2254 2299 return self.branchmap().branchtip(branch)
2255 2300 except KeyError:
2256 2301 if not ignoremissing:
2257 2302 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2258 2303 else:
2259 2304 pass
2260 2305
2261 2306 def lookup(self, key):
2262 2307 node = scmutil.revsymbol(self, key).node()
2263 2308 if node is None:
2264 2309 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2265 2310 return node
2266 2311
2267 2312 def lookupbranch(self, key):
2268 2313 if self.branchmap().hasbranch(key):
2269 2314 return key
2270 2315
2271 2316 return scmutil.revsymbol(self, key).branch()
2272 2317
2273 2318 def known(self, nodes):
2274 2319 cl = self.changelog
2275 2320 get_rev = cl.index.get_rev
2276 2321 filtered = cl.filteredrevs
2277 2322 result = []
2278 2323 for n in nodes:
2279 2324 r = get_rev(n)
2280 2325 resp = not (r is None or r in filtered)
2281 2326 result.append(resp)
2282 2327 return result
2283 2328
2284 2329 def local(self):
2285 2330 return self
2286 2331
2287 2332 def publishing(self):
2288 2333 # it's safe (and desirable) to trust the publish flag unconditionally
2289 2334 # so that we don't finalize changes shared between users via ssh or nfs
2290 2335 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2291 2336
2292 2337 def cancopy(self):
2293 2338 # so statichttprepo's override of local() works
2294 2339 if not self.local():
2295 2340 return False
2296 2341 if not self.publishing():
2297 2342 return True
2298 2343 # if publishing we can't copy if there is filtered content
2299 2344 return not self.filtered(b'visible').changelog.filteredrevs
2300 2345
2301 2346 def shared(self):
2302 2347 '''the type of shared repository (None if not shared)'''
2303 2348 if self.sharedpath != self.path:
2304 2349 return b'store'
2305 2350 return None
2306 2351
2307 2352 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2308 2353 return self.vfs.reljoin(self.root, f, *insidef)
2309 2354
2310 2355 def setparents(self, p1, p2=None):
2311 2356 if p2 is None:
2312 2357 p2 = self.nullid
2313 2358 self[None].setparents(p1, p2)
2314 2359 self._quick_access_changeid_invalidate()
2315 2360
2316 2361 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2317 2362 """changeid must be a changeset revision, if specified.
2318 2363 fileid can be a file revision or node."""
2319 2364 return context.filectx(
2320 2365 self, path, changeid, fileid, changectx=changectx
2321 2366 )
2322 2367
2323 2368 def getcwd(self) -> bytes:
2324 2369 return self.dirstate.getcwd()
2325 2370
2326 2371 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2327 2372 return self.dirstate.pathto(f, cwd)
2328 2373
2329 2374 def _loadfilter(self, filter):
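# Filter patterns come from config sections such as [encode]/[decode],
# mapping a file pattern to a command or registered data filter, e.g.
# (an illustrative configuration, not from this file):
#   [encode]
#   **.txt = dos2unix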
2330 2375 if filter not in self._filterpats:
2331 2376 l = []
2332 2377 for pat, cmd in self.ui.configitems(filter):
2333 2378 if cmd == b'!':
2334 2379 continue
2335 2380 mf = matchmod.match(self.root, b'', [pat])
2336 2381 fn = None
2337 2382 params = cmd
2338 2383 for name, filterfn in self._datafilters.items():
2339 2384 if cmd.startswith(name):
2340 2385 fn = filterfn
2341 2386 params = cmd[len(name) :].lstrip()
2342 2387 break
2343 2388 if not fn:
2344 2389 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2345 2390 fn.__name__ = 'commandfilter'
2346 2391 # Wrap old filters not supporting keyword arguments
2347 2392 if not pycompat.getargspec(fn)[2]:
2348 2393 oldfn = fn
2349 2394 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2350 2395 fn.__name__ = 'compat-' + oldfn.__name__
2351 2396 l.append((mf, fn, params))
2352 2397 self._filterpats[filter] = l
2353 2398 return self._filterpats[filter]
2354 2399
2355 2400 def _filter(self, filterpats, filename, data):
2356 2401 for mf, fn, cmd in filterpats:
2357 2402 if mf(filename):
2358 2403 self.ui.debug(
2359 2404 b"filtering %s through %s\n"
2360 2405 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2361 2406 )
2362 2407 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2363 2408 break
2364 2409
2365 2410 return data
2366 2411
2367 2412 @unfilteredpropertycache
2368 2413 def _encodefilterpats(self):
2369 2414 return self._loadfilter(b'encode')
2370 2415
2371 2416 @unfilteredpropertycache
2372 2417 def _decodefilterpats(self):
2373 2418 return self._loadfilter(b'decode')
2374 2419
2375 2420 def adddatafilter(self, name, filter):
2376 2421 self._datafilters[name] = filter
2377 2422
2378 2423 def wread(self, filename: bytes) -> bytes:
2379 2424 if self.wvfs.islink(filename):
2380 2425 data = self.wvfs.readlink(filename)
2381 2426 else:
2382 2427 data = self.wvfs.read(filename)
2383 2428 return self._filter(self._encodefilterpats, filename, data)
2384 2429
2385 2430 def wwrite(
2386 2431 self,
2387 2432 filename: bytes,
2388 2433 data: bytes,
2389 2434 flags: bytes,
2390 2435 backgroundclose=False,
2391 2436 **kwargs,
2392 2437 ) -> int:
2393 2438 """write ``data`` into ``filename`` in the working directory
2394 2439
2395 2440 This returns the length of the written (maybe decoded) data.
2396 2441 """
2397 2442 data = self._filter(self._decodefilterpats, filename, data)
2398 2443 if b'l' in flags:
2399 2444 self.wvfs.symlink(data, filename)
2400 2445 else:
2401 2446 self.wvfs.write(
2402 2447 filename, data, backgroundclose=backgroundclose, **kwargs
2403 2448 )
2404 2449 if b'x' in flags:
2405 2450 self.wvfs.setflags(filename, False, True)
2406 2451 else:
2407 2452 self.wvfs.setflags(filename, False, False)
2408 2453 return len(data)
2409 2454
2410 2455 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2411 2456 return self._filter(self._decodefilterpats, filename, data)
2412 2457
2413 2458 def currenttransaction(self):
2414 2459 """return the current transaction or None if non exists"""
2415 2460 if self._transref:
2416 2461 tr = self._transref()
2417 2462 else:
2418 2463 tr = None
2419 2464
2420 2465 if tr and tr.running():
2421 2466 return tr
2422 2467 return None
2423 2468
2424 2469 def transaction(self, desc, report=None):
2425 2470 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2426 2471 b'devel', b'check-locks'
2427 2472 ):
2428 2473 if self._currentlock(self._lockref) is None:
2429 2474 raise error.ProgrammingError(b'transaction requires locking')
2430 2475 tr = self.currenttransaction()
2431 2476 if tr is not None:
2432 2477 return tr.nest(name=desc)
2433 2478
2434 2479 # abort here if the journal already exists
2435 2480 if self.svfs.exists(b"journal"):
2436 2481 raise error.RepoError(
2437 2482 _(b"abandoned transaction found"),
2438 2483 hint=_(b"run 'hg recover' to clean up transaction"),
2439 2484 )
2440 2485
2441 2486 # At that point your dirstate should be clean:
2442 2487 #
2443 2488 # - If you don't have the wlock, why would you still have a dirty
2444 2489 # dirstate?
2445 2490 #
2446 2491 # - If you hold the wlock, you should not be opening a transaction in
2447 2492 # the middle of a `dirstate.changing_*` block. The transaction needs to
2448 2493 # be open before that and wrap the change-context.
2449 2494 #
2450 2495 # - If you are not within a `dirstate.changing_*` context, why is our
2451 2496 # dirstate dirty?
2452 2497 if self.dirstate._dirty:
2453 2498 m = "cannot open a transaction with a dirty dirstate"
2454 2499 raise error.ProgrammingError(m)
2455 2500
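# Build a best-effort unique transaction id: hash randomness plus the
# current time, prefixed with b'TXN:' (descriptive note).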
2456 2501 idbase = b"%.40f#%f" % (random.random(), time.time())
2457 2502 ha = hex(hashutil.sha1(idbase).digest())
2458 2503 txnid = b'TXN:' + ha
2459 2504 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2460 2505
2461 2506 self._writejournal(desc)
2462 2507 if report:
2463 2508 rp = report
2464 2509 else:
2465 2510 rp = self.ui.warn
2466 2511 vfsmap = self.vfs_map
2467 2512 # we must avoid a cyclic reference between repo and transaction.
2468 2513 reporef = weakref.ref(self)
2469 2514 # Code to track tag movement
2470 2515 #
2471 2516 # Since tags are all handled as file content, it is actually quite hard
2472 2517 # to track these movements from a code perspective. So we fall back to
2473 2518 # tracking at the repository level. One could envision tracking changes
2474 2519 # to the '.hgtags' file through changegroup apply but that fails to
2475 2520 # cope with cases where a transaction exposes new heads without a
2476 2521 # changegroup being involved (e.g. phase movement).
2477 2522 #
2478 2523 # For now, we gate the feature behind a flag since this likely comes
2479 2524 # with performance impacts. The current code runs more often than needed
2480 2525 # and does not use caches as much as it could. The current focus is on
2481 2526 # the behavior of the feature so we disable it by default. The flag
2482 2527 # will be removed when we are happy with the performance impact.
2483 2528 #
2484 2529 # Once this feature is no longer experimental move the following
2485 2530 # documentation to the appropriate help section:
2486 2531 #
2487 2532 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2488 2533 # tags (new or changed or deleted tags). In addition the details of
2489 2534 # these changes are made available in a file at:
2490 2535 # ``REPOROOT/.hg/changes/tags.changes``.
2491 2536 # Make sure you check for HG_TAG_MOVED before reading that file as it
2492 2537 # might exist from a previous transaction even if no tags were touched
2493 2538 # in this one. Changes are recorded in a line-based format::
2494 2539 #
2495 2540 # <action> <hex-node> <tag-name>\n
2496 2541 #
2497 2542 # Actions are defined as follows:
2498 2543 # "-R": tag is removed,
2499 2544 # "+A": tag is added,
2500 2545 # "-M": tag is moved (old value),
2501 2546 # "+M": tag is moved (new value),
2502 2547 tracktags = lambda x: None
2503 2548 # experimental config: experimental.hook-track-tags
2504 2549 shouldtracktags = self.ui.configbool(
2505 2550 b'experimental', b'hook-track-tags'
2506 2551 )
2507 2552 if desc != b'strip' and shouldtracktags:
2508 2553 oldheads = self.changelog.headrevs()
2509 2554
2510 2555 def tracktags(tr2):
2511 2556 repo = reporef()
2512 2557 assert repo is not None # help pytype
2513 2558 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2514 2559 newheads = repo.changelog.headrevs()
2515 2560 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2516 2561 # notes: we compare lists here.
2517 2562 # As we do it only once, building a set would not be cheaper
2518 2563 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2519 2564 if changes:
2520 2565 tr2.hookargs[b'tag_moved'] = b'1'
2521 2566 with repo.vfs(
2522 2567 b'changes/tags.changes', b'w', atomictemp=True
2523 2568 ) as changesfile:
2524 2569 # note: we do not register the file with the transaction
2525 2570 # because we need it to still exist when the transaction
2526 2571 # is closed (for txnclose hooks)
2527 2572 tagsmod.writediff(changesfile, changes)
2528 2573
2529 2574 def validate(tr2):
2530 2575 """will run pre-closing hooks"""
2531 2576 # XXX the transaction API is a bit lacking here so we take a hacky
2532 2577 # path for now
2533 2578 #
2534 2579 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2535 2580 # dict is copied before these run. In addition we need the data
2536 2581 # available to in-memory hooks too.
2537 2582 #
2538 2583 # Moreover, we also need to make sure this runs before txnclose
2539 2584 # hooks and there is no "pending" mechanism that would execute
2540 2585 # logic only if hooks are about to run.
2541 2586 #
2542 2587 # Fixing this limitation of the transaction is also needed to track
2543 2588 # other families of changes (bookmarks, phases, obsolescence).
2544 2589 #
2545 2590 # This will have to be fixed before we remove the experimental
2546 2591 # gating.
2547 2592 tracktags(tr2)
2548 2593 repo = reporef()
2549 2594 assert repo is not None # help pytype
2550 2595
2551 2596 singleheadopt = (b'experimental', b'single-head-per-branch')
2552 2597 singlehead = repo.ui.configbool(*singleheadopt)
2553 2598 if singlehead:
2554 2599 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2555 2600 accountclosed = singleheadsub.get(
2556 2601 b"account-closed-heads", False
2557 2602 )
2558 2603 if singleheadsub.get(b"public-changes-only", False):
2559 2604 filtername = b"immutable"
2560 2605 else:
2561 2606 filtername = b"visible"
2562 2607 scmutil.enforcesinglehead(
2563 2608 repo, tr2, desc, accountclosed, filtername
2564 2609 )
2565 2610 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2566 2611 for name, (old, new) in sorted(
2567 2612 tr.changes[b'bookmarks'].items()
2568 2613 ):
2569 2614 args = tr.hookargs.copy()
2570 2615 args.update(bookmarks.preparehookargs(name, old, new))
2571 2616 repo.hook(
2572 2617 b'pretxnclose-bookmark',
2573 2618 throw=True,
2574 2619 **pycompat.strkwargs(args),
2575 2620 )
2576 2621 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2577 2622 cl = repo.unfiltered().changelog
2578 2623 for revs, (old, new) in tr.changes[b'phases']:
2579 2624 for rev in revs:
2580 2625 args = tr.hookargs.copy()
2581 2626 node = hex(cl.node(rev))
2582 2627 args.update(phases.preparehookargs(node, old, new))
2583 2628 repo.hook(
2584 2629 b'pretxnclose-phase',
2585 2630 throw=True,
2586 2631 **pycompat.strkwargs(args),
2587 2632 )
2588 2633
2589 2634 repo.hook(
2590 2635 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2591 2636 )
2592 2637
2593 2638 def releasefn(tr, success):
2594 2639 repo = reporef()
2595 2640 if repo is None:
2596 2641 # If the repo has been GC'd (and this release function is being
2597 2642 # called from transaction.__del__), there's not much we can do,
2598 2643 # so just leave the unfinished transaction there and let the
2599 2644 # user run `hg recover`.
2600 2645 return
2601 2646 if success:
2602 2647 # this should be explicitly invoked here, because
2603 2648 # in-memory changes aren't written out at closing
2604 2649 # transaction, if tr.addfilegenerator (via
2605 2650 # dirstate.write or so) isn't invoked while
2606 2651 # transaction running
2607 2652 repo.dirstate.write(None)
2608 2653 else:
2609 2654 # discard all changes (including ones already written
2610 2655 # out) in this transaction
2611 2656 repo.invalidate(clearfilecache=True)
2612 2657
2613 2658 tr = transaction.transaction(
2614 2659 rp,
2615 2660 self.svfs,
2616 2661 vfsmap,
2617 2662 b"journal",
2618 2663 b"undo",
2619 2664 lambda: None,
2620 2665 self.store.createmode,
2621 2666 validator=validate,
2622 2667 releasefn=releasefn,
2623 2668 checkambigfiles=_cachedfiles,
2624 2669 name=desc,
2625 2670 )
2626 2671 for vfs_id, path in self._journalfiles():
2627 2672 tr.add_journal(vfs_id, path)
2628 2673 tr.changes[b'origrepolen'] = len(self)
2629 2674 tr.changes[b'obsmarkers'] = set()
2630 2675 tr.changes[b'phases'] = []
2631 2676 tr.changes[b'bookmarks'] = {}
2632 2677
2633 2678 tr.hookargs[b'txnid'] = txnid
2634 2679 tr.hookargs[b'txnname'] = desc
2635 2680 tr.hookargs[b'changes'] = tr.changes
2636 2681 # note: writing the fncache only during finalize means that the file is
2637 2682 # outdated when running hooks. As fncache is used for streaming clones,
2638 2683 # this is not expected to break anything that happens during the hooks.
2639 2684 tr.addfinalize(b'flush-fncache', self.store.write)
2640 2685
2641 2686 def txnclosehook(tr2):
2642 2687 """To be run if transaction is successful, will schedule a hook run"""
2643 2688 # Don't reference tr2 in hook() so we don't hold a reference.
2644 2689 # This reduces memory consumption when there are multiple
2645 2690 # transactions per lock. This can likely go away if issue5045
2646 2691 # fixes the function accumulation.
2647 2692 hookargs = tr2.hookargs
2648 2693
2649 2694 def hookfunc(unused_success):
2650 2695 repo = reporef()
2651 2696 assert repo is not None # help pytype
2652 2697
2653 2698 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2654 2699 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2655 2700 for name, (old, new) in bmchanges:
2656 2701 args = tr.hookargs.copy()
2657 2702 args.update(bookmarks.preparehookargs(name, old, new))
2658 2703 repo.hook(
2659 2704 b'txnclose-bookmark',
2660 2705 throw=False,
2661 2706 **pycompat.strkwargs(args),
2662 2707 )
2663 2708
2664 2709 if hook.hashook(repo.ui, b'txnclose-phase'):
2665 2710 cl = repo.unfiltered().changelog
2666 2711 phasemv = sorted(
2667 2712 tr.changes[b'phases'], key=lambda r: r[0][0]
2668 2713 )
2669 2714 for revs, (old, new) in phasemv:
2670 2715 for rev in revs:
2671 2716 args = tr.hookargs.copy()
2672 2717 node = hex(cl.node(rev))
2673 2718 args.update(phases.preparehookargs(node, old, new))
2674 2719 repo.hook(
2675 2720 b'txnclose-phase',
2676 2721 throw=False,
2677 2722 **pycompat.strkwargs(args),
2678 2723 )
2679 2724
2680 2725 repo.hook(
2681 2726 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2682 2727 )
2683 2728
2684 2729 repo = reporef()
2685 2730 assert repo is not None # help pytype
2686 2731 repo._afterlock(hookfunc)
2687 2732
2688 2733 tr.addfinalize(b'txnclose-hook', txnclosehook)
2689 2734 # Include a leading "-" to make it happen before the transaction summary
2690 2735 # reports registered via scmutil.registersummarycallback() whose names
2691 2736 # are 00-txnreport etc. That way, the caches will be warm when the
2692 2737 # callbacks run.
2693 2738 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2694 2739
2695 2740 def txnaborthook(tr2):
2696 2741 """To be run if transaction is aborted"""
2697 2742 repo = reporef()
2698 2743 assert repo is not None # help pytype
2699 2744 repo.hook(
2700 2745 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2701 2746 )
2702 2747
2703 2748 tr.addabort(b'txnabort-hook', txnaborthook)
2704 2749 # avoid eager cache invalidation. in-memory data should be identical
2705 2750 # to stored data if transaction has no error.
2706 2751 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2707 2752 self._transref = weakref.ref(tr)
2708 2753 scmutil.registersummarycallback(self, tr, desc)
2709 2754 # This only exists to deal with the need for rollback to have viable
2710 2755 # parents at the end of the operation. So back up viable parents at the
2711 2756 # time of this operation.
2712 2757 #
2713 2758 # We only do it when the `wlock` is taken, otherwise others might be
2714 2759 # altering the dirstate under us.
2715 2760 #
2716 2761 # This is really not a great way to do this (first, because we cannot
2717 2762 # always do it). More viable alternatives exist:
2718 2763 #
2719 2764 # - backing up only the working copy parents in a dedicated file and doing
2720 2765 # a clean "keep-update" to them on `hg rollback`.
2721 2766 #
2722 2767 # - slightly changing the behavior and applying logic similar to "hg
2723 2768 # strip" to pick a working copy destination on `hg rollback`
2724 2769 if self.currentwlock() is not None:
2725 2770 ds = self.dirstate
2726 2771 if not self.vfs.exists(b'branch'):
2727 2772 # force a file to be written if none exists
2728 2773 ds.setbranch(b'default', None)
2729 2774
2730 2775 def backup_dirstate(tr):
2731 2776 for f in ds.all_file_names():
2732 2777 # hardlink backup is okay because `dirstate` is always
2733 2778 # atomically written and possible data file are append only
2734 2779 # and resistant to trailing data.
2735 2780 tr.addbackup(f, hardlink=True, location=b'plain')
2736 2781
2737 2782 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2738 2783 return tr
2739 2784
2740 2785 def _journalfiles(self):
2741 2786 return (
2742 2787 (self.svfs, b'journal'),
2743 2788 (self.vfs, b'journal.desc'),
2744 2789 )
2745 2790
2746 2791 def undofiles(self):
2747 2792 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2748 2793
2749 2794 @unfilteredmethod
2750 2795 def _writejournal(self, desc):
2751 2796 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2752 2797
2753 2798 def recover(self):
2754 2799 with self.lock():
2755 2800 if self.svfs.exists(b"journal"):
2756 2801 self.ui.status(_(b"rolling back interrupted transaction\n"))
2757 2802 vfsmap = self.vfs_map
2758 2803 transaction.rollback(
2759 2804 self.svfs,
2760 2805 vfsmap,
2761 2806 b"journal",
2762 2807 self.ui.warn,
2763 2808 checkambigfiles=_cachedfiles,
2764 2809 )
2765 2810 self.invalidate()
2766 2811 return True
2767 2812 else:
2768 2813 self.ui.warn(_(b"no interrupted transaction available\n"))
2769 2814 return False
2770 2815
2771 2816 def rollback(self, dryrun=False, force=False):
2772 2817 wlock = lock = None
2773 2818 try:
2774 2819 wlock = self.wlock()
2775 2820 lock = self.lock()
2776 2821 if self.svfs.exists(b"undo"):
2777 2822 return self._rollback(dryrun, force)
2778 2823 else:
2779 2824 self.ui.warn(_(b"no rollback information available\n"))
2780 2825 return 1
2781 2826 finally:
2782 2827 release(lock, wlock)
2783 2828
2784 2829 @unfilteredmethod # Until we get smarter cache management
2785 2830 def _rollback(self, dryrun, force):
2786 2831 ui = self.ui
2787 2832
2788 2833 parents = self.dirstate.parents()
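# undo.desc layout, as written at transaction open (descriptive note):
# first line is the old changelog length, second the transaction
# description, with an optional third detail line.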
2789 2834 try:
2790 2835 args = self.vfs.read(b'undo.desc').splitlines()
2791 2836 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2792 2837 if len(args) >= 3:
2793 2838 detail = args[2]
2794 2839 oldtip = oldlen - 1
2795 2840
2796 2841 if detail and ui.verbose:
2797 2842 msg = _(
2798 2843 b'repository tip rolled back to revision %d'
2799 2844 b' (undo %s: %s)\n'
2800 2845 ) % (oldtip, desc, detail)
2801 2846 else:
2802 2847 msg = _(
2803 2848 b'repository tip rolled back to revision %d (undo %s)\n'
2804 2849 ) % (oldtip, desc)
2805 2850 parentgone = any(self[p].rev() > oldtip for p in parents)
2806 2851 except IOError:
2807 2852 msg = _(b'rolling back unknown transaction\n')
2808 2853 desc = None
2809 2854 parentgone = True
2810 2855
2811 2856 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2812 2857 raise error.Abort(
2813 2858 _(
2814 2859 b'rollback of last commit while not checked out '
2815 2860 b'may lose data'
2816 2861 ),
2817 2862 hint=_(b'use -f to force'),
2818 2863 )
2819 2864
2820 2865 ui.status(msg)
2821 2866 if dryrun:
2822 2867 return 0
2823 2868
2824 2869 self.destroying()
2825 2870 vfsmap = self.vfs_map
2826 2871 skip_journal_pattern = None
2827 2872 if not parentgone:
2828 2873 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2829 2874 transaction.rollback(
2830 2875 self.svfs,
2831 2876 vfsmap,
2832 2877 b'undo',
2833 2878 ui.warn,
2834 2879 checkambigfiles=_cachedfiles,
2835 2880 skip_journal_pattern=skip_journal_pattern,
2836 2881 )
2837 2882 self.invalidate()
2838 2883 self.dirstate.invalidate()
2839 2884
2840 2885 if parentgone:
2841 2886 # replace this with some explicit parent update in the future.
2842 2887 has_node = self.changelog.index.has_node
2843 2888 if not all(has_node(p) for p in self.dirstate._pl):
2844 2889 # There was no dirstate to back up initially; we need to drop
2845 2890 # the existing one.
2846 2891 with self.dirstate.changing_parents(self):
2847 2892 self.dirstate.setparents(self.nullid)
2848 2893 self.dirstate.clear()
2849 2894
2850 2895 parents = tuple([p.rev() for p in self[None].parents()])
2851 2896 if len(parents) > 1:
2852 2897 ui.status(
2853 2898 _(
2854 2899 b'working directory now based on '
2855 2900 b'revisions %d and %d\n'
2856 2901 )
2857 2902 % parents
2858 2903 )
2859 2904 else:
2860 2905 ui.status(
2861 2906 _(b'working directory now based on revision %d\n') % parents
2862 2907 )
2863 2908 mergestatemod.mergestate.clean(self)
2864 2909
2865 2910 # TODO: if we know which new heads may result from this rollback, pass
2866 2911 # them to destroy(), which will prevent the branchhead cache from being
2867 2912 # invalidated.
2868 2913 self.destroyed()
2869 2914 return 0
2870 2915
2871 2916 def _buildcacheupdater(self, newtransaction):
2872 2917 """called during transaction to build the callback updating cache
2873 2918
2874 2919 Lives on the repository to help extensions that might want to augment
2875 2920 this logic. For this purpose, the created transaction is passed to the
2876 2921 method.
2877 2922 """
2878 2923 # we must avoid a cyclic reference between repo and transaction.
2879 2924 reporef = weakref.ref(self)
2880 2925
2881 2926 def updater(tr):
2882 2927 repo = reporef()
2883 2928 assert repo is not None # help pytype
2884 2929 repo.updatecaches(tr)
2885 2930
2886 2931 return updater
2887 2932
2888 2933 @unfilteredmethod
2889 2934 def updatecaches(self, tr=None, full=False, caches=None):
2890 2935 """warm appropriate caches
2891 2936
2892 2937 If this function is called after a transaction closed, the transaction
2893 2938 will be available in the 'tr' argument. This can be used to selectively
2894 2939 update caches relevant to the changes in that transaction.
2895 2940
2896 2941 If 'full' is set, make sure all caches the function knows about have
2897 2942 up-to-date data. Even the ones usually loaded more lazily.
2898 2943
2899 2944 The `full` argument can take a special "post-clone" value. In this case
2900 2945 the cache warming happens after a clone, and some of the slower caches
2901 2946 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2902 2947 as we plan for a cleaner way to deal with this for 5.9.
2903 2948 """
2904 2949 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2905 2950 # During strip, many caches are invalid but
2906 2951 # a later call to `destroyed` will refresh them.
2907 2952 return
2908 2953
2909 2954 unfi = self.unfiltered()
2910 2955
2911 2956 if caches is None:
2912 2957 caches = repository.CACHES_DEFAULT
2913 2958
2914 2959 if repository.CACHE_BRANCHMAP_SERVED in caches:
2915 2960 if tr is None or tr.changes[b'origrepolen'] < len(self):
2916 2961 self.ui.debug(b'updating the branch cache\n')
2917 2962 dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
2918 2963 served = self.filtered(b'served')
2919 2964 self._branchcaches.update_disk(served, detect_pure_topo=dpt)
2920 2965 served_hidden = self.filtered(b'served.hidden')
2921 2966 self._branchcaches.update_disk(
2922 2967 served_hidden, detect_pure_topo=dpt
2923 2968 )
2924 2969
2925 2970 if repository.CACHE_CHANGELOG_CACHE in caches:
2926 2971 self.changelog.update_caches(transaction=tr)
2927 2972
2928 2973 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2929 2974 self.manifestlog.update_caches(transaction=tr)
2930 2975 for entry in self.store.walk():
2931 2976 if not entry.is_revlog:
2932 2977 continue
2933 2978 if not entry.is_manifestlog:
2934 2979 continue
2935 2980 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2936 2981 if manifestrevlog is not None:
2937 2982 manifestrevlog.update_caches(transaction=tr)
2938 2983
2939 2984 if repository.CACHE_REV_BRANCH in caches:
2940 2985 rbc = unfi.revbranchcache()
2941 2986 for r in unfi.changelog:
2942 2987 rbc.branchinfo(r)
2943 2988 rbc.write()
2944 2989
2945 2990 if repository.CACHE_FULL_MANIFEST in caches:
2946 2991 # ensure the working copy parents are in the manifestfulltextcache
2947 2992 for ctx in self[b'.'].parents():
2948 2993 ctx.manifest() # accessing the manifest is enough
2949 2994
2950 2995 if repository.CACHE_FILE_NODE_TAGS in caches:
2951 2996 # accessing the fnode cache warms it
2952 2997 tagsmod.warm_cache(self)
2953 2998
2954 2999 if repository.CACHE_TAGS_DEFAULT in caches:
2955 3000 # accessing tags warms the cache
2956 3001 self.tags()
2957 3002 if repository.CACHE_TAGS_SERVED in caches:
2958 3003 self.filtered(b'served').tags()
2959 3004
2960 3005 if repository.CACHE_BRANCHMAP_ALL in caches:
2961 3006 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2962 3007 # so we're forcing a write to cause these caches to be warmed up
2963 3008 # even if they haven't explicitly been requested yet (if they've
2964 3009 # never been used by hg, they won't ever have been written, even if
2965 3010 # they're a subset of another kind of cache that *has* been used).
2966 3011 dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
2967 3012
2968 3013 for filt in repoview.filtertable.keys():
2969 3014 filtered = self.filtered(filt)
2970 3015 self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)
2971 3016
2972 3017 # flush all possibly delayed write.
2973 3018 self._branchcaches.write_dirty(self)
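
    # Example (illustrative sketch, not part of the original module): callers
    # can warm a subset of caches by passing an explicit `caches` set, e.g.
    # only the served branchmap after a transaction:
    #
    #     from mercurial.interfaces import repository
    #     repo.updatecaches(caches={repository.CACHE_BRANCHMAP_SERVED})
    #
    # Anything not listed (tags, rev-branch cache, ...) is left cold.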

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog.is_delaying
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact it refers to an attribute name. However changing this
                # was a bit of scope creep compared to the series cleaning up
                # del/set/getattr, so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
        sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
        if not sync_file:
            sync_file = None

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
            devel_wait_sync_file=sync_file,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
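
    # Example (illustrative sketch): deferring work until every lock is
    # released. The callback receives a success flag; it is invoked
    # immediately with True when no lock is currently held.
    #
    #     def _after(success):
    #         repo.ui.debug(b'all repository locks released\n')
    #
    #     repo._afterlock(_after)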

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        self.hook(b'prelock', throw=True)
        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        self.hook(b'prewlock', throw=True)
        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

                unfi = self.unfiltered()
                if 'dirstate' in unfi.__dict__:
                    del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l
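
    # Example (sketch): the documented lock-ordering contract in practice.
    # 'wlock' is always taken before 'lock' to avoid the dead-lock hazard
    # described above; the commit() method below follows the same pattern.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the working copy and the store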

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
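
    # Example (sketch): a minimal programmatic commit through this method,
    # roughly what `hg commit -m ...` ends up doing. The message and user
    # below are made up for illustration.
    #
    #     node = repo.commit(
    #         text=b'example: illustrate repo.commit()',
    #         user=b'alice <alice@example.com>',
    #     )
    #     # returns the new changeset node, or None if nothing to commit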

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write(self)

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
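
    # Example (illustrative): `between` samples the first-parent chain at
    # exponentially growing distances. For a linear history c0..c9 with
    # top=c9 and bottom=c0, the returned list holds the nodes 1, 2, 4 and
    # 8 steps below the top:
    #
    #     repo.between([(c9, c0)])  # -> [[c8, c7, c5, c1]]
    #
    # which gives the legacy discovery protocol logarithmic probe points.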

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
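
    # Example (sketch): exercising the generic pushkey mechanism, as the
    # wire protocol does for namespaces such as b'bookmarks' or b'phases'.
    # The key and the `old_hex`/`new_hex` values are hypothetical.
    #
    #     ok = repo.pushkey(b'bookmarks', b'feature-x', old_hex, new_hex)
    #     # runs the prepushkey/pushkey hooks around pushkey.push()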

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


localrepository = interfaceutil.implementer(repository.ilocalrepositorymain)(
    LocalRepository
)

if typing.TYPE_CHECKING:
    # Help pytype by hiding the interface stuff that confuses it.
    localrepository = LocalRepository


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
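
# Example: maps a transaction journal file to its undo backup, e.g.
#
#     undoname(b'/repo/.hg/store/journal.phaseroots')
#     # -> b'/repo/.hg/store/undo.phaseroots'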


def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
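
# Example (sketch): caller-provided options are preserved while missing
# keys are filled from configuration.
#
#     opts = defaultcreateopts(ui, createopts={b'narrowfiles': True})
#     # opts keeps b'narrowfiles' and gains a b'backend' entry taken
#     # from the 'storage.new-repo-backend' config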


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirement
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
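
# Example (illustrative): with stock configuration the returned set holds
# entries such as REVLOGV1_REQUIREMENT, STORE_REQUIREMENT,
# FNCACHE_REQUIREMENT, DOTENCODE_REQUIREMENT, GENERALDELTA_REQUIREMENT and
# SPARSEREVLOG_REQUIREMENT; the exact contents depend on the active config
# and on which compression engines are available.
#
#     reqs = newreporequirements(ui, defaultcreateopts(ui))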


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which needs to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
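
# Example: callers subtract the returned set from their requirements, as
# createrepository() does below:
#
#     requirements = newreporequirements(ui, createopts=createopts)
#     requirements -= checkrequirementscompat(ui, requirements)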


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
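
# Example: an option outside the known set survives the filter, which in
# turn makes createrepository() abort. `b'frobnicate'` is a made-up key.
#
#     filterknowncreateopts(ui, {b'backend': b'revlogv1', b'frobnicate': 1})
#     # -> {b'frobnicate': 1}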


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
    (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
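
# Example (sketch): programmatic creation of a fresh repository, roughly
# what `hg init newrepo` does via instance(..., create=True). The path is
# made up for illustration.
#
#     createrepository(ui, b'/tmp/newrepo')
#     repo = instance(ui, b'/tmp/newrepo', create=False)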


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
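
# Example (illustrative): once poisoned, any attribute access other than
# close() raises, so stale references fail loudly instead of silently
# operating on an unshared repository.
#
#     poisonrepository(repo)
#     repo.close()   # still allowed
#     repo.root      # -> error.ProgrammingError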