localrepo: drop the CamelCase name for `localrepo.ilocalrepositorymain`...
Matt Harbison - r52973:513b4137 default
@@ -1,4054 +1,4044 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import functools
12 12 import os
13 13 import random
14 14 import re
15 15 import sys
16 16 import time
17 17 import typing
18 18 import weakref
19 19
20 20 from concurrent import futures
21 21 from typing import (
22 22 Optional,
23 23 )
24 24
25 25 from .i18n import _
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 )
33 33 from . import (
34 34 bookmarks,
35 35 branchmap,
36 36 bundle2,
37 37 bundlecaches,
38 38 changegroup,
39 39 color,
40 40 commit,
41 41 context,
42 42 dirstate,
43 43 discovery,
44 44 encoding,
45 45 error,
46 46 exchange,
47 47 extensions,
48 48 filelog,
49 49 hook,
50 50 lock as lockmod,
51 51 match as matchmod,
52 52 mergestate as mergestatemod,
53 53 mergeutil,
54 54 namespaces,
55 55 narrowspec,
56 56 obsolete,
57 57 pathutil,
58 58 phases,
59 59 policy,
60 60 pushkey,
61 61 pycompat,
62 62 rcutil,
63 63 repoview,
64 64 requirements as requirementsmod,
65 65 revlog,
66 66 revset,
67 67 revsetlang,
68 68 scmutil,
69 69 sparse,
70 70 store as storemod,
71 71 subrepoutil,
72 72 tags as tagsmod,
73 73 transaction,
74 74 txnutil,
75 75 util,
76 76 vfs as vfsmod,
77 77 wireprototypes,
78 78 )
79 79
80 80 from .branching import (
81 81 rev_cache as rev_branch_cache,
82 82 )
83 83
84 84 from .interfaces import (
85 85 repository,
86 util as interfaceutil,
87 86 )
88 87
89 88 from .utils import (
90 89 hashutil,
91 90 procutil,
92 91 stringutil,
93 92 urlutil,
94 93 )
95 94
96 95 from .revlogutils import (
97 96 concurrency_checker as revlogchecker,
98 97 constants as revlogconst,
99 98 sidedata as sidedatamod,
100 99 )
101 100
102 101 release = lockmod.release
103 102 urlerr = util.urlerr
104 103 urlreq = util.urlreq
105 104
106 105 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
107 106 b"^((dirstate|narrowspec.dirstate).*|branch$)"
108 107 )
109 108
110 109 # set of (path, vfs-location) tuples. vfs-location is:
112 111 # - 'plain' for vfs relative paths
112 111 # - '' for svfs relative paths
113 112 _cachedfiles = set()
114 113
115 114
116 115 class _basefilecache(scmutil.filecache):
117 116 """All filecache usage on repo are done for logic that should be unfiltered"""
118 117
119 118 def __get__(self, repo, type=None):
120 119 if repo is None:
121 120 return self
122 121 # proxy to unfiltered __dict__ since filtered repo has no entry
123 122 unfi = repo.unfiltered()
124 123 try:
125 124 return unfi.__dict__[self.sname]
126 125 except KeyError:
127 126 pass
128 127 return super(_basefilecache, self).__get__(unfi, type)
129 128
130 129 def set(self, repo, value):
131 130 return super(_basefilecache, self).set(repo.unfiltered(), value)
132 131
133 132
134 133 class repofilecache(_basefilecache):
135 134 """filecache for files in .hg but outside of .hg/store"""
136 135
137 136 def __init__(self, *paths):
138 137 super(repofilecache, self).__init__(*paths)
139 138 for path in paths:
140 139 _cachedfiles.add((path, b'plain'))
141 140
142 141 def join(self, obj, fname):
143 142 return obj.vfs.join(fname)
144 143
145 144
146 145 class storecache(_basefilecache):
147 146 """filecache for files in the store"""
148 147
149 148 def __init__(self, *paths):
150 149 super(storecache, self).__init__(*paths)
151 150 for path in paths:
152 151 _cachedfiles.add((path, b''))
153 152
154 153 def join(self, obj, fname):
155 154 return obj.sjoin(fname)
156 155
157 156
158 157 class changelogcache(storecache):
159 158 """filecache for the changelog"""
160 159
161 160 def __init__(self):
162 161 super(changelogcache, self).__init__()
163 162 _cachedfiles.add((b'00changelog.i', b''))
164 163 _cachedfiles.add((b'00changelog.n', b''))
165 164
166 165 def tracked_paths(self, obj):
167 166 paths = [self.join(obj, b'00changelog.i')]
168 167 if obj.store.opener.options.get(b'persistent-nodemap', False):
169 168 paths.append(self.join(obj, b'00changelog.n'))
170 169 return paths
171 170
172 171
173 172 class manifestlogcache(storecache):
174 173 """filecache for the manifestlog"""
175 174
176 175 def __init__(self):
177 176 super(manifestlogcache, self).__init__()
178 177 _cachedfiles.add((b'00manifest.i', b''))
179 178 _cachedfiles.add((b'00manifest.n', b''))
180 179
181 180 def tracked_paths(self, obj):
182 181 paths = [self.join(obj, b'00manifest.i')]
183 182 if obj.store.opener.options.get(b'persistent-nodemap', False):
184 183 paths.append(self.join(obj, b'00manifest.n'))
185 184 return paths
186 185
187 186
188 187 class mixedrepostorecache(_basefilecache):
189 188 """filecache for a mix files in .hg/store and outside"""
190 189
191 190 def __init__(self, *pathsandlocations):
192 191 # scmutil.filecache only uses the path for passing back into our
193 192 # join(), so we can safely pass a list of paths and locations
194 193 super(mixedrepostorecache, self).__init__(*pathsandlocations)
195 194 _cachedfiles.update(pathsandlocations)
196 195
197 196 def join(self, obj, fnameandlocation):
198 197 fname, location = fnameandlocation
199 198 if location == b'plain':
200 199 return obj.vfs.join(fname)
201 200 else:
202 201 if location != b'':
203 202 raise error.ProgrammingError(
204 203 b'unexpected location: %s' % location
205 204 )
206 205 return obj.sjoin(fname)
207 206
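# A minimal usage sketch: these caches decorate properties inside the
# repository class body, each (path, location) pair naming a watched file;
# b'plain' resolves through the .hg vfs and b'' through the store. The
# property below is illustrative, modeled on how bookmarks are cached.
@mixedrepostorecache((b'bookmarks', b'plain'), (b'00changelog.i', b''))
def _bookmarks_sketch(self):
    return bookmarks.bmstore(self)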
208 207
209 208 def isfilecached(repo, name):
210 209 """check if a repo has already cached "name" filecache-ed property
211 210
212 211 This returns (cachedobj-or-None, iscached) tuple.
213 212 """
214 213 cacheentry = repo.unfiltered()._filecache.get(name, None)
215 214 if not cacheentry:
216 215 return None, False
217 216 return cacheentry.obj, True
218 217
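# A minimal usage sketch for isfilecached(); the property name is
# illustrative. The helper lets callers act on a filecache-ed property
# only if it was ever materialized, without forcing its computation.
cl, cached = isfilecached(repo, 'changelog')
if cached:
    pass  # e.g. invalidate or flush the already-loaded changelog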
219 218
220 219 class unfilteredpropertycache(util.propertycache):
221 220 """propertycache that apply to unfiltered repo only"""
222 221
223 222 def __get__(self, repo, type=None):
224 223 unfi = repo.unfiltered()
225 224 if unfi is repo:
226 225 return super(unfilteredpropertycache, self).__get__(unfi)
227 226 return getattr(unfi, self.name)
228 227
229 228
230 229 class filteredpropertycache(util.propertycache):
231 230 """propertycache that must take filtering in account"""
232 231
233 232 def cachevalue(self, obj, value):
234 233 object.__setattr__(obj, self.name, value)
235 234
236 235
237 236 def hasunfilteredcache(repo, name):
238 237 """check if a repo has an unfilteredpropertycache value for <name>"""
239 238 return name in vars(repo.unfiltered())
240 239
241 240
242 241 def unfilteredmethod(orig):
243 242 """decorate method that always need to be run on unfiltered version"""
244 243
245 244 @functools.wraps(orig)
246 245 def wrapper(repo, *args, **kwargs):
247 246 return orig(repo.unfiltered(), *args, **kwargs)
248 247
249 248 return wrapper
250 249
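# A minimal sketch of the decorator in use; the method is hypothetical.
# Inside the body, ``self`` is always the unfiltered repository, even
# when the method is invoked on a filtered view.
class _sketchrepo(localrepository):
    @unfilteredmethod
    def destroyed_like(self):
        ...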
251 250
252 251 moderncaps = {
253 252 b'lookup',
254 253 b'branchmap',
255 254 b'pushkey',
256 255 b'known',
257 256 b'getbundle',
258 257 b'unbundle',
259 258 }
260 259 legacycaps = moderncaps.union({b'changegroupsubset'})
261 260
262 261
263 262 class localcommandexecutor: # (repository.ipeercommandexecutor)
264 263 def __init__(self, peer):
265 264 self._peer = peer
266 265 self._sent = False
267 266 self._closed = False
268 267
269 268 def __enter__(self):
270 269 return self
271 270
272 271 def __exit__(self, exctype, excvalue, exctb):
273 272 self.close()
274 273
275 274 def callcommand(self, command, args):
276 275 if self._sent:
277 276 raise error.ProgrammingError(
278 277 b'callcommand() cannot be used after sendcommands()'
279 278 )
280 279
281 280 if self._closed:
282 281 raise error.ProgrammingError(
283 282 b'callcommand() cannot be used after close()'
284 283 )
285 284
286 285 # We don't need to support anything fancy. Just call the named
287 286 # method on the peer and return a resolved future.
288 287 fn = getattr(self._peer, pycompat.sysstr(command))
289 288
290 289 f = futures.Future()
291 290
292 291 try:
293 292 result = fn(**pycompat.strkwargs(args))
294 293 except Exception:
295 294 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
296 295 else:
297 296 f.set_result(result)
298 297
299 298 return f
300 299
301 300 def sendcommands(self):
302 301 self._sent = True
303 302
304 303 def close(self):
305 304 self._closed = True
306 305
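# A usage sketch of the executor protocol; ``peer`` is a localpeer
# instance. Commands run synchronously and return already-resolved
# futures, so sendcommands() is effectively a no-op flush here.
with peer.commandexecutor() as e:
    f = e.callcommand(b'lookup', {b'key': b'tip'})
    e.sendcommands()
node = f.result()  # resolved immediately by the direct method call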
307 306
308 307 class localpeer(repository.peer): # (repository.ipeercommands)
309 308 '''peer for a local repo; reflects only the most recent API'''
310 309
311 310 def __init__(self, repo, caps=None, path=None, remotehidden=False):
312 311 super(localpeer, self).__init__(
313 312 repo.ui, path=path, remotehidden=remotehidden
314 313 )
315 314
316 315 if caps is None:
317 316 caps = moderncaps.copy()
318 317 if remotehidden:
319 318 self._repo = repo.filtered(b'served.hidden')
320 319 else:
321 320 self._repo = repo.filtered(b'served')
322 321 if repo._wanted_sidedata:
323 322 formatted = bundle2.format_remote_wanted_sidedata(repo)
324 323 caps.add(b'exp-wanted-sidedata=' + formatted)
325 324
326 325 self._caps = repo._restrictcapabilities(caps)
327 326
328 327 # Begin of _basepeer interface.
329 328
330 329 def url(self):
331 330 return self._repo.url()
332 331
333 332 def local(self):
334 333 return self._repo
335 334
336 335 def canpush(self):
337 336 return True
338 337
339 338 def close(self):
340 339 self._repo.close()
341 340
342 341 # End of _basepeer interface.
343 342
344 343 # Begin of _basewirecommands interface.
345 344
346 345 def branchmap(self):
347 346 return self._repo.branchmap()
348 347
349 348 def capabilities(self):
350 349 return self._caps
351 350
352 351 def get_cached_bundle_inline(self, path):
353 352 # not needed with local peer
354 353 raise NotImplementedError
355 354
356 355 def clonebundles(self):
357 356 return bundlecaches.get_manifest(self._repo)
358 357
359 358 def debugwireargs(self, one, two, three=None, four=None, five=None):
360 359 """Used to test argument passing over the wire"""
361 360 return b"%s %s %s %s %s" % (
362 361 one,
363 362 two,
364 363 pycompat.bytestr(three),
365 364 pycompat.bytestr(four),
366 365 pycompat.bytestr(five),
367 366 )
368 367
369 368 def getbundle(
370 369 self,
371 370 source,
372 371 heads=None,
373 372 common=None,
374 373 bundlecaps=None,
375 374 remote_sidedata=None,
376 375 **kwargs,
377 376 ):
378 377 chunks = exchange.getbundlechunks(
379 378 self._repo,
380 379 source,
381 380 heads=heads,
382 381 common=common,
383 382 bundlecaps=bundlecaps,
384 383 remote_sidedata=remote_sidedata,
385 384 **kwargs,
386 385 )[1]
387 386 cb = util.chunkbuffer(chunks)
388 387
389 388 if exchange.bundle2requested(bundlecaps):
390 389 # When requesting a bundle2, getbundle returns a stream to make the
391 390 # wire level function happier. We need to build a proper object
392 391 # from it in local peer.
393 392 return bundle2.getunbundler(self.ui, cb)
394 393 else:
395 394 return changegroup.getunbundler(b'01', cb, None)
396 395
397 396 def heads(self):
398 397 return self._repo.heads()
399 398
400 399 def known(self, nodes):
401 400 return self._repo.known(nodes)
402 401
403 402 def listkeys(self, namespace):
404 403 return self._repo.listkeys(namespace)
405 404
406 405 def lookup(self, key):
407 406 return self._repo.lookup(key)
408 407
409 408 def pushkey(self, namespace, key, old, new):
410 409 return self._repo.pushkey(namespace, key, old, new)
411 410
412 411 def stream_out(self):
413 412 raise error.Abort(_(b'cannot perform stream clone against local peer'))
414 413
415 414 def unbundle(self, bundle, heads, url):
416 415 """apply a bundle on a repo
417 416
418 417 This function handles the repo locking itself."""
419 418 try:
420 419 try:
421 420 bundle = exchange.readbundle(self.ui, bundle, None)
422 421 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
423 422 if hasattr(ret, 'getchunks'):
424 423 # This is a bundle20 object, turn it into an unbundler.
425 424 # This little dance should be dropped eventually when the
426 425 # API is finally improved.
427 426 stream = util.chunkbuffer(ret.getchunks())
428 427 ret = bundle2.getunbundler(self.ui, stream)
429 428 return ret
430 429 except Exception as exc:
431 430 # If the exception contains output salvaged from a bundle2
432 431 # reply, we need to make sure it is printed before continuing
433 432 # to fail. So we build a bundle2 with such output and consume
434 433 # it directly.
435 434 #
436 435 # This is not very elegant but allows a "simple" solution for
437 436 # issue4594
438 437 output = getattr(exc, '_bundle2salvagedoutput', ())
439 438 if output:
440 439 bundler = bundle2.bundle20(self._repo.ui)
441 440 for out in output:
442 441 bundler.addpart(out)
443 442 stream = util.chunkbuffer(bundler.getchunks())
444 443 b = bundle2.getunbundler(self.ui, stream)
445 444 bundle2.processbundle(self._repo, b)
446 445 raise
447 446 except error.PushRaced as exc:
448 447 raise error.ResponseError(
449 448 _(b'push failed:'), stringutil.forcebytestr(exc)
450 449 )
451 450
452 451 # End of _basewirecommands interface.
453 452
454 453 # Begin of peer interface.
455 454
456 455 def commandexecutor(self):
457 456 return localcommandexecutor(self)
458 457
459 458 # End of peer interface.
460 459
461 460
462 461 class locallegacypeer(localpeer): # (repository.ipeerlegacycommands)
463 462 """peer extension which implements legacy methods too; used for tests with
464 463 restricted capabilities"""
465 464
466 465 def __init__(self, repo, path=None, remotehidden=False):
467 466 super(locallegacypeer, self).__init__(
468 467 repo, caps=legacycaps, path=path, remotehidden=remotehidden
469 468 )
470 469
471 470 # Begin of baselegacywirecommands interface.
472 471
473 472 def between(self, pairs):
474 473 return self._repo.between(pairs)
475 474
476 475 def branches(self, nodes):
477 476 return self._repo.branches(nodes)
478 477
479 478 def changegroup(self, nodes, source):
480 479 outgoing = discovery.outgoing(
481 480 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
482 481 )
483 482 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
484 483
485 484 def changegroupsubset(self, bases, heads, source):
486 485 outgoing = discovery.outgoing(
487 486 self._repo, missingroots=bases, ancestorsof=heads
488 487 )
489 488 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
490 489
491 490 # End of baselegacywirecommands interface.
492 491
493 492
494 493 # Functions receiving (ui, features) that extensions can register to impact
495 494 # the ability to load repositories with custom requirements. Only
496 495 # functions defined in loaded extensions are called.
497 496 #
498 497 # The function receives a set of requirement strings that the repository
499 498 # is capable of opening. Functions will typically add elements to the
500 499 # set to reflect that the extension knows how to handle those requirements.
501 500 featuresetupfuncs = set()
502 501
503 502
504 503 def _getsharedvfs(hgvfs, requirements):
505 504 """returns the vfs object pointing to root of shared source
506 505 repo for a shared repository
507 506
508 507 hgvfs is vfs pointing at .hg/ of current repo (shared one)
509 508 requirements is a set of requirements of current repo (shared one)
510 509 """
511 510 # The ``shared`` or ``relshared`` requirements indicate the
512 511 # store lives in the path contained in the ``.hg/sharedpath`` file.
513 512 # This is an absolute path for ``shared`` and relative to
514 513 # ``.hg/`` for ``relshared``.
515 514 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
516 515 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
517 516 sharedpath = util.normpath(hgvfs.join(sharedpath))
518 517
519 518 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
520 519
521 520 if not sharedvfs.exists():
522 521 raise error.RepoError(
523 522 _(b'.hg/sharedpath points to nonexistent directory %s')
524 523 % sharedvfs.base
525 524 )
526 525 return sharedvfs
527 526
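# A sketch of the two share flavors; the paths are illustrative.
#   shared:    .hg/sharedpath holds b'/srv/source/.hg' -> used verbatim
#   relshared: .hg/sharedpath holds b'../source/.hg'   -> joined to .hg/
#              and normalized via util.normpath()
sharedvfs = _getsharedvfs(hgvfs, requirements)
storepath = sharedvfs.join(b'store')  # where the shared store lives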
528 527
529 528 def makelocalrepository(baseui, path: bytes, intents=None):
530 529 """Create a local repository object.
531 530
532 531 Given arguments needed to construct a local repository, this function
533 532 performs various early repository loading functionality (such as
534 533 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
535 534 the repository can be opened, derives a type suitable for representing
536 535 that repository, and returns an instance of it.
537 536
538 537 The returned object conforms to the ``repository.completelocalrepository``
539 538 interface.
540 539
541 540 The repository type is derived by calling a series of factory functions
542 541 for each aspect/interface of the final repository. These are defined by
543 542 ``REPO_INTERFACES``.
544 543
545 544 Each factory function is called to produce a type implementing a specific
546 545 interface. The cumulative list of returned types will be combined into a
547 546 new type and that type will be instantiated to represent the local
548 547 repository.
549 548
550 549 The factory functions each receive various state that may be consulted
551 550 as part of deriving a type.
552 551
553 552 Extensions should wrap these factory functions to customize repository type
554 553 creation. Note that an extension's wrapped function may be called even if
555 554 that extension is not loaded for the repo being constructed. Extensions
556 555 should check if their ``__name__`` appears in the
557 556 ``extensionmodulenames`` set passed to the factory function and no-op if
558 557 not.
559 558 """
560 559 ui = baseui.copy()
561 560 # Prevent copying repo configuration.
562 561 ui.copy = baseui.copy
563 562
564 563 # Working directory VFS rooted at repository root.
565 564 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
566 565
567 566 # Main VFS for .hg/ directory.
568 567 hgpath = wdirvfs.join(b'.hg')
569 568 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
570 569 # Whether this repository is a shared one or not
571 570 shared = False
572 571 # If this repository is shared, vfs pointing to shared repo
573 572 sharedvfs = None
574 573
575 574 # The .hg/ path should exist and should be a directory. All other
576 575 # cases are errors.
577 576 if not hgvfs.isdir():
578 577 try:
579 578 hgvfs.stat()
580 579 except FileNotFoundError:
581 580 pass
582 581 except ValueError as e:
583 582 # Can be raised on Python 3.8 when path is invalid.
584 583 raise error.Abort(
585 584 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
586 585 )
587 586
588 587 raise error.RepoError(_(b'repository %s not found') % path)
589 588
590 589 requirements = scmutil.readrequires(hgvfs, True)
591 590 shared = (
592 591 requirementsmod.SHARED_REQUIREMENT in requirements
593 592 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
594 593 )
595 594 storevfs = None
596 595 if shared:
597 596 # This is a shared repo
598 597 sharedvfs = _getsharedvfs(hgvfs, requirements)
599 598 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
600 599 else:
601 600 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
602 601
603 602 # if .hg/requires contains the sharesafe requirement, it means
604 603 # there exists a `.hg/store/requires` too and we should read it
605 604 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
606 605 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
607 606 # is not present; refer to checkrequirementscompat() for that
608 607 #
609 608 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
610 609 # repository was shared the old way. We check the share source .hg/requires
611 610 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
612 611 # to be reshared
613 612 hint = _(b"see `hg help config.format.use-share-safe` for more information")
614 613 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
615 614 if (
616 615 shared
617 616 and requirementsmod.SHARESAFE_REQUIREMENT
618 617 not in scmutil.readrequires(sharedvfs, True)
619 618 ):
620 619 mismatch_warn = ui.configbool(
621 620 b'share', b'safe-mismatch.source-not-safe.warn'
622 621 )
623 622 mismatch_config = ui.config(
624 623 b'share', b'safe-mismatch.source-not-safe'
625 624 )
626 625 mismatch_verbose_upgrade = ui.configbool(
627 626 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
628 627 )
629 628 if mismatch_config in (
630 629 b'downgrade-allow',
631 630 b'allow',
632 631 b'downgrade-abort',
633 632 ):
634 633 # prevent cyclic import localrepo -> upgrade -> localrepo
635 634 from . import upgrade
636 635
637 636 upgrade.downgrade_share_to_non_safe(
638 637 ui,
639 638 hgvfs,
640 639 sharedvfs,
641 640 requirements,
642 641 mismatch_config,
643 642 mismatch_warn,
644 643 mismatch_verbose_upgrade,
645 644 )
646 645 elif mismatch_config == b'abort':
647 646 raise error.Abort(
648 647 _(b"share source does not support share-safe requirement"),
649 648 hint=hint,
650 649 )
651 650 else:
652 651 raise error.Abort(
653 652 _(
654 653 b"share-safe mismatch with source.\nUnrecognized"
655 654 b" value '%s' of `share.safe-mismatch.source-not-safe`"
656 655 b" set."
657 656 )
658 657 % mismatch_config,
659 658 hint=hint,
660 659 )
661 660 else:
662 661 requirements |= scmutil.readrequires(storevfs, False)
663 662 elif shared:
664 663 sourcerequires = scmutil.readrequires(sharedvfs, False)
665 664 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
666 665 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
667 666 mismatch_warn = ui.configbool(
668 667 b'share', b'safe-mismatch.source-safe.warn'
669 668 )
670 669 mismatch_verbose_upgrade = ui.configbool(
671 670 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
672 671 )
673 672 if mismatch_config in (
674 673 b'upgrade-allow',
675 674 b'allow',
676 675 b'upgrade-abort',
677 676 ):
678 677 # prevent cyclic import localrepo -> upgrade -> localrepo
679 678 from . import upgrade
680 679
681 680 upgrade.upgrade_share_to_safe(
682 681 ui,
683 682 hgvfs,
684 683 storevfs,
685 684 requirements,
686 685 mismatch_config,
687 686 mismatch_warn,
688 687 mismatch_verbose_upgrade,
689 688 )
690 689 elif mismatch_config == b'abort':
691 690 raise error.Abort(
692 691 _(
693 692 b'version mismatch: source uses share-safe'
694 693 b' functionality while the current share does not'
695 694 ),
696 695 hint=hint,
697 696 )
698 697 else:
699 698 raise error.Abort(
700 699 _(
701 700 b"share-safe mismatch with source.\nUnrecognized"
702 701 b" value '%s' of `share.safe-mismatch.source-safe` set."
703 702 )
704 703 % mismatch_config,
705 704 hint=hint,
706 705 )
707 706
708 707 # The .hg/hgrc file may load extensions or contain config options
709 708 # that influence repository construction. Attempt to load it and
710 709 # process any new extensions that it may have pulled in.
711 710 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
712 711 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
713 712 extensions.loadall(ui)
714 713 extensions.populateui(ui)
715 714
716 715 # Set of module names of extensions loaded for this repository.
717 716 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
718 717
719 718 supportedrequirements = gathersupportedrequirements(ui)
720 719
721 720 # We first validate the requirements are known.
722 721 ensurerequirementsrecognized(requirements, supportedrequirements)
723 722
724 723 # Then we validate that the known set is reasonable to use together.
725 724 ensurerequirementscompatible(ui, requirements)
726 725
727 726 # TODO there are unhandled edge cases related to opening repositories with
728 727 # shared storage. If storage is shared, we should also test for requirements
729 728 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
730 729 # that repo, as that repo may load extensions needed to open it. This is a
731 730 # bit complicated because we don't want the other hgrc to overwrite settings
732 731 # in this hgrc.
733 732 #
734 733 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
735 734 # file when sharing repos. But if a requirement is added after the share is
736 735 # performed, thereby introducing a new requirement for the opener, we may
737 736 # will not see that and could encounter a run-time error interacting with
738 737 # that shared store since it has an unknown-to-us requirement.
739 738
740 739 # At this point, we know we should be capable of opening the repository.
741 740 # Now get on with doing that.
742 741
743 742 features = set()
744 743
745 744 # The "store" part of the repository holds versioned data. How it is
746 745 # accessed is determined by various requirements. If `shared` or
747 746 # `relshared` requirements are present, this indicates current repository
748 747 # is a share and store exists in path mentioned in `.hg/sharedpath`
749 748 if shared:
750 749 storebasepath = sharedvfs.base
751 750 cachepath = sharedvfs.join(b'cache')
752 751 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
753 752 else:
754 753 storebasepath = hgvfs.base
755 754 cachepath = hgvfs.join(b'cache')
756 755 wcachepath = hgvfs.join(b'wcache')
757 756
758 757 # The store has changed over time and the exact layout is dictated by
759 758 # requirements. The store interface abstracts differences across all
760 759 # of them.
761 760 store = makestore(
762 761 requirements,
763 762 storebasepath,
764 763 lambda base: vfsmod.vfs(base, cacheaudited=True),
765 764 )
766 765 hgvfs.createmode = store.createmode
767 766
768 767 storevfs = store.vfs
769 768 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
770 769
771 770 if (
772 771 requirementsmod.REVLOGV2_REQUIREMENT in requirements
773 772 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
774 773 ):
775 774 features.add(repository.REPO_FEATURE_SIDE_DATA)
776 775 # the revlogv2 docket introduced race condition that we need to fix
777 776 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
778 777
779 778 # The cache vfs is used to manage cache files.
780 779 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
781 780 cachevfs.createmode = store.createmode
782 781 # The cache vfs is used to manage cache files related to the working copy
783 782 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
784 783 wcachevfs.createmode = store.createmode
785 784
786 785 # Now resolve the type for the repository object. We do this by repeatedly
787 786 # calling a factory function to produce types for specific aspects of the
788 787 # repo's operation. The aggregate returned types are used as base classes
789 788 # for a dynamically-derived type, which will represent our new repository.
790 789
791 790 bases = []
792 791 extrastate = {}
793 792
794 793 for iface, fn in REPO_INTERFACES:
795 794 # We pass all potentially useful state to give extensions tons of
796 795 # flexibility.
797 796 typ = fn()(
798 797 ui=ui,
799 798 intents=intents,
800 799 requirements=requirements,
801 800 features=features,
802 801 wdirvfs=wdirvfs,
803 802 hgvfs=hgvfs,
804 803 store=store,
805 804 storevfs=storevfs,
806 805 storeoptions=storevfs.options,
807 806 cachevfs=cachevfs,
808 807 wcachevfs=wcachevfs,
809 808 extensionmodulenames=extensionmodulenames,
810 809 extrastate=extrastate,
811 810 baseclasses=bases,
812 811 )
813 812
814 813 if not isinstance(typ, type):
815 814 raise error.ProgrammingError(
816 815 b'unable to construct type for %s' % iface
817 816 )
818 817
819 818 bases.append(typ)
820 819
821 820 # type() allows you to use characters in type names that wouldn't be
822 821 # recognized as Python symbols in source code. We abuse that to add
823 822 # rich information about our constructed repo.
824 823 name = pycompat.sysstr(
825 824 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
826 825 )
827 826
828 827 cls = type(name, tuple(bases), {})
829 828
830 829 return cls(
831 830 baseui=baseui,
832 831 ui=ui,
833 832 origroot=path,
834 833 wdirvfs=wdirvfs,
835 834 hgvfs=hgvfs,
836 835 requirements=requirements,
837 836 supportedrequirements=supportedrequirements,
838 837 sharedpath=storebasepath,
839 838 store=store,
840 839 cachevfs=cachevfs,
841 840 wcachevfs=wcachevfs,
842 841 features=features,
843 842 intents=intents,
844 843 )
845 844
846 845
847 846 def loadhgrc(
848 847 ui,
849 848 wdirvfs: vfsmod.vfs,
850 849 hgvfs: vfsmod.vfs,
851 850 requirements,
852 851 sharedvfs: Optional[vfsmod.vfs] = None,
853 852 ):
854 853 """Load hgrc files/content into a ui instance.
855 854
856 855 This is called during repository opening to load any additional
857 856 config files or settings relevant to the current repository.
858 857
859 858 Returns a bool indicating whether any additional configs were loaded.
860 859
861 860 Extensions should monkeypatch this function to modify how per-repo
862 861 configs are loaded. For example, an extension may wish to pull in
863 862 configs from alternate files or sources.
864 863
865 864 sharedvfs is a vfs object pointing to the source repo if the current one is a
866 865 shared one
867 866 """
868 867 if not rcutil.use_repo_hgrc():
869 868 return False
870 869
871 870 ret = False
872 871 # first load config from shared source if we have to
873 872 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
874 873 try:
875 874 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
876 875 ret = True
877 876 except IOError:
878 877 pass
879 878
880 879 try:
881 880 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
882 881 ret = True
883 882 except IOError:
884 883 pass
885 884
886 885 try:
887 886 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
888 887 ret = True
889 888 except IOError:
890 889 pass
891 890
892 891 return ret
893 892
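# A minimal sketch of the monkeypatching the docstring describes, using
# extensions.wrapfunction(); the extra b'hgrc-extra' file is hypothetical.
def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret

# extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)  # in uisetup()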
894 893
895 894 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
896 895 """Perform additional actions after .hg/hgrc is loaded.
897 896
898 897 This function is called during repository loading immediately after
899 898 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
900 899
901 900 The function can be used to validate configs, automatically add
902 901 options (including extensions) based on requirements, etc.
903 902 """
904 903
905 904 # Map of requirements to list of extensions to load automatically when
906 905 # requirement is present.
907 906 autoextensions = {
908 907 b'git': [b'git'],
909 908 b'largefiles': [b'largefiles'],
910 909 b'lfs': [b'lfs'],
911 910 }
912 911
913 912 for requirement, names in sorted(autoextensions.items()):
914 913 if requirement not in requirements:
915 914 continue
916 915
917 916 for name in names:
918 917 if not ui.hasconfig(b'extensions', name):
919 918 ui.setconfig(b'extensions', name, b'', source=b'autoload')
920 919
921 920
922 921 def gathersupportedrequirements(ui):
923 922 """Determine the complete set of recognized requirements."""
924 923 # Start with all requirements supported by this file.
925 924 supported = set(localrepository._basesupported)
926 925
927 926 # Execute ``featuresetupfuncs`` entries if they belong to an extension
928 927 # relevant to this ui instance.
929 928 modules = {m.__name__ for n, m in extensions.extensions(ui)}
930 929
931 930 for fn in featuresetupfuncs:
932 931 if fn.__module__ in modules:
933 932 fn(ui, supported)
934 933
935 934 # Add derived requirements from registered compression engines.
936 935 for name in util.compengines:
937 936 engine = util.compengines[name]
938 937 if engine.available() and engine.revlogheader():
939 938 supported.add(b'exp-compression-%s' % name)
940 939 if engine.name() == b'zstd':
941 940 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
942 941
943 942 return supported
944 943
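# A sketch of how an extension registers a feature-setup function; the
# requirement string is hypothetical. Only functions whose defining
# module belongs to a loaded extension are invoked above.
def featuresetup(ui, supported):
    supported.add(b'exp-sketch-requirement')

# in the extension's uisetup(): localrepo.featuresetupfuncs.add(featuresetup)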
945 944
946 945 def ensurerequirementsrecognized(requirements, supported):
947 946 """Validate that a set of local requirements is recognized.
948 947
949 948 Receives a set of requirements. Raises an ``error.RepoError`` if there
950 949 exists any requirement in that set that currently loaded code doesn't
951 950 recognize.
952 951
953 952 Returns a set of supported requirements.
954 953 """
955 954 missing = set()
956 955
957 956 for requirement in requirements:
958 957 if requirement in supported:
959 958 continue
960 959
961 960 if not requirement or not requirement[0:1].isalnum():
962 961 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
963 962
964 963 missing.add(requirement)
965 964
966 965 if missing:
967 966 raise error.RequirementError(
968 967 _(b'repository requires features unknown to this Mercurial: %s')
969 968 % b' '.join(sorted(missing)),
970 969 hint=_(
971 970 b'see https://mercurial-scm.org/wiki/MissingRequirement '
972 971 b'for more information'
973 972 ),
974 973 )
975 974
976 975
977 976 def ensurerequirementscompatible(ui, requirements):
978 977 """Validates that a set of recognized requirements is mutually compatible.
979 978
980 979 Some requirements may not be compatible with others or require
981 980 config options that aren't enabled. This function is called during
982 981 repository opening to ensure that the set of requirements needed
983 982 to open a repository is sane and compatible with config options.
984 983
985 984 Extensions can monkeypatch this function to perform additional
986 985 checking.
987 986
988 987 ``error.RepoError`` should be raised on failure.
989 988 """
990 989 if (
991 990 requirementsmod.SPARSE_REQUIREMENT in requirements
992 991 and not sparse.enabled
993 992 ):
994 993 raise error.RepoError(
995 994 _(
996 995 b'repository is using sparse feature but '
997 996 b'sparse is not enabled; enable the '
998 997 b'"sparse" extensions to access'
999 998 )
1000 999 )
1001 1000
1002 1001
1003 1002 def makestore(requirements, path, vfstype):
1004 1003 """Construct a storage object for a repository."""
1005 1004 if requirementsmod.STORE_REQUIREMENT in requirements:
1006 1005 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1007 1006 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1008 1007 return storemod.fncachestore(path, vfstype, dotencode)
1009 1008
1010 1009 return storemod.encodedstore(path, vfstype)
1011 1010
1012 1011 return storemod.basicstore(path, vfstype)
1013 1012
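# The requirement-to-store mapping above, restated with the call shape
# used earlier in makelocalrepository():
#   store + fncache (+/- dotencode) -> storemod.fncachestore
#   store only                      -> storemod.encodedstore
#   neither (very old repos)        -> storemod.basicstore
store = makestore(
    requirements,
    storebasepath,
    lambda base: vfsmod.vfs(base, cacheaudited=True),
)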
1014 1013
1015 1014 def resolvestorevfsoptions(ui, requirements, features):
1016 1015 """Resolve the options to pass to the store vfs opener.
1017 1016
1018 1017 The returned dict is used to influence behavior of the storage layer.
1019 1018 """
1020 1019 options = {}
1021 1020
1022 1021 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1023 1022 options[b'treemanifest'] = True
1024 1023
1025 1024 # experimental config: format.manifestcachesize
1026 1025 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1027 1026 if manifestcachesize is not None:
1028 1027 options[b'manifestcachesize'] = manifestcachesize
1029 1028
1030 1029 # In the absence of another requirement superseding a revlog-related
1031 1030 # requirement, we have to assume the repo is using revlog version 0.
1032 1031 # This revlog format is super old and we don't bother trying to parse
1033 1032 # opener options for it because those options wouldn't do anything
1034 1033 # meaningful on such old repos.
1035 1034 if (
1036 1035 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1037 1036 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1038 1037 ):
1039 1038 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1040 1039 else: # explicitly mark repo as using revlogv0
1041 1040 options[b'revlogv0'] = True
1042 1041
1043 1042 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1044 1043 options[b'copies-storage'] = b'changeset-sidedata'
1045 1044 else:
1046 1045 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1047 1046 copiesextramode = (b'changeset-only', b'compatibility')
1048 1047 if writecopiesto in copiesextramode:
1049 1048 options[b'copies-storage'] = b'extra'
1050 1049
1051 1050 return options
1052 1051
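# The returned dict is attached to the store vfs in makelocalrepository()
# and later consulted by the storage layer; a small consumption sketch:
options = resolvestorevfsoptions(ui, requirements, features)
storevfs.options = options
if options.get(b'copies-storage') == b'changeset-sidedata':
    pass  # sidedata-based copy tracing is in effect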
1053 1052
1054 1053 def resolverevlogstorevfsoptions(ui, requirements, features):
1055 1054 """Resolve opener options specific to revlogs."""
1056 1055
1057 1056 options = {}
1058 1057 options[b'flagprocessors'] = {}
1059 1058
1060 1059 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1061 1060 data_config = options[b'data-config'] = revlog.DataConfig()
1062 1061 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1063 1062
1064 1063 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1065 1064 options[b'revlogv1'] = True
1066 1065 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1067 1066 options[b'revlogv2'] = True
1068 1067 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1069 1068 options[b'changelogv2'] = True
1070 1069 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1071 1070 options[b'changelogv2.compute-rank'] = cmp_rank
1072 1071
1073 1072 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1074 1073 options[b'generaldelta'] = True
1075 1074
1076 1075 # experimental config: format.chunkcachesize
1077 1076 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1078 1077 if chunkcachesize is not None:
1079 1078 data_config.chunk_cache_size = chunkcachesize
1080 1079
1081 1080 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1082 1081 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1083 1082 data_config.uncompressed_cache_count = 10_000
1084 1083 data_config.uncompressed_cache_factor = 4
1085 1084 if memory_profile >= scmutil.RESOURCE_HIGH:
1086 1085 data_config.uncompressed_cache_factor = 10
1087 1086
1088 1087 delta_config.delta_both_parents = ui.configbool(
1089 1088 b'storage', b'revlog.optimize-delta-parent-choice'
1090 1089 )
1091 1090 delta_config.candidate_group_chunk_size = ui.configint(
1092 1091 b'storage',
1093 1092 b'revlog.delta-parent-search.candidate-group-chunk-size',
1094 1093 )
1095 1094 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1096 1095
1097 1096 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1098 1097 options[b'issue6528.fix-incoming'] = issue6528
1099 1098
1100 1099 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1101 1100 lazydeltabase = False
1102 1101 if lazydelta:
1103 1102 lazydeltabase = ui.configbool(
1104 1103 b'storage', b'revlog.reuse-external-delta-parent'
1105 1104 )
1106 1105 if lazydeltabase is None:
1107 1106 lazydeltabase = not scmutil.gddeltaconfig(ui)
1108 1107 delta_config.lazy_delta = lazydelta
1109 1108 delta_config.lazy_delta_base = lazydeltabase
1110 1109
1111 1110 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1112 1111 if 0 <= chainspan:
1113 1112 delta_config.max_deltachain_span = chainspan
1114 1113
1115 1114 has_populate = util.has_mmap_populate()
1116 1115 if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
1117 1116 data_config.mmap_index_threshold = ui.configbytes(
1118 1117 b'storage',
1119 1118 b'revlog.mmap.index:size-threshold',
1120 1119 )
1121 1120
1122 1121 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1123 1122 srdensitythres = float(
1124 1123 ui.config(b'experimental', b'sparse-read.density-threshold')
1125 1124 )
1126 1125 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1127 1126 data_config.with_sparse_read = withsparseread
1128 1127 data_config.sr_density_threshold = srdensitythres
1129 1128 data_config.sr_min_gap_size = srmingapsize
1130 1129
1131 1130 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1132 1131 delta_config.sparse_revlog = sparserevlog
1133 1132 if sparserevlog:
1134 1133 options[b'generaldelta'] = True
1135 1134 data_config.with_sparse_read = True
1136 1135
1137 1136 maxchainlen = None
1138 1137 if sparserevlog:
1139 1138 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1140 1139 # experimental config: format.maxchainlen
1141 1140 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1142 1141 if maxchainlen is not None:
1143 1142 delta_config.max_chain_len = maxchainlen
1144 1143
1145 1144 for r in requirements:
1146 1145 # we allow multiple compression engine requirements to co-exist because,
1147 1146 # strictly speaking, revlog seems to support mixed compression styles.
1148 1147 #
1149 1148 # The compression used for new entries will be "the last one"
1150 1149 prefix = r.startswith
1151 1150 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1152 1151 feature_config.compression_engine = r.split(b'-', 2)[2]
1153 1152
1154 1153 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1155 1154 if zlib_level is not None:
1156 1155 if not (0 <= zlib_level <= 9):
1157 1156 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1158 1157 raise error.Abort(msg % zlib_level)
1159 1158 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1160 1159 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1161 1160 if zstd_level is not None:
1162 1161 if not (0 <= zstd_level <= 22):
1163 1162 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1164 1163 raise error.Abort(msg % zstd_level)
1165 1164 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1166 1165
1167 1166 if requirementsmod.NARROW_REQUIREMENT in requirements:
1168 1167 feature_config.enable_ellipsis = True
1169 1168
1170 1169 if ui.configbool(b'experimental', b'rust.index'):
1171 1170 options[b'rust.index'] = True
1172 1171 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1173 1172 slow_path = ui.config(
1174 1173 b'storage', b'revlog.persistent-nodemap.slow-path'
1175 1174 )
1176 1175 if slow_path not in (b'allow', b'warn', b'abort'):
1177 1176 default = ui.config_default(
1178 1177 b'storage', b'revlog.persistent-nodemap.slow-path'
1179 1178 )
1180 1179 msg = _(
1181 1180 b'unknown value for config '
1182 1181 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1183 1182 )
1184 1183 ui.warn(msg % slow_path)
1185 1184 if not ui.quiet:
1186 1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1187 1186 slow_path = default
1188 1187
1189 1188 msg = _(
1190 1189 b"accessing `persistent-nodemap` repository without associated "
1191 1190 b"fast implementation."
1192 1191 )
1193 1192 hint = _(
1194 1193 b"check `hg help config.format.use-persistent-nodemap` "
1195 1194 b"for details"
1196 1195 )
1197 1196 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1198 1197 if slow_path == b'warn':
1199 1198 msg = b"warning: " + msg + b'\n'
1200 1199 ui.warn(msg)
1201 1200 if not ui.quiet:
1202 1201 hint = b'(' + hint + b')\n'
1203 1202 ui.warn(hint)
1204 1203 if slow_path == b'abort':
1205 1204 raise error.Abort(msg, hint=hint)
1206 1205 options[b'persistent-nodemap'] = True
1207 1206 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1208 1207 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1209 1208 if slow_path not in (b'allow', b'warn', b'abort'):
1210 1209 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1211 1210 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1212 1211 ui.warn(msg % slow_path)
1213 1212 if not ui.quiet:
1214 1213 ui.warn(_(b'falling back to default value: %s\n') % default)
1215 1214 slow_path = default
1216 1215
1217 1216 msg = _(
1218 1217 b"accessing `dirstate-v2` repository without associated "
1219 1218 b"fast implementation."
1220 1219 )
1221 1220 hint = _(
1222 1221 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1223 1222 )
1224 1223 if not dirstate.HAS_FAST_DIRSTATE_V2:
1225 1224 if slow_path == b'warn':
1226 1225 msg = b"warning: " + msg + b'\n'
1227 1226 ui.warn(msg)
1228 1227 if not ui.quiet:
1229 1228 hint = b'(' + hint + b')\n'
1230 1229 ui.warn(hint)
1231 1230 if slow_path == b'abort':
1232 1231 raise error.Abort(msg, hint=hint)
1233 1232 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1234 1233 options[b'persistent-nodemap.mmap'] = True
1235 1234 if ui.configbool(b'devel', b'persistent-nodemap'):
1236 1235 options[b'devel-force-nodemap'] = True
1237 1236
1238 1237 return options
1239 1238
1240 1239
1241 1240 def makemain(**kwargs):
1242 1241 """Produce a type conforming to ``ilocalrepositorymain``."""
1243 1242 return localrepository
1244 1243
1245 1244
1246 1245 class revlogfilestorage: # (repository.ilocalrepositoryfilestorage)
1247 1246 """File storage when using revlogs."""
1248 1247
1249 1248 def file(self, path):
1250 1249 if path.startswith(b'/'):
1251 1250 path = path[1:]
1252 1251
1253 1252 try_split = (
1254 1253 self.currenttransaction() is not None
1255 1254 or txnutil.mayhavepending(self.root)
1256 1255 )
1257 1256
1258 1257 return filelog.filelog(self.svfs, path, try_split=try_split)
1259 1258
1260 1259
1261 1260 class revlognarrowfilestorage: # (repository.ilocalrepositoryfilestorage)
1262 1261 """File storage when using revlogs and narrow files."""
1263 1262
1264 1263 def file(self, path):
1265 1264 if path.startswith(b'/'):
1266 1265 path = path[1:]
1267 1266
1268 1267 try_split = (
1269 1268 self.currenttransaction() is not None
1270 1269 or txnutil.mayhavepending(self.root)
1271 1270 )
1272 1271 return filelog.narrowfilelog(
1273 1272 self.svfs, path, self._storenarrowmatch, try_split=try_split
1274 1273 )
1275 1274
1276 1275
1277 1276 def makefilestorage(requirements, features, **kwargs):
1278 1277 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1279 1278 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1280 1279 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1281 1280
1282 1281 if requirementsmod.NARROW_REQUIREMENT in requirements:
1283 1282 return revlognarrowfilestorage
1284 1283 else:
1285 1284 return revlogfilestorage
1286 1285
1287 1286
1288 1287 # List of repository interfaces and factory functions for them. Each
1289 1288 # will be called in order during ``makelocalrepository()`` to iteratively
1290 1289 # derive the final type for a local repository instance. We capture the
1291 1290 # function as a lambda so we don't hold a reference and the module-level
1292 1291 # functions can be wrapped.
1293 1292 REPO_INTERFACES = [
1294 1293 (repository.ilocalrepositorymain, lambda: makemain),
1295 1294 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1296 1295 ]
1297 1296
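# Because each entry captures ``lambda: makemain`` instead of the function
# itself, rebinding the module-level name is enough for extensions to
# inject behavior; a sketch (the subclass is hypothetical):
def _makemain(orig, **kwargs):
    basecls = orig(**kwargs)

    class sketchrepo(basecls):
        pass  # extra repository behavior would go here

    return sketchrepo

# extensions.wrapfunction(localrepo, 'makemain', _makemain)  # in uisetup()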
1298 1297 _localrepo_base_classes = object
1299 1298
1300 1299 if typing.TYPE_CHECKING:
1301 1300 _localrepo_base_classes = [
1302 1301 repository.ilocalrepositorymain,
1303 1302 repository.ilocalrepositoryfilestorage,
1304 1303 ]
1305 1304
1306 1305
1307 class LocalRepository(_localrepo_base_classes):
1306 class localrepository(_localrepo_base_classes):
1308 1307 """Main class for representing local repositories.
1309 1308
1310 1309 All local repositories are instances of this class.
1311 1310
1312 1311 Constructed on its own, instances of this class are not usable as
1313 1312 repository objects. To obtain a usable repository object, call
1314 1313 ``hg.repository()``, ``localrepo.instance()``, or
1315 1314 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1316 1315 ``instance()`` adds support for creating new repositories.
1317 1316 ``hg.repository()`` adds more extension integration, including calling
1318 1317 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1319 1318 used.
1320 1319 """
1321 1320
1322 1321 _basesupported = {
1323 1322 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1324 1323 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1325 1324 requirementsmod.CHANGELOGV2_REQUIREMENT,
1326 1325 requirementsmod.COPIESSDC_REQUIREMENT,
1327 1326 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1328 1327 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1329 1328 requirementsmod.DOTENCODE_REQUIREMENT,
1330 1329 requirementsmod.FNCACHE_REQUIREMENT,
1331 1330 requirementsmod.GENERALDELTA_REQUIREMENT,
1332 1331 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1333 1332 requirementsmod.NODEMAP_REQUIREMENT,
1334 1333 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1335 1334 requirementsmod.REVLOGV1_REQUIREMENT,
1336 1335 requirementsmod.REVLOGV2_REQUIREMENT,
1337 1336 requirementsmod.SHARED_REQUIREMENT,
1338 1337 requirementsmod.SHARESAFE_REQUIREMENT,
1339 1338 requirementsmod.SPARSE_REQUIREMENT,
1340 1339 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1341 1340 requirementsmod.STORE_REQUIREMENT,
1342 1341 requirementsmod.TREEMANIFEST_REQUIREMENT,
1343 1342 }
1344 1343
1345 1344 # list of prefix for file which can be written without 'wlock'
1346 1345 # Extensions should extend this list when needed
1347 1346 _wlockfreeprefix = {
1348 1347 # We might consider requiring 'wlock' for the next
1349 1348 # two, but pretty much all the existing code assumes
1350 1349 # wlock is not needed, so we keep them excluded for
1351 1350 # now.
1352 1351 b'hgrc',
1353 1352 b'requires',
1354 1353 # XXX cache is a complicated business; someone
1355 1354 # should investigate this in depth at some point
1356 1355 b'cache/',
1357 1356 # XXX bisect was still a bit too messy at the time
1358 1357 # this changeset was introduced. Someone should fix
1359 1358 # the remaining bit and drop this line
1360 1359 b'bisect.state',
1361 1360 }
1362 1361
1363 1362 def __init__(
1364 1363 self,
1365 1364 baseui,
1366 1365 ui,
1367 1366 origroot: bytes,
1368 1367 wdirvfs: vfsmod.vfs,
1369 1368 hgvfs: vfsmod.vfs,
1370 1369 requirements,
1371 1370 supportedrequirements,
1372 1371 sharedpath: bytes,
1373 1372 store,
1374 1373 cachevfs: vfsmod.vfs,
1375 1374 wcachevfs: vfsmod.vfs,
1376 1375 features,
1377 1376 intents=None,
1378 1377 ):
1379 1378 """Create a new local repository instance.
1380 1379
1381 1380 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1382 1381 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1383 1382 object.
1384 1383
1385 1384 Arguments:
1386 1385
1387 1386 baseui
1388 1387 ``ui.ui`` instance that ``ui`` argument was based off of.
1389 1388
1390 1389 ui
1391 1390 ``ui.ui`` instance for use by the repository.
1392 1391
1393 1392 origroot
1394 1393 ``bytes`` path to working directory root of this repository.
1395 1394
1396 1395 wdirvfs
1397 1396 ``vfs.vfs`` rooted at the working directory.
1398 1397
1399 1398 hgvfs
1400 1399 ``vfs.vfs`` rooted at .hg/
1401 1400
1402 1401 requirements
1403 1402 ``set`` of bytestrings representing repository opening requirements.
1404 1403
1405 1404 supportedrequirements
1406 1405 ``set`` of bytestrings representing repository requirements that we
1407 1406 know how to open. May be a superset of ``requirements``.
1408 1407
1409 1408 sharedpath
1410 1409 ``bytes`` Defining path to storage base directory. Points to a
1411 1410 ``.hg/`` directory somewhere.
1412 1411
1413 1412 store
1414 1413 ``store.basicstore`` (or derived) instance providing access to
1415 1414 versioned storage.
1416 1415
1417 1416 cachevfs
1418 1417 ``vfs.vfs`` used for cache files.
1419 1418
1420 1419 wcachevfs
1421 1420 ``vfs.vfs`` used for cache files related to the working copy.
1422 1421
1423 1422 features
1424 1423 ``set`` of bytestrings defining features/capabilities of this
1425 1424 instance.
1426 1425
1427 1426 intents
1428 1427 ``set`` of system strings indicating what this repo will be used
1429 1428 for.
1430 1429 """
1431 1430 self.baseui = baseui
1432 1431 self.ui = ui
1433 1432 self.origroot = origroot
1434 1433 # vfs rooted at working directory.
1435 1434 self.wvfs = wdirvfs
1436 1435 self.root = wdirvfs.base
1437 1436 # vfs rooted at .hg/. Used to access most non-store paths.
1438 1437 self.vfs = hgvfs
1439 1438 self.path = hgvfs.base
1440 1439 self.requirements = requirements
1441 1440 self.nodeconstants = sha1nodeconstants
1442 1441 self.nullid = self.nodeconstants.nullid
1443 1442 self.supported = supportedrequirements
1444 1443 self.sharedpath = sharedpath
1445 1444 self.store = store
1446 1445 self.cachevfs = cachevfs
1447 1446 self.wcachevfs = wcachevfs
1448 1447 self.features = features
1449 1448
1450 1449 self.filtername = None
1451 1450
1452 1451 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1453 1452 b'devel', b'check-locks'
1454 1453 ):
1455 1454 self.vfs.audit = self._getvfsward(self.vfs.audit)
1456 1455 # A list of callbacks to shape the phase if no data were found.
1457 1456 # Callbacks are in the form: func(repo, roots) --> processed root.
1458 1457 # This list is to be filled by extensions during repo setup
1459 1458 self._phasedefaults = []
1460 1459
1461 1460 color.setup(self.ui)
1462 1461
1463 1462 self.spath = self.store.path
1464 1463 self.svfs = self.store.vfs
1465 1464 self.sjoin = self.store.join
1466 1465 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1467 1466 b'devel', b'check-locks'
1468 1467 ):
1469 1468 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1470 1469 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1471 1470 else: # standard vfs
1472 1471 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1473 1472
1474 1473 self._dirstatevalidatewarned = False
1475 1474
1476 1475 self._branchcaches = branchmap.BranchMapCache()
1477 1476 self._revbranchcache = None
1478 1477 self._filterpats = {}
1479 1478 self._datafilters = {}
1480 1479 self._transref = self._lockref = self._wlockref = None
1481 1480
1482 1481 # A cache for various files under .hg/ that tracks file changes,
1483 1482 # (used by the filecache decorator)
1484 1483 #
1485 1484 # Maps a property name to its util.filecacheentry
1486 1485 self._filecache = {}
1487 1486
1488 1487 # hold sets of revisions to be filtered
1489 1488 # should be cleared when something might have changed the filter value:
1490 1489 # - new changesets,
1491 1490 # - phase change,
1492 1491 # - new obsolescence marker,
1493 1492 # - working directory parent change,
1494 1493 # - bookmark changes
1495 1494 self.filteredrevcache = {}
1496 1495
1497 1496 self._dirstate = None
1498 1497 # post-dirstate-status hooks
1499 1498 self._postdsstatus = []
1500 1499
1501 1500 self._pending_narrow_pats = None
1502 1501 self._pending_narrow_pats_dirstate = None
1503 1502
1504 1503 # generic mapping between names and nodes
1505 1504 self.names = namespaces.namespaces()
1506 1505
1507 1506 # Key to signature value.
1508 1507 self._sparsesignaturecache = {}
1509 1508 # Signature to cached matcher instance.
1510 1509 self._sparsematchercache = {}
1511 1510
1512 1511 self._extrafilterid = repoview.extrafilter(ui)
1513 1512
1514 1513 self.filecopiesmode = None
1515 1514 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1516 1515 self.filecopiesmode = b'changeset-sidedata'
1517 1516
1518 1517 self._wanted_sidedata = set()
1519 1518 self._sidedata_computers = {}
1520 1519 sidedatamod.set_sidedata_spec_for_repo(self)
1521 1520
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not hasattr(repo, '_wlockref')
                or not hasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not hasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected it by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
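
    # Example (illustrative sketch, not part of the original module): given a
    # `repo` object, e.g. obtained via `mercurial.hg.repository(ui, path)`,
    # views are created with `filtered()` and never stack on one another
    # (assuming no experimental extra filter is configured):
    #
    #   served = repo.filtered(b'served')
    #   again = served.filtered(b'served')
    #   assert again.filtername == served.filtername
    #   assert served.unfiltered() is repo.unfiltered()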

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race, see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the lock,
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
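
    # Example (illustrative sketch, not part of the original module): the
    # returned object behaves like any other matcher; assuming a repository
    # whose narrowspec only includes `dir1/`:
    #
    #   m = repo.narrowmatch()
    #   m(b'dir1/f')   # -> True
    #   m(b'dir2/f')   # -> False, outside the narrowspec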

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null
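
    # Example (illustrative sketch, not part of the original module): the fast
    # path maps both revision numbers and symbols to (rev, node) pairs. On a
    # view that shows a non-empty working copy:
    #
    #   rev, node = repo._quick_access_changeid[b'.']   # first wc parent
    #   assert repo[rev].node() == node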

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
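
    # Example (illustrative sketch, not part of the original module): all of
    # the following resolve to changectx (or workingctx) instances:
    #
    #   repo[0]          # by integer revision
    #   repo[b'tip']     # symbolic name
    #   repo[b'.']       # first working copy parent
    #   repo[None]       # the working directory context
    #   repo[node]       # a 20-byte binary node or 40-byte hex string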

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]
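
    # Example (illustrative sketch, not part of the original module): the
    # %-formatting escapes values safely into the revset language:
    #
    #   repo.revs(b'heads(branch(%s))', b'default')   # smartset of int revs
    #   for ctx in repo.set(b'%d::%d', 0, 5):
    #       ctx.hex()
    #
    # See ``revsetlang.formatspec`` for the full list of specifiers.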

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            unfi = self.unfiltered()
            self._revbranchcache = rev_branch_cache.revbranchcache(unfi)
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
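
    # Example (illustrative sketch, not part of the original module): typical
    # branch queries against a repository with a 'default' branch:
    #
    #   heads = repo.branchmap()[b'default']    # head nodes for the branch
    #   tip = repo.branchtip(b'default')
    #   gone = repo.branchtip(b'no-such', ignoremissing=True)  # -> None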

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
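
    # Example (illustrative sketch, not part of the original module): the
    # filter tables read here come from hgrc sections such as:
    #
    #   [encode]
    #   *.txt = tr -d '\r'
    #
    #   [decode]
    #   *.txt = unix2dos
    #
    # `_loadfilter(b'encode')` would then yield one (matcher, fn, cmd) entry
    # whose fn pipes matching file data through the configured command.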

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs,
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
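
    # Example (illustrative sketch, not part of the original module): the
    # flags select how the file materializes in the working directory:
    #
    #   repo.wwrite(b'script.sh', data, b'x')   # regular file, exec bit set
    #   repo.wwrite(b'link', b'target', b'l')   # symlink pointing at 'target'
    #   repo.wwrite(b'plain.txt', data, b'')    # regular, non-executable file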

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
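
    # Example (illustrative sketch, not part of the original module): callers
    # that may run inside or outside a transaction often use this pattern:
    #
    #   tr = repo.currenttransaction()
    #   if tr is not None:
    #       tr.addfinalize(b'my-step', callback)  # piggyback on the open txn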

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track this movement from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with the need for rollback to have viable
        # parents at the end of the operation. So back up viable parents at the
        # time of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in a dedicated file and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are
                    # append-only and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr
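
    # Example (illustrative sketch, not part of the original module): a
    # typical write path takes the store lock first, then opens a transaction
    # and lets the context managers unwind in reverse order:
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; commits on success, aborts on error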

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone, and some of the slower caches
        may be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                self.ui.debug(b'updating the branch cache\n')
                dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
                served = self.filtered(b'served')
                self._branchcaches.update_disk(served, detect_pure_topo=dpt)
                served_hidden = self.filtered(b'served.hidden')
                self._branchcaches.update_disk(
                    served_hidden, detect_pure_topo=dpt
                )

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.warm_cache(self)

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches

            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)

        # flush all possibly delayed write.
        self._branchcaches.write_dirty(self)
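
    # Example (illustrative sketch, not part of the original module): an
    # extension or maintenance command can warm a specific subset of caches by
    # passing the constants used above:
    #
    #   from mercurial.interfaces import repository
    #   repo.updatecaches(caches={repository.CACHE_TAGS_DEFAULT,
    #                             repository.CACHE_REV_BRANCH})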

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']
3011 3010 def invalidate(self, clearfilecache=False):
3012 3011 """Invalidates both store and non-store parts other than dirstate
3013 3012
3014 3013 If a transaction is running, invalidation of store is omitted,
3015 3014 because discarding in-memory changes might cause inconsistency
3016 3015 (e.g. incomplete fncache causes unintentional failure, but
3017 3016 redundant one doesn't).
3018 3017 """
3019 3018 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3020 3019 for k in list(self._filecache.keys()):
3021 3020 if (
3022 3021 k == b'changelog'
3023 3022 and self.currenttransaction()
3024 3023 and self.changelog.is_delaying
3025 3024 ):
3026 3025 # The changelog object may store unwritten revisions. We don't
3027 3026 # want to lose them.
3028 3027 # TODO: Solve the problem instead of working around it.
3029 3028 continue
3030 3029
3031 3030 if clearfilecache:
3032 3031 del self._filecache[k]
3033 3032 try:
3034 3033 # XXX ideally, the key would be a unicode string to match the
3035 3034 # fact it refers to an attribute name. However changing this was
3036 3035 # a bit of scope creep compared to the series cleaning up
3037 3036 # del/set/getattr so we kept things simple here.
3038 3037 delattr(unfiltered, pycompat.sysstr(k))
3039 3038 except AttributeError:
3040 3039 pass
3041 3040 self.invalidatecaches()
3042 3041 if not self.currenttransaction():
3043 3042 # TODO: Changing contents of store outside transaction
3044 3043 # causes inconsistency. We should make in-memory store
3045 3044 # changes detectable, and abort if changed.
3046 3045 self.store.invalidatecaches()
3047 3046
3048 3047 def invalidateall(self):
3049 3048 """Fully invalidates both store and non-store parts, causing the
3050 3049 subsequent operation to reread any outside changes."""
3051 3050 # extension should hook this to invalidate its caches
3052 3051 self.invalidate()
3053 3052 self.invalidatedirstate()
3054 3053
3055 3054 @unfilteredmethod
3056 3055 def _refreshfilecachestats(self, tr):
3057 3056 """Reload stats of cached files so that they are flagged as valid"""
3058 3057 for k, ce in self._filecache.items():
3059 3058 k = pycompat.sysstr(k)
3060 3059 if k == 'dirstate' or k not in self.__dict__:
3061 3060 continue
3062 3061 ce.refresh()
3063 3062
3064 3063 def _lock(
3065 3064 self,
3066 3065 vfs,
3067 3066 lockname,
3068 3067 wait,
3069 3068 releasefn,
3070 3069 acquirefn,
3071 3070 desc,
3072 3071 ):
3073 3072 timeout = 0
3074 3073 warntimeout = 0
3075 3074 if wait:
3076 3075 timeout = self.ui.configint(b"ui", b"timeout")
3077 3076 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3078 3077 # internal config: ui.signal-safe-lock
3079 3078 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3080 3079 sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
3081 3080 if not sync_file:
3082 3081 sync_file = None
3083 3082
3084 3083 l = lockmod.trylock(
3085 3084 self.ui,
3086 3085 vfs,
3087 3086 lockname,
3088 3087 timeout,
3089 3088 warntimeout,
3090 3089 releasefn=releasefn,
3091 3090 acquirefn=acquirefn,
3092 3091 desc=desc,
3093 3092 signalsafe=signalsafe,
3094 3093 devel_wait_sync_file=sync_file,
3095 3094 )
3096 3095 return l
3097 3096
3098 3097 def _afterlock(self, callback):
3099 3098 """add a callback to be run when the repository is fully unlocked
3100 3099
3101 3100 The callback will be executed when the outermost lock is released
3102 3101 (with wlock being higher level than 'lock')."""
3103 3102 for ref in (self._wlockref, self._lockref):
3104 3103 l = ref and ref()
3105 3104 if l and l.held:
3106 3105 l.postrelease.append(callback)
3107 3106 break
3108 3107 else: # no lock has been found.
3109 3108 callback(True)
3110 3109
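    # A hedged example of registering such a callback (the function name is
    # illustrative): it receives a single success boolean once the outermost
    # lock is released, or runs immediately if no lock is currently held.
    #
    #     def _report(success):
    #         repo.ui.note(b'afterlock callback ran\n')
    #
    #     repo._afterlock(_report)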
3111 3110 def lock(self, wait=True):
3112 3111 """Lock the repository store (.hg/store) and return a weak reference
3113 3112 to the lock. Use this before modifying the store (e.g. committing or
3114 3113 stripping). If you are opening a transaction, get a lock as well.
3115 3114
3116 3115 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3117 3116 'wlock' first to avoid a dead-lock hazard."""
3118 3117 l = self._currentlock(self._lockref)
3119 3118 if l is not None:
3120 3119 l.lock()
3121 3120 return l
3122 3121
3123 3122 self.hook(b'prelock', throw=True)
3124 3123 l = self._lock(
3125 3124 vfs=self.svfs,
3126 3125 lockname=b"lock",
3127 3126 wait=wait,
3128 3127 releasefn=None,
3129 3128 acquirefn=self.invalidate,
3130 3129 desc=_(b'repository %s') % self.origroot,
3131 3130 )
3132 3131 self._lockref = weakref.ref(l)
3133 3132 return l
3134 3133
3135 3134 def wlock(self, wait=True):
3136 3135 """Lock the non-store parts of the repository (everything under
3137 3136 .hg except .hg/store) and return a weak reference to the lock.
3138 3137
3139 3138 Use this before modifying files in .hg.
3140 3139
3141 3140 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3142 3141 'wlock' first to avoid a dead-lock hazard."""
3143 3142 l = self._wlockref() if self._wlockref else None
3144 3143 if l is not None and l.held:
3145 3144 l.lock()
3146 3145 return l
3147 3146
3148 3147 self.hook(b'prewlock', throw=True)
3149 3148 # We do not need to check for non-waiting lock acquisition. Such
3150 3149 # acquisition would not cause a dead-lock, as it would just fail.
3151 3150 if wait and (
3152 3151 self.ui.configbool(b'devel', b'all-warnings')
3153 3152 or self.ui.configbool(b'devel', b'check-locks')
3154 3153 ):
3155 3154 if self._currentlock(self._lockref) is not None:
3156 3155 self.ui.develwarn(b'"wlock" acquired after "lock"')
3157 3156
3158 3157 def unlock():
3159 3158 if self.dirstate.is_changing_any:
3160 3159 msg = b"wlock release in the middle of a changing parents"
3161 3160 self.ui.develwarn(msg)
3162 3161 self.dirstate.invalidate()
3163 3162 else:
3164 3163 if self.dirstate._dirty:
3165 3164 msg = b"dirty dirstate on wlock release"
3166 3165 self.ui.develwarn(msg)
3167 3166 self.dirstate.write(None)
3168 3167
3169 3168 unfi = self.unfiltered()
3170 3169 if 'dirstate' in unfi.__dict__:
3171 3170 del unfi.__dict__['dirstate']
3172 3171
3173 3172 l = self._lock(
3174 3173 self.vfs,
3175 3174 b"wlock",
3176 3175 wait,
3177 3176 unlock,
3178 3177 self.invalidatedirstate,
3179 3178 _(b'working directory of %s') % self.origroot,
3180 3179 )
3181 3180 self._wlockref = weakref.ref(l)
3182 3181 return l
3183 3182
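    # Sketch of the acquisition order documented above, assuming `repo` is a
    # local repository obtained elsewhere: take 'wlock' before 'lock', and
    # open the transaction while holding both.
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'example') as tr:
    #             pass  # mutate store and working copy state here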
3184 3183 def _currentlock(self, lockref):
3185 3184 """Returns the lock if it's held, or None if it's not."""
3186 3185 if lockref is None:
3187 3186 return None
3188 3187 l = lockref()
3189 3188 if l is None or not l.held:
3190 3189 return None
3191 3190 return l
3192 3191
3193 3192 def currentwlock(self):
3194 3193 """Returns the wlock if it's held, or None if it's not."""
3195 3194 return self._currentlock(self._wlockref)
3196 3195
3197 3196 def currentlock(self):
3198 3197 """Returns the lock if it's held, or None if it's not."""
3199 3198 return self._currentlock(self._lockref)
3200 3199
3201 3200 def checkcommitpatterns(self, wctx, match, status, fail):
3202 3201 """check for commit arguments that aren't committable"""
3203 3202 if match.isexact() or match.prefix():
3204 3203 matched = set(status.modified + status.added + status.removed)
3205 3204
3206 3205 for f in match.files():
3207 3206 f = self.dirstate.normalize(f)
3208 3207 if f == b'.' or f in matched or f in wctx.substate:
3209 3208 continue
3210 3209 if f in status.deleted:
3211 3210 fail(f, _(b'file not found!'))
3212 3211 # Is it a directory that exists or used to exist?
3213 3212 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3214 3213 d = f + b'/'
3215 3214 for mf in matched:
3216 3215 if mf.startswith(d):
3217 3216 break
3218 3217 else:
3219 3218 fail(f, _(b"no match under directory!"))
3220 3219 elif f not in self.dirstate:
3221 3220 fail(f, _(b"file not tracked!"))
3222 3221
3223 3222 @unfilteredmethod
3224 3223 def commit(
3225 3224 self,
3226 3225 text=b"",
3227 3226 user=None,
3228 3227 date=None,
3229 3228 match=None,
3230 3229 force=False,
3231 3230 editor=None,
3232 3231 extra=None,
3233 3232 ):
3234 3233 """Add a new revision to current repository.
3235 3234
3236 3235 Revision information is gathered from the working directory,
3237 3236 match can be used to filter the committed files. If editor is
3238 3237 supplied, it is called to get a commit message.
3239 3238 """
3240 3239 if extra is None:
3241 3240 extra = {}
3242 3241
3243 3242 def fail(f, msg):
3244 3243 raise error.InputError(b'%s: %s' % (f, msg))
3245 3244
3246 3245 if not match:
3247 3246 match = matchmod.always()
3248 3247
3249 3248 if not force:
3250 3249 match.bad = fail
3251 3250
3252 3251 # lock() for recent changelog (see issue4368)
3253 3252 with self.wlock(), self.lock():
3254 3253 wctx = self[None]
3255 3254 merge = len(wctx.parents()) > 1
3256 3255
3257 3256 if not force and merge and not match.always():
3258 3257 raise error.Abort(
3259 3258 _(
3260 3259 b'cannot partially commit a merge '
3261 3260 b'(do not specify files or patterns)'
3262 3261 )
3263 3262 )
3264 3263
3265 3264 status = self.status(match=match, clean=force)
3266 3265 if force:
3267 3266 status.modified.extend(
3268 3267 status.clean
3269 3268 ) # mq may commit clean files
3270 3269
3271 3270 # check subrepos
3272 3271 subs, commitsubs, newstate = subrepoutil.precommit(
3273 3272 self.ui, wctx, status, match, force=force
3274 3273 )
3275 3274
3276 3275 # make sure all explicit patterns are matched
3277 3276 if not force:
3278 3277 self.checkcommitpatterns(wctx, match, status, fail)
3279 3278
3280 3279 cctx = context.workingcommitctx(
3281 3280 self, status, text, user, date, extra
3282 3281 )
3283 3282
3284 3283 ms = mergestatemod.mergestate.read(self)
3285 3284 mergeutil.checkunresolved(ms)
3286 3285
3287 3286 # internal config: ui.allowemptycommit
3288 3287 if cctx.isempty() and not self.ui.configbool(
3289 3288 b'ui', b'allowemptycommit'
3290 3289 ):
3291 3290 self.ui.debug(b'nothing to commit, clearing merge state\n')
3292 3291 ms.reset()
3293 3292 return None
3294 3293
3295 3294 if merge and cctx.deleted():
3296 3295 raise error.Abort(_(b"cannot commit merge with missing files"))
3297 3296
3298 3297 if editor:
3299 3298 cctx._text = editor(self, cctx, subs)
3300 3299 edited = text != cctx._text
3301 3300
3302 3301 # Save commit message in case this transaction gets rolled back
3303 3302 # (e.g. by a pretxncommit hook). Leave the content alone on
3304 3303 # the assumption that the user will use the same editor again.
3305 3304 msg_path = self.savecommitmessage(cctx._text)
3306 3305
3307 3306 # commit subs and write new state
3308 3307 if subs:
3309 3308 uipathfn = scmutil.getuipathfn(self)
3310 3309 for s in sorted(commitsubs):
3311 3310 sub = wctx.sub(s)
3312 3311 self.ui.status(
3313 3312 _(b'committing subrepository %s\n')
3314 3313 % uipathfn(subrepoutil.subrelpath(sub))
3315 3314 )
3316 3315 sr = sub.commit(cctx._text, user, date)
3317 3316 newstate[s] = (newstate[s][0], sr)
3318 3317 subrepoutil.writestate(self, newstate)
3319 3318
3320 3319 p1, p2 = self.dirstate.parents()
3321 3320 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3322 3321 try:
3323 3322 self.hook(
3324 3323 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3325 3324 )
3326 3325 with self.transaction(b'commit'):
3327 3326 ret = self.commitctx(cctx, True)
3328 3327 # update bookmarks, dirstate and mergestate
3329 3328 bookmarks.update(self, [p1, p2], ret)
3330 3329 cctx.markcommitted(ret)
3331 3330 ms.reset()
3332 3331 except: # re-raises
3333 3332 if edited:
3334 3333 self.ui.write(
3335 3334 _(b'note: commit message saved in %s\n') % msg_path
3336 3335 )
3337 3336 self.ui.write(
3338 3337 _(
3339 3338 b"note: use 'hg commit --logfile "
3340 3339 b"%s --edit' to reuse it\n"
3341 3340 )
3342 3341 % msg_path
3343 3342 )
3344 3343 raise
3345 3344
3346 3345 def commithook(unused_success):
3347 3346 # hack for commands that use a temporary commit (e.g. histedit):
3348 3347 # the temporary commit may already be stripped before the hook runs
3349 3348 if self.changelog.hasnode(ret):
3350 3349 self.hook(
3351 3350 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3352 3351 )
3353 3352
3354 3353 self._afterlock(commithook)
3355 3354 return ret
3356 3355
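    # Illustrative only (the file name and message are made up): committing a
    # single file programmatically with an exact matcher, mirroring what the
    # `hg commit` command layer arranges for us.
    #
    #     m = matchmod.exact([b'a.txt'])
    #     node = repo.commit(text=b'example', user=b'alice', match=m)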
3357 3356 @unfilteredmethod
3358 3357 def commitctx(self, ctx, error=False, origctx=None):
3359 3358 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3360 3359
3361 3360 @unfilteredmethod
3362 3361 def destroying(self):
3363 3362 """Inform the repository that nodes are about to be destroyed.
3364 3363 Intended for use by strip and rollback, so there's a common
3365 3364 place for anything that has to be done before destroying history.
3366 3365
3367 3366 This is mostly useful for saving state that is in memory and waiting
3368 3367 to be flushed when the current lock is released. Because a call to
3369 3368 destroyed is imminent, the repo will be invalidated causing those
3370 3369 changes to stay in memory (waiting for the next unlock), or vanish
3371 3370 completely.
3372 3371 """
3373 3372 # When using the same lock to commit and strip, the phasecache is left
3374 3373 # dirty after committing. Then when we strip, the repo is invalidated,
3375 3374 # causing those changes to disappear.
3376 3375 if '_phasecache' in vars(self):
3377 3376 self._phasecache.write(self)
3378 3377
3379 3378 @unfilteredmethod
3380 3379 def destroyed(self):
3381 3380 """Inform the repository that nodes have been destroyed.
3382 3381 Intended for use by strip and rollback, so there's a common
3383 3382 place for anything that has to be done after destroying history.
3384 3383 """
3385 3384 # refresh all repository caches
3386 3385 self.updatecaches()
3387 3386
3388 3387 # Ensure the persistent tag cache is updated. Doing it now
3389 3388 # means that the tag cache only has to worry about destroyed
3390 3389 # heads immediately after a strip/rollback. That in turn
3391 3390 # guarantees that "cachetip == currenttip" (comparing both rev
3392 3391 # and node) always means no nodes have been added or destroyed.
3393 3392
3394 3393 # XXX this is suboptimal when qrefresh'ing: we strip the current
3395 3394 # head, refresh the tag cache, then immediately add a new head.
3396 3395 # But I think doing it this way is necessary for the "instant
3397 3396 # tag cache retrieval" case to work.
3398 3397 self.invalidate()
3399 3398
3400 3399 def status(
3401 3400 self,
3402 3401 node1=b'.',
3403 3402 node2=None,
3404 3403 match=None,
3405 3404 ignored=False,
3406 3405 clean=False,
3407 3406 unknown=False,
3408 3407 listsubrepos=False,
3409 3408 ):
3410 3409 '''a convenience method that calls node1.status(node2)'''
3411 3410 return self[node1].status(
3412 3411 node2, match, ignored, clean, unknown, listsubrepos
3413 3412 )
3414 3413
3415 3414 def addpostdsstatus(self, ps):
3416 3415 """Add a callback to run within the wlock, at the point at which status
3417 3416 fixups happen.
3418 3417
3419 3418 On status completion, callback(wctx, status) will be called with the
3420 3419 wlock held, unless the dirstate has changed from underneath or the wlock
3421 3420 couldn't be grabbed.
3422 3421
3423 3422 Callbacks should not capture and use a cached copy of the dirstate --
3424 3423 it might change in the meanwhile. Instead, they should access the
3425 3424 dirstate via wctx.repo().dirstate.
3426 3425
3427 3426 This list is emptied out after each status run -- extensions should
3428 3427 make sure they add to this list each time dirstate.status is called.
3429 3428 Extensions should also make sure they don't call this for statuses
3430 3429 that don't involve the dirstate.
3431 3430 """
3432 3431
3433 3432 # The list is located here for uniqueness reasons -- it is actually
3434 3433 # managed by the workingctx, but that isn't unique per-repo.
3435 3434 self._postdsstatus.append(ps)
3436 3435
3437 3436 def postdsstatus(self):
3438 3437 """Used by workingctx to get the list of post-dirstate-status hooks."""
3439 3438 return self._postdsstatus
3440 3439
3441 3440 def clearpostdsstatus(self):
3442 3441 """Used by workingctx to clear post-dirstate-status hooks."""
3443 3442 del self._postdsstatus[:]
3444 3443
3445 3444 def heads(self, start=None):
3446 3445 if start is None:
3447 3446 cl = self.changelog
3448 3447 headrevs = reversed(cl.headrevs())
3449 3448 return [cl.node(rev) for rev in headrevs]
3450 3449
3451 3450 heads = self.changelog.heads(start)
3452 3451 # sort the output in rev descending order
3453 3452 return sorted(heads, key=self.changelog.rev, reverse=True)
3454 3453
3455 3454 def branchheads(self, branch=None, start=None, closed=False):
3456 3455 """return a (possibly filtered) list of heads for the given branch
3457 3456
3458 3457 Heads are returned in topological order, from newest to oldest.
3459 3458 If branch is None, use the dirstate branch.
3460 3459 If start is not None, return only heads reachable from start.
3461 3460 If closed is True, return heads that are marked as closed as well.
3462 3461 """
3463 3462 if branch is None:
3464 3463 branch = self[None].branch()
3465 3464 branches = self.branchmap()
3466 3465 if not branches.hasbranch(branch):
3467 3466 return []
3468 3467 # the cache returns heads ordered lowest to highest
3469 3468 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3470 3469 if start is not None:
3471 3470 # filter out the heads that cannot be reached from startrev
3472 3471 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3473 3472 bheads = [h for h in bheads if h in fbheads]
3474 3473 return bheads
3475 3474
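    # For example (a sketch, not exercised here): printing the open heads of
    # the 'default' branch, newest first, as hex nodes.
    #
    #     for head in repo.branchheads(b'default'):
    #         repo.ui.write(b'%s\n' % hex(head))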
3476 3475 def branches(self, nodes):
3477 3476 if not nodes:
3478 3477 nodes = [self.changelog.tip()]
3479 3478 b = []
3480 3479 for n in nodes:
3481 3480 t = n
3482 3481 while True:
3483 3482 p = self.changelog.parents(n)
3484 3483 if p[1] != self.nullid or p[0] == self.nullid:
3485 3484 b.append((t, n, p[0], p[1]))
3486 3485 break
3487 3486 n = p[0]
3488 3487 return b
3489 3488
3490 3489 def between(self, pairs):
3491 3490 r = []
3492 3491
3493 3492 for top, bottom in pairs:
3494 3493 n, l, i = top, [], 0
3495 3494 f = 1
3496 3495
3497 3496 while n != bottom and n != self.nullid:
3498 3497 p = self.changelog.parents(n)[0]
3499 3498 if i == f:
3500 3499 l.append(n)
3501 3500 f = f * 2
3502 3501 n = p
3503 3502 i += 1
3504 3503
3505 3504 r.append(l)
3506 3505
3507 3506 return r
3508 3507
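    # Worked example of the loop above: walking first parents from `top`
    # towards `bottom`, a node is recorded each time `i` catches up with `f`
    # (which then doubles), i.e. at distances 1, 2, 4, 8, ... from `top`.
    # Each returned list is thus a logarithmic "skip list" of the nodes
    # between the two endpoints, as used by the legacy wire protocol.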
3509 3508 def checkpush(self, pushop):
3510 3509 """Extensions can override this function if additional checks have
3511 3510 to be performed before pushing, or call it if they override the push
3512 3511 command.
3513 3512 """
3514 3513
3515 3514 @unfilteredpropertycache
3516 3515 def prepushoutgoinghooks(self):
3517 3516 """Return util.hooks consists of a pushop with repo, remote, outgoing
3518 3517 methods, which are called before pushing changesets.
3519 3518 """
3520 3519 return util.hooks()
3521 3520
3522 3521 def pushkey(self, namespace, key, old, new):
3523 3522 try:
3524 3523 tr = self.currenttransaction()
3525 3524 hookargs = {}
3526 3525 if tr is not None:
3527 3526 hookargs.update(tr.hookargs)
3528 3527 hookargs = pycompat.strkwargs(hookargs)
3529 3528 hookargs['namespace'] = namespace
3530 3529 hookargs['key'] = key
3531 3530 hookargs['old'] = old
3532 3531 hookargs['new'] = new
3533 3532 self.hook(b'prepushkey', throw=True, **hookargs)
3534 3533 except error.HookAbort as exc:
3535 3534 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3536 3535 if exc.hint:
3537 3536 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3538 3537 return False
3539 3538 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3540 3539 ret = pushkey.push(self, namespace, key, old, new)
3541 3540
3542 3541 def runhook(unused_success):
3543 3542 self.hook(
3544 3543 b'pushkey',
3545 3544 namespace=namespace,
3546 3545 key=key,
3547 3546 old=old,
3548 3547 new=new,
3549 3548 ret=ret,
3550 3549 )
3551 3550
3552 3551 self._afterlock(runhook)
3553 3552 return ret
3554 3553
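    # A small sketch (the bookmark name and node are made up): moving a
    # bookmark via the pushkey mechanism. For the 'bookmarks' namespace, the
    # old and new values are hex nodes, with b'' standing for "not set".
    #
    #     ok = repo.pushkey(b'bookmarks', b'feature', b'', hex(newnode))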
3555 3554 def listkeys(self, namespace):
3556 3555 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3557 3556 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3558 3557 values = pushkey.list(self, namespace)
3559 3558 self.hook(b'listkeys', namespace=namespace, values=values)
3560 3559 return values
3561 3560
3562 3561 def debugwireargs(self, one, two, three=None, four=None, five=None):
3563 3562 '''used to test argument passing over the wire'''
3564 3563 return b"%s %s %s %s %s" % (
3565 3564 one,
3566 3565 two,
3567 3566 pycompat.bytestr(three),
3568 3567 pycompat.bytestr(four),
3569 3568 pycompat.bytestr(five),
3570 3569 )
3571 3570
3572 3571 def savecommitmessage(self, text):
3573 3572 fp = self.vfs(b'last-message.txt', b'wb')
3574 3573 try:
3575 3574 fp.write(text)
3576 3575 finally:
3577 3576 fp.close()
3578 3577 return self.pathto(fp.name[len(self.root) + 1 :])
3579 3578
3580 3579 def register_wanted_sidedata(self, category):
3581 3580 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3582 3581 # Only revlogv2 repos can want sidedata.
3583 3582 return
3584 3583 self._wanted_sidedata.add(pycompat.bytestr(category))
3585 3584
3586 3585 def register_sidedata_computer(
3587 3586 self, kind, category, keys, computer, flags, replace=False
3588 3587 ):
3589 3588 if kind not in revlogconst.ALL_KINDS:
3590 3589 msg = _(b"unexpected revlog kind '%s'.")
3591 3590 raise error.ProgrammingError(msg % kind)
3592 3591 category = pycompat.bytestr(category)
3593 3592 already_registered = category in self._sidedata_computers.get(kind, [])
3594 3593 if already_registered and not replace:
3595 3594 msg = _(
3596 3595 b"cannot register a sidedata computer twice for category '%s'."
3597 3596 )
3598 3597 raise error.ProgrammingError(msg % category)
3599 3598 if replace and not already_registered:
3600 3599 msg = _(
3601 3600 b"cannot replace a sidedata computer that isn't registered "
3602 3601 b"for category '%s'."
3603 3602 )
3604 3603 raise error.ProgrammingError(msg % category)
3605 3604 self._sidedata_computers.setdefault(kind, {})
3606 3605 self._sidedata_computers[kind][category] = (keys, computer, flags)
3607 3606
3608 3607
3609 localrepository = interfaceutil.implementer(repository.ilocalrepositorymain)(
3610 LocalRepository
3611 )
3612
3613 if typing.TYPE_CHECKING:
3614 # Help pytype by hiding the interface stuff that confuses it.
3615 localrepository = LocalRepository
3616
3617
3618 3608 def undoname(fn: bytes) -> bytes:
3619 3609 base, name = os.path.split(fn)
3620 3610 assert name.startswith(b'journal')
3621 3611 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3622 3612
3623 3613
3624 3614 def instance(ui, path: bytes, create, intents=None, createopts=None):
3625 3615 # prevent cyclic import localrepo -> upgrade -> localrepo
3626 3616 from . import upgrade
3627 3617
3628 3618 localpath = urlutil.urllocalpath(path)
3629 3619 if create:
3630 3620 createrepository(ui, localpath, createopts=createopts)
3631 3621
3632 3622 def repo_maker():
3633 3623 return makelocalrepository(ui, localpath, intents=intents)
3634 3624
3635 3625 repo = repo_maker()
3636 3626 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3637 3627 return repo
3638 3628
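# A hedged usage sketch: most callers reach this through hg.repository(),
# which routes local paths here, but it can be called directly to open an
# existing repository (the path is illustrative).
#
#     repo = instance(ui, b'/path/to/repo', create=False)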
3639 3629
3640 3630 def islocal(path: bytes) -> bool:
3641 3631 return True
3642 3632
3643 3633
3644 3634 def defaultcreateopts(ui, createopts=None):
3645 3635 """Populate the default creation options for a repository.
3646 3636
3647 3637 A dictionary of explicitly requested creation options can be passed
3648 3638 in. Missing keys will be populated.
3649 3639 """
3650 3640 createopts = dict(createopts or {})
3651 3641
3652 3642 if b'backend' not in createopts:
3653 3643 # experimental config: storage.new-repo-backend
3654 3644 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3655 3645
3656 3646 return createopts
3657 3647
3658 3648
3659 3649 def clone_requirements(ui, createopts, srcrepo):
3660 3650 """clone the requirements of a local repo for a local clone
3661 3651
3662 3652 The store requirements are unchanged while the working copy requirements
3663 3653 depend on the configuration.
3664 3654 """
3665 3655 target_requirements = set()
3666 3656 if not srcrepo.requirements:
3667 3657 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3668 3658 # with it.
3669 3659 return target_requirements
3670 3660 createopts = defaultcreateopts(ui, createopts=createopts)
3671 3661 for r in newreporequirements(ui, createopts):
3672 3662 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3673 3663 target_requirements.add(r)
3674 3664
3675 3665 for r in srcrepo.requirements:
3676 3666 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3677 3667 target_requirements.add(r)
3678 3668 return target_requirements
3679 3669
3680 3670
3681 3671 def newreporequirements(ui, createopts):
3682 3672 """Determine the set of requirements for a new local repository.
3683 3673
3684 3674 Extensions can wrap this function to specify custom requirements for
3685 3675 new repositories.
3686 3676 """
3687 3677
3688 3678 if b'backend' not in createopts:
3689 3679 raise error.ProgrammingError(
3690 3680 b'backend key not present in createopts; '
3691 3681 b'was defaultcreateopts() called?'
3692 3682 )
3693 3683
3694 3684 if createopts[b'backend'] != b'revlogv1':
3695 3685 raise error.Abort(
3696 3686 _(
3697 3687 b'unable to determine repository requirements for '
3698 3688 b'storage backend: %s'
3699 3689 )
3700 3690 % createopts[b'backend']
3701 3691 )
3702 3692
3703 3693 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3704 3694 if ui.configbool(b'format', b'usestore'):
3705 3695 requirements.add(requirementsmod.STORE_REQUIREMENT)
3706 3696 if ui.configbool(b'format', b'usefncache'):
3707 3697 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3708 3698 if ui.configbool(b'format', b'dotencode'):
3709 3699 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3710 3700
3711 3701 compengines = ui.configlist(b'format', b'revlog-compression')
3712 3702 for compengine in compengines:
3713 3703 if compengine in util.compengines:
3714 3704 engine = util.compengines[compengine]
3715 3705 if engine.available() and engine.revlogheader():
3716 3706 break
3717 3707 else:
3718 3708 raise error.Abort(
3719 3709 _(
3720 3710 b'compression engines %s defined by '
3721 3711 b'format.revlog-compression not available'
3722 3712 )
3723 3713 % b', '.join(b'"%s"' % e for e in compengines),
3724 3714 hint=_(
3725 3715 b'run "hg debuginstall" to list available '
3726 3716 b'compression engines'
3727 3717 ),
3728 3718 )
3729 3719
3730 3720 # zlib is the historical default and doesn't need an explicit requirement.
3731 3721 if compengine == b'zstd':
3732 3722 requirements.add(b'revlog-compression-zstd')
3733 3723 elif compengine != b'zlib':
3734 3724 requirements.add(b'exp-compression-%s' % compengine)
3735 3725
3736 3726 if scmutil.gdinitconfig(ui):
3737 3727 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3738 3728 if ui.configbool(b'format', b'sparse-revlog'):
3739 3729 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3740 3730
3741 3731 # experimental config: format.use-dirstate-v2
3742 3732 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3743 3733 if ui.configbool(b'format', b'use-dirstate-v2'):
3744 3734 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3745 3735
3746 3736 # experimental config: format.exp-use-copies-side-data-changeset
3747 3737 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3748 3738 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3749 3739 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3750 3740 if ui.configbool(b'experimental', b'treemanifest'):
3751 3741 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3752 3742
3753 3743 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3754 3744 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3755 3745 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3756 3746
3757 3747 revlogv2 = ui.config(b'experimental', b'revlogv2')
3758 3748 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3759 3749 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3760 3750 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3761 3751 # experimental config: format.internal-phase
3762 3752 if ui.configbool(b'format', b'use-internal-phase'):
3763 3753 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3764 3754
3765 3755 # experimental config: format.exp-archived-phase
3766 3756 if ui.configbool(b'format', b'exp-archived-phase'):
3767 3757 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3768 3758
3769 3759 if createopts.get(b'narrowfiles'):
3770 3760 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3771 3761
3772 3762 if createopts.get(b'lfs'):
3773 3763 requirements.add(b'lfs')
3774 3764
3775 3765 if ui.configbool(b'format', b'bookmarks-in-store'):
3776 3766 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3777 3767
3778 3768 # The feature is disabled unless a fast implementation is available.
3779 3769 persistent_nodemap_default = policy.importrust('revlog') is not None
3780 3770 if ui.configbool(
3781 3771 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3782 3772 ):
3783 3773 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3784 3774
3785 3775 # if share-safe is enabled, let's create the new repository with the new
3786 3776 # requirement
3787 3777 if ui.configbool(b'format', b'use-share-safe'):
3788 3778 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3789 3779
3790 3780 # if we are creating a share-repo¹ we have to handle requirements
3791 3781 # differently.
3792 3782 #
3793 3783 # [1] (i.e. reusing the store from another repository, just having a
3794 3784 # working copy)
3795 3785 if b'sharedrepo' in createopts:
3796 3786 source_requirements = set(createopts[b'sharedrepo'].requirements)
3797 3787
3798 3788 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3799 3789 # share to an old school repository, we have to copy the
3800 3790 # requirements and hope for the best.
3801 3791 requirements = source_requirements
3802 3792 else:
3803 3793 # We have control on the working copy only, so "copy" the non
3804 3794 # working copy part over, ignoring previous logic.
3805 3795 to_drop = set()
3806 3796 for req in requirements:
3807 3797 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3808 3798 continue
3809 3799 if req in source_requirements:
3810 3800 continue
3811 3801 to_drop.add(req)
3812 3802 requirements -= to_drop
3813 3803 requirements |= source_requirements
3814 3804
3815 3805 if createopts.get(b'sharedrelative'):
3816 3806 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3817 3807 else:
3818 3808 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3819 3809
3820 3810 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3821 3811 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3822 3812 msg = _(b"ignoring unknown tracked key version: %d\n")
3823 3813 hint = _(
3824 3814 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3825 3815 )
3826 3816 if version != 1:
3827 3817 ui.warn(msg % version, hint=hint)
3828 3818 else:
3829 3819 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3830 3820
3831 3821 return requirements
3832 3822
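# Sketch of typical use (mirroring what createrepository() below does):
# compute the requirement set a fresh repository would get from this
# configuration.
#
#     createopts = defaultcreateopts(ui)
#     reqs = newreporequirements(ui, createopts)
#     # with the default backend this includes REVLOGV1_REQUIREMENT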
3833 3823
3834 3824 def checkrequirementscompat(ui, requirements):
3835 3825 """Checks compatibility of repository requirements enabled and disabled.
3836 3826
3837 3827 Returns a set of requirements which need to be dropped because dependent
3838 3828 requirements are not enabled. Also warns users about it."""
3839 3829
3840 3830 dropped = set()
3841 3831
3842 3832 if requirementsmod.STORE_REQUIREMENT not in requirements:
3843 3833 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3844 3834 ui.warn(
3845 3835 _(
3846 3836 b'ignoring enabled \'format.bookmarks-in-store\' config '
3847 3837 b'because it is incompatible with disabled '
3848 3838 b'\'format.usestore\' config\n'
3849 3839 )
3850 3840 )
3851 3841 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3852 3842
3853 3843 if (
3854 3844 requirementsmod.SHARED_REQUIREMENT in requirements
3855 3845 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3856 3846 ):
3857 3847 raise error.Abort(
3858 3848 _(
3859 3849 b"cannot create shared repository as source was created"
3860 3850 b" with 'format.usestore' config disabled"
3861 3851 )
3862 3852 )
3863 3853
3864 3854 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3865 3855 if ui.hasconfig(b'format', b'use-share-safe'):
3866 3856 msg = _(
3867 3857 b"ignoring enabled 'format.use-share-safe' config because "
3868 3858 b"it is incompatible with disabled 'format.usestore'"
3869 3859 b" config\n"
3870 3860 )
3871 3861 ui.warn(msg)
3872 3862 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3873 3863
3874 3864 return dropped
3875 3865
3876 3866
3877 3867 def filterknowncreateopts(ui, createopts):
3878 3868 """Filters a dict of repo creation options against options that are known.
3879 3869
3880 3870 Receives a dict of repo creation options and returns a dict of those
3881 3871 options that we don't know how to handle.
3882 3872
3883 3873 This function is called as part of repository creation. If the
3884 3874 returned dict contains any items, repository creation will not
3885 3875 be allowed, as it means there was a request to create a repository
3886 3876 with options not recognized by loaded code.
3887 3877
3888 3878 Extensions can wrap this function to filter out creation options
3889 3879 they know how to handle.
3890 3880 """
3891 3881 known = {
3892 3882 b'backend',
3893 3883 b'lfs',
3894 3884 b'narrowfiles',
3895 3885 b'sharedrepo',
3896 3886 b'sharedrelative',
3897 3887 b'shareditems',
3898 3888 b'shallowfilestore',
3899 3889 }
3900 3890
3901 3891 return {k: v for k, v in createopts.items() if k not in known}
3902 3892
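# A sketch of how an extension might teach repository creation about a new
# option ('myopt' is hypothetical):
#
#     def _filterknown(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myopt', None)  # claim the option as handled
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)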
3903 3893
3904 3894 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3905 3895 """Create a new repository in a vfs.
3906 3896
3907 3897 ``path`` path to the new repo's working directory.
3908 3898 ``createopts`` options for the new repository.
3909 3899 ``requirements`` predefined set of requirements.
3910 3900 (incompatible with ``createopts``)
3911 3901
3912 3902 The following keys for ``createopts`` are recognized:
3913 3903
3914 3904 backend
3915 3905 The storage backend to use.
3916 3906 lfs
3917 3907 Repository will be created with ``lfs`` requirement. The lfs extension
3918 3908 will automatically be loaded when the repository is accessed.
3919 3909 narrowfiles
3920 3910 Set up repository to support narrow file storage.
3921 3911 sharedrepo
3922 3912 Repository object from which storage should be shared.
3923 3913 sharedrelative
3924 3914 Boolean indicating if the path to the shared repo should be
3925 3915 stored as relative. By default, the pointer to the "parent" repo
3926 3916 is stored as an absolute path.
3927 3917 shareditems
3928 3918 Set of items to share to the new repository (in addition to storage).
3929 3919 shallowfilestore
3930 3920 Indicates that storage for files should be shallow (not all ancestor
3931 3921 revisions are known).
3932 3922 """
3933 3923
3934 3924 if requirements is not None:
3935 3925 if createopts is not None:
3936 3926 msg = b'cannot specify both createopts and requirements'
3937 3927 raise error.ProgrammingError(msg)
3938 3928 createopts = {}
3939 3929 else:
3940 3930 createopts = defaultcreateopts(ui, createopts=createopts)
3941 3931
3942 3932 unknownopts = filterknowncreateopts(ui, createopts)
3943 3933
3944 3934 if not isinstance(unknownopts, dict):
3945 3935 raise error.ProgrammingError(
3946 3936 b'filterknowncreateopts() did not return a dict'
3947 3937 )
3948 3938
3949 3939 if unknownopts:
3950 3940 raise error.Abort(
3951 3941 _(
3952 3942 b'unable to create repository because of unknown '
3953 3943 b'creation option: %s'
3954 3944 )
3955 3945 % b', '.join(sorted(unknownopts)),
3956 3946 hint=_(b'is a required extension not loaded?'),
3957 3947 )
3958 3948
3959 3949 requirements = newreporequirements(ui, createopts=createopts)
3960 3950 requirements -= checkrequirementscompat(ui, requirements)
3961 3951
3962 3952 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3963 3953
3964 3954 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3965 3955 if hgvfs.exists():
3966 3956 raise error.RepoError(_(b'repository %s already exists') % path)
3967 3957
3968 3958 if b'sharedrepo' in createopts:
3969 3959 sharedpath = createopts[b'sharedrepo'].sharedpath
3970 3960
3971 3961 if createopts.get(b'sharedrelative'):
3972 3962 try:
3973 3963 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3974 3964 sharedpath = util.pconvert(sharedpath)
3975 3965 except (IOError, ValueError) as e:
3976 3966 # ValueError is raised on Windows if the drive letters differ
3977 3967 # on each path.
3978 3968 raise error.Abort(
3979 3969 _(b'cannot calculate relative path'),
3980 3970 hint=stringutil.forcebytestr(e),
3981 3971 )
3982 3972
3983 3973 if not wdirvfs.exists():
3984 3974 wdirvfs.makedirs()
3985 3975
3986 3976 hgvfs.makedir(notindexed=True)
3987 3977 if b'sharedrepo' not in createopts:
3988 3978 hgvfs.mkdir(b'cache')
3989 3979 hgvfs.mkdir(b'wcache')
3990 3980
3991 3981 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3992 3982 if has_store and b'sharedrepo' not in createopts:
3993 3983 hgvfs.mkdir(b'store')
3994 3984
3995 3985 # We create an invalid changelog outside the store so very old
3996 3986 # Mercurial versions (which didn't know about the requirements
3997 3987 # file) encounter an error on reading the changelog. This
3998 3988 # effectively locks out old clients and prevents them from
3999 3989 # mucking with a repo in an unknown format.
4000 3990 #
4001 3991 # The revlog header has version 65535, which won't be recognized by
4002 3992 # such old clients.
4003 3993 hgvfs.append(
4004 3994 b'00changelog.i',
4005 3995 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
4006 3996 b'layout',
4007 3997 )
4008 3998
4009 3999 # Filter the requirements into working copy and store ones
4010 4000 wcreq, storereq = scmutil.filterrequirements(requirements)
4011 4001 # write working copy ones
4012 4002 scmutil.writerequires(hgvfs, wcreq)
4013 4003 # If there are store requirements and the current repository
4014 4004 # is not a shared one, write the store requirements.
4015 4005 # For a new shared repository, we don't need to write the store
4016 4006 # requirements as they are already present in the store's requires file.
4017 4007 if storereq and b'sharedrepo' not in createopts:
4018 4008 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4019 4009 scmutil.writerequires(storevfs, storereq)
4020 4010
4021 4011 # Write out file telling readers where to find the shared store.
4022 4012 if b'sharedrepo' in createopts:
4023 4013 hgvfs.write(b'sharedpath', sharedpath)
4024 4014
4025 4015 if createopts.get(b'shareditems'):
4026 4016 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4027 4017 hgvfs.write(b'shared', shared)
4028 4018
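# For instance (the path is illustrative): creating a repository with
# narrow-file support enabled.
#
#     createrepository(ui, b'/tmp/narrowrepo',
#                      createopts={b'narrowfiles': True})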
4029 4019
4030 4020 def poisonrepository(repo):
4031 4021 """Poison a repository instance so it can no longer be used."""
4032 4022 # Perform any cleanup on the instance.
4033 4023 repo.close()
4034 4024
4035 4025 # Our strategy is to replace the type of the object with one that
4036 4026 # has all attribute lookups result in error.
4037 4027 #
4038 4028 # But we have to allow the close() method because some constructors
4039 4029 # of repos call close() on repo references.
4040 4030 class poisonedrepository:
4041 4031 def __getattribute__(self, item):
4042 4032 if item == 'close':
4043 4033 return object.__getattribute__(self, item)
4044 4034
4045 4035 raise error.ProgrammingError(
4046 4036 b'repo instances should not be used after unshare'
4047 4037 )
4048 4038
4049 4039 def close(self):
4050 4040 pass
4051 4041
4052 4042 # We may have a repoview, which intercepts __setattr__. So be sure
4053 4043 # we operate at the lowest level possible.
4054 4044 object.__setattr__(repo, '__class__', poisonedrepository)