typing: add basic type hints to localrepo.py...
Matt Harbison
r50466:8fa3f7c3 default
@@ -1,3954 +1,3971 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from concurrent import futures
18 from typing import (
19 Optional,
20 )
21
18 22 from .i18n import _
19 23 from .node import (
20 24 bin,
21 25 hex,
22 26 nullrev,
23 27 sha1nodeconstants,
24 28 short,
25 29 )
26 30 from .pycompat import (
27 31 delattr,
28 32 getattr,
29 33 )
30 34 from . import (
31 35 bookmarks,
32 36 branchmap,
33 37 bundle2,
34 38 bundlecaches,
35 39 changegroup,
36 40 color,
37 41 commit,
38 42 context,
39 43 dirstate,
40 44 dirstateguard,
41 45 discovery,
42 46 encoding,
43 47 error,
44 48 exchange,
45 49 extensions,
46 50 filelog,
47 51 hook,
48 52 lock as lockmod,
49 53 match as matchmod,
50 54 mergestate as mergestatemod,
51 55 mergeutil,
52 56 namespaces,
53 57 narrowspec,
54 58 obsolete,
55 59 pathutil,
56 60 phases,
57 61 pushkey,
58 62 pycompat,
59 63 rcutil,
60 64 repoview,
61 65 requirements as requirementsmod,
62 66 revlog,
63 67 revset,
64 68 revsetlang,
65 69 scmutil,
66 70 sparse,
67 71 store as storemod,
68 72 subrepoutil,
69 73 tags as tagsmod,
70 74 transaction,
71 75 txnutil,
72 76 util,
73 77 vfs as vfsmod,
74 78 wireprototypes,
75 79 )
76 80
77 81 from .interfaces import (
78 82 repository,
79 83 util as interfaceutil,
80 84 )
81 85
82 86 from .utils import (
83 87 hashutil,
84 88 procutil,
85 89 stringutil,
86 90 urlutil,
87 91 )
88 92
89 93 from .revlogutils import (
90 94 concurrency_checker as revlogchecker,
91 95 constants as revlogconst,
92 96 sidedata as sidedatamod,
93 97 )
94 98
95 99 release = lockmod.release
96 100 urlerr = util.urlerr
97 101 urlreq = util.urlreq
98 102
99 103 # set of (path, vfs-location) tuples. vfs-location is:
100 104 # - 'plain' for vfs relative paths
101 105 # - '' for svfs relative paths
102 106 _cachedfiles = set()
103 107
104 108
105 109 class _basefilecache(scmutil.filecache):
106 110 """All filecache usage on repo are done for logic that should be unfiltered"""
107 111
108 112 def __get__(self, repo, type=None):
109 113 if repo is None:
110 114 return self
111 115 # proxy to unfiltered __dict__ since filtered repo has no entry
112 116 unfi = repo.unfiltered()
113 117 try:
114 118 return unfi.__dict__[self.sname]
115 119 except KeyError:
116 120 pass
117 121 return super(_basefilecache, self).__get__(unfi, type)
118 122
119 123 def set(self, repo, value):
120 124 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 125
122 126
123 127 class repofilecache(_basefilecache):
124 128 """filecache for files in .hg but outside of .hg/store"""
125 129
126 130 def __init__(self, *paths):
127 131 super(repofilecache, self).__init__(*paths)
128 132 for path in paths:
129 133 _cachedfiles.add((path, b'plain'))
130 134
131 135 def join(self, obj, fname):
132 136 return obj.vfs.join(fname)
133 137
134 138
135 139 class storecache(_basefilecache):
136 140 """filecache for files in the store"""
137 141
138 142 def __init__(self, *paths):
139 143 super(storecache, self).__init__(*paths)
140 144 for path in paths:
141 145 _cachedfiles.add((path, b''))
142 146
143 147 def join(self, obj, fname):
144 148 return obj.sjoin(fname)
145 149
146 150
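For orientation, a hedged sketch of how these decorators are used later in this class: a property declared with repofilecache or storecache is recomputed whenever the named file changes on disk (the property bodies are simplified for illustration):

    @repofilecache(b'bookmarks', b'bookmarks.current')
    def _bookmarks(self):
        # re-read bookmarks whenever either tracked file changes
        return bookmarks.bmstore(self)

    @storecache(b'phaseroots')
    def _phasecache(self):
        # re-read phase data whenever the store file changes
        return phases.phasecache(self, self._phasedefaults)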
147 151 class changelogcache(storecache):
148 152 """filecache for the changelog"""
149 153
150 154 def __init__(self):
151 155 super(changelogcache, self).__init__()
152 156 _cachedfiles.add((b'00changelog.i', b''))
153 157 _cachedfiles.add((b'00changelog.n', b''))
154 158
155 159 def tracked_paths(self, obj):
156 160 paths = [self.join(obj, b'00changelog.i')]
157 161 if obj.store.opener.options.get(b'persistent-nodemap', False):
158 162 paths.append(self.join(obj, b'00changelog.n'))
159 163 return paths
160 164
161 165
162 166 class manifestlogcache(storecache):
163 167 """filecache for the manifestlog"""
164 168
165 169 def __init__(self):
166 170 super(manifestlogcache, self).__init__()
167 171 _cachedfiles.add((b'00manifest.i', b''))
168 172 _cachedfiles.add((b'00manifest.n', b''))
169 173
170 174 def tracked_paths(self, obj):
171 175 paths = [self.join(obj, b'00manifest.i')]
172 176 if obj.store.opener.options.get(b'persistent-nodemap', False):
173 177 paths.append(self.join(obj, b'00manifest.n'))
174 178 return paths
175 179
176 180
177 181 class mixedrepostorecache(_basefilecache):
178 182 """filecache for a mix files in .hg/store and outside"""
179 183
180 184 def __init__(self, *pathsandlocations):
181 185 # scmutil.filecache only uses the path for passing back into our
182 186 # join(), so we can safely pass a list of paths and locations
183 187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
184 188 _cachedfiles.update(pathsandlocations)
185 189
186 190 def join(self, obj, fnameandlocation):
187 191 fname, location = fnameandlocation
188 192 if location == b'plain':
189 193 return obj.vfs.join(fname)
190 194 else:
191 195 if location != b'':
192 196 raise error.ProgrammingError(
193 197 b'unexpected location: %s' % location
194 198 )
195 199 return obj.sjoin(fname)
196 200
197 201
198 202 def isfilecached(repo, name):
199 203 """check if a repo has already cached "name" filecache-ed property
200 204
201 205 This returns (cachedobj-or-None, iscached) tuple.
202 206 """
203 207 cacheentry = repo.unfiltered()._filecache.get(name, None)
204 208 if not cacheentry:
205 209 return None, False
206 210 return cacheentry.obj, True
207 211
208 212
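A hedged usage sketch: callers use this helper to act on a filecache-ed property only when it is already populated, without forcing it to load (the property and method names are illustrative):

    obj, cached = isfilecached(repo, b'_phasecache')
    if cached:
        # only poke the object if it already exists; a miss stays a miss
        obj.invalidate()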
209 213 class unfilteredpropertycache(util.propertycache):
210 214 """propertycache that apply to unfiltered repo only"""
211 215
212 216 def __get__(self, repo, type=None):
213 217 unfi = repo.unfiltered()
214 218 if unfi is repo:
215 219 return super(unfilteredpropertycache, self).__get__(unfi)
216 220 return getattr(unfi, self.name)
217 221
218 222
219 223 class filteredpropertycache(util.propertycache):
220 224 """propertycache that must take filtering in account"""
221 225
222 226 def cachevalue(self, obj, value):
223 227 object.__setattr__(obj, self.name, value)
224 228
225 229
226 230 def hasunfilteredcache(repo, name):
227 231 """check if a repo has an unfilteredpropertycache value for <name>"""
228 232 return name in vars(repo.unfiltered())
229 233
230 234
231 235 def unfilteredmethod(orig):
232 236 """decorate method that always need to be run on unfiltered version"""
233 237
234 238 @functools.wraps(orig)
235 239 def wrapper(repo, *args, **kwargs):
236 240 return orig(repo.unfiltered(), *args, **kwargs)
237 241
238 242 return wrapper
239 243
240 244
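Usage sketch for the decorator, assuming a localrepository method that must ignore repoview filtering (this mirrors how methods such as ``destroyed`` are declared later in this module):

    @unfilteredmethod
    def destroyed(self):
        # ``self`` is always the unfiltered repository here
        self._phasecache.filterunknown(self)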
241 245 moderncaps = {
242 246 b'lookup',
243 247 b'branchmap',
244 248 b'pushkey',
245 249 b'known',
246 250 b'getbundle',
247 251 b'unbundle',
248 252 }
249 253 legacycaps = moderncaps.union({b'changegroupsubset'})
250 254
251 255
252 256 @interfaceutil.implementer(repository.ipeercommandexecutor)
253 257 class localcommandexecutor:
254 258 def __init__(self, peer):
255 259 self._peer = peer
256 260 self._sent = False
257 261 self._closed = False
258 262
259 263 def __enter__(self):
260 264 return self
261 265
262 266 def __exit__(self, exctype, excvalue, exctb):
263 267 self.close()
264 268
265 269 def callcommand(self, command, args):
266 270 if self._sent:
267 271 raise error.ProgrammingError(
268 272 b'callcommand() cannot be used after sendcommands()'
269 273 )
270 274
271 275 if self._closed:
272 276 raise error.ProgrammingError(
273 277 b'callcommand() cannot be used after close()'
274 278 )
275 279
276 280 # We don't need to support anything fancy. Just call the named
277 281 # method on the peer and return a resolved future.
278 282 fn = getattr(self._peer, pycompat.sysstr(command))
279 283
280 284 f = futures.Future()
281 285
282 286 try:
283 287 result = fn(**pycompat.strkwargs(args))
284 288 except Exception:
285 289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
286 290 else:
287 291 f.set_result(result)
288 292
289 293 return f
290 294
291 295 def sendcommands(self):
292 296 self._sent = True
293 297
294 298 def close(self):
295 299 self._closed = True
296 300
297 301
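A minimal usage sketch, assuming ``peer`` is a localpeer instance; because everything runs in-process, callcommand() hands back an already-resolved future:

    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
        e.sendcommands()  # a no-op locally, but required by the interface
    node = f.result()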
298 302 @interfaceutil.implementer(repository.ipeercommands)
299 303 class localpeer(repository.peer):
300 304 '''peer for a local repo; reflects only the most recent API'''
301 305
302 306 def __init__(self, repo, caps=None):
303 307 super(localpeer, self).__init__()
304 308
305 309 if caps is None:
306 310 caps = moderncaps.copy()
307 311 self._repo = repo.filtered(b'served')
308 312 self.ui = repo.ui
309 313
310 314 if repo._wanted_sidedata:
311 315 formatted = bundle2.format_remote_wanted_sidedata(repo)
312 316 caps.add(b'exp-wanted-sidedata=' + formatted)
313 317
314 318 self._caps = repo._restrictcapabilities(caps)
315 319
316 320 # Begin of _basepeer interface.
317 321
318 322 def url(self):
319 323 return self._repo.url()
320 324
321 325 def local(self):
322 326 return self._repo
323 327
324 328 def peer(self):
325 329 return self
326 330
327 331 def canpush(self):
328 332 return True
329 333
330 334 def close(self):
331 335 self._repo.close()
332 336
333 337 # End of _basepeer interface.
334 338
335 339 # Begin of _basewirecommands interface.
336 340
337 341 def branchmap(self):
338 342 return self._repo.branchmap()
339 343
340 344 def capabilities(self):
341 345 return self._caps
342 346
343 347 def clonebundles(self):
344 348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
345 349
346 350 def debugwireargs(self, one, two, three=None, four=None, five=None):
347 351 """Used to test argument passing over the wire"""
348 352 return b"%s %s %s %s %s" % (
349 353 one,
350 354 two,
351 355 pycompat.bytestr(three),
352 356 pycompat.bytestr(four),
353 357 pycompat.bytestr(five),
354 358 )
355 359
356 360 def getbundle(
357 361 self,
358 362 source,
359 363 heads=None,
360 364 common=None,
361 365 bundlecaps=None,
362 366 remote_sidedata=None,
363 367 **kwargs
364 368 ):
365 369 chunks = exchange.getbundlechunks(
366 370 self._repo,
367 371 source,
368 372 heads=heads,
369 373 common=common,
370 374 bundlecaps=bundlecaps,
371 375 remote_sidedata=remote_sidedata,
372 376 **kwargs
373 377 )[1]
374 378 cb = util.chunkbuffer(chunks)
375 379
376 380 if exchange.bundle2requested(bundlecaps):
377 381 # When requesting a bundle2, getbundle returns a stream to make the
378 382 # wire level function happier. We need to build a proper object
379 383 # from it in local peer.
380 384 return bundle2.getunbundler(self.ui, cb)
381 385 else:
382 386 return changegroup.getunbundler(b'01', cb, None)
383 387
384 388 def heads(self):
385 389 return self._repo.heads()
386 390
387 391 def known(self, nodes):
388 392 return self._repo.known(nodes)
389 393
390 394 def listkeys(self, namespace):
391 395 return self._repo.listkeys(namespace)
392 396
393 397 def lookup(self, key):
394 398 return self._repo.lookup(key)
395 399
396 400 def pushkey(self, namespace, key, old, new):
397 401 return self._repo.pushkey(namespace, key, old, new)
398 402
399 403 def stream_out(self):
400 404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
401 405
402 406 def unbundle(self, bundle, heads, url):
403 407 """apply a bundle on a repo
404 408
405 409 This function handles the repo locking itself."""
406 410 try:
407 411 try:
408 412 bundle = exchange.readbundle(self.ui, bundle, None)
409 413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
410 414 if util.safehasattr(ret, b'getchunks'):
411 415 # This is a bundle20 object, turn it into an unbundler.
412 416 # This little dance should be dropped eventually when the
413 417 # API is finally improved.
414 418 stream = util.chunkbuffer(ret.getchunks())
415 419 ret = bundle2.getunbundler(self.ui, stream)
416 420 return ret
417 421 except Exception as exc:
418 422 # If the exception contains output salvaged from a bundle2
419 423 # reply, we need to make sure it is printed before continuing
420 424 # to fail. So we build a bundle2 with such output and consume
421 425 # it directly.
422 426 #
423 427 # This is not very elegant but allows a "simple" solution for
424 428 # issue4594
425 429 output = getattr(exc, '_bundle2salvagedoutput', ())
426 430 if output:
427 431 bundler = bundle2.bundle20(self._repo.ui)
428 432 for out in output:
429 433 bundler.addpart(out)
430 434 stream = util.chunkbuffer(bundler.getchunks())
431 435 b = bundle2.getunbundler(self.ui, stream)
432 436 bundle2.processbundle(self._repo, b)
433 437 raise
434 438 except error.PushRaced as exc:
435 439 raise error.ResponseError(
436 440 _(b'push failed:'), stringutil.forcebytestr(exc)
437 441 )
438 442
439 443 # End of _basewirecommands interface.
440 444
441 445 # Begin of peer interface.
442 446
443 447 def commandexecutor(self):
444 448 return localcommandexecutor(self)
445 449
446 450 # End of peer interface.
447 451
448 452
449 453 @interfaceutil.implementer(repository.ipeerlegacycommands)
450 454 class locallegacypeer(localpeer):
451 455 """peer extension which implements legacy methods too; used for tests with
452 456 restricted capabilities"""
453 457
454 458 def __init__(self, repo):
455 459 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
456 460
457 461 # Begin of baselegacywirecommands interface.
458 462
459 463 def between(self, pairs):
460 464 return self._repo.between(pairs)
461 465
462 466 def branches(self, nodes):
463 467 return self._repo.branches(nodes)
464 468
465 469 def changegroup(self, nodes, source):
466 470 outgoing = discovery.outgoing(
467 471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
468 472 )
469 473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
470 474
471 475 def changegroupsubset(self, bases, heads, source):
472 476 outgoing = discovery.outgoing(
473 477 self._repo, missingroots=bases, ancestorsof=heads
474 478 )
475 479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
476 480
477 481 # End of baselegacywirecommands interface.
478 482
479 483
480 484 # Functions receiving (ui, features) that extensions can register to impact
481 485 # the ability to load repositories with custom requirements. Only
482 486 # functions defined in loaded extensions are called.
483 487 #
484 488 # The function receives a set of requirement strings that the repository
485 489 # is capable of opening. Functions will typically add elements to the
486 490 # set to reflect that the extension knows how to handle those requirements.
487 491 featuresetupfuncs = set()
488 492
489 493
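A hedged sketch of how an extension registers one of these functions; the requirement name is made up for illustration:

    # in an extension module:
    from mercurial import localrepo

    def featuresetup(ui, supported):
        # advertise that this extension can open repos with this requirement
        supported.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)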
490 494 def _getsharedvfs(hgvfs, requirements):
491 495 """returns the vfs object pointing to root of shared source
492 496 repo for a shared repository
493 497
494 498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
495 499 requirements is a set of requirements of current repo (shared one)
496 500 """
497 501 # The ``shared`` or ``relshared`` requirements indicate the
498 502 # store lives in the path contained in the ``.hg/sharedpath`` file.
499 503 # This is an absolute path for ``shared`` and relative to
500 504 # ``.hg/`` for ``relshared``.
501 505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
502 506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
503 507 sharedpath = util.normpath(hgvfs.join(sharedpath))
504 508
505 509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
506 510
507 511 if not sharedvfs.exists():
508 512 raise error.RepoError(
509 513 _(b'.hg/sharedpath points to nonexistent directory %s')
510 514 % sharedvfs.base
511 515 )
512 516 return sharedvfs
513 517
514 518
515 519 def _readrequires(vfs, allowmissing):
516 520 """reads the require file present at root of this vfs
517 521 and return a set of requirements
518 522
519 523 If allowmissing is True, we suppress FileNotFoundError if raised"""
520 524 # requires file contains a newline-delimited list of
521 525 # features/capabilities the opener (us) must have in order to use
522 526 # the repository. This file was introduced in Mercurial 0.9.2,
523 527 # which means very old repositories may not have one. We assume
524 528 # a missing file translates to no requirements.
525 529 read = vfs.tryread if allowmissing else vfs.read
526 530 return set(read(b'requires').splitlines())
527 531
528 532
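For context, the file read here is nothing more than newline-delimited feature names; a typical modern repository's .hg/requires might contain:

    dotencode
    fncache
    generaldelta
    revlogv1
    sparserevlog
    store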
529 def makelocalrepository(baseui, path, intents=None):
533 def makelocalrepository(baseui, path: bytes, intents=None):
530 534 """Create a local repository object.
531 535
532 536 Given arguments needed to construct a local repository, this function
533 537 performs various early repository loading functionality (such as
534 538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
535 539 the repository can be opened, derives a type suitable for representing
536 540 that repository, and returns an instance of it.
537 541
538 542 The returned object conforms to the ``repository.completelocalrepository``
539 543 interface.
540 544
541 545 The repository type is derived by calling a series of factory functions
542 546 for each aspect/interface of the final repository. These are defined by
543 547 ``REPO_INTERFACES``.
544 548
545 549 Each factory function is called to produce a type implementing a specific
546 550 interface. The cumulative list of returned types will be combined into a
547 551 new type and that type will be instantiated to represent the local
548 552 repository.
549 553
550 554 The factory functions each receive various state that may be consulted
551 555 as part of deriving a type.
552 556
553 557 Extensions should wrap these factory functions to customize repository type
554 558 creation. Note that an extension's wrapped function may be called even if
555 559 that extension is not loaded for the repo being constructed. Extensions
556 560 should check if their ``__name__`` appears in the
557 561 ``extensionmodulenames`` set passed to the factory function and no-op if
558 562 not.
559 563 """
560 564 ui = baseui.copy()
561 565 # Prevent copying repo configuration.
562 566 ui.copy = baseui.copy
563 567
564 568 # Working directory VFS rooted at repository root.
565 569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
566 570
567 571 # Main VFS for .hg/ directory.
568 572 hgpath = wdirvfs.join(b'.hg')
569 573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
570 574 # Whether this repository is a shared one or not
571 575 shared = False
572 576 # If this repository is shared, vfs pointing to shared repo
573 577 sharedvfs = None
574 578
575 579 # The .hg/ path should exist and should be a directory. All other
576 580 # cases are errors.
577 581 if not hgvfs.isdir():
578 582 try:
579 583 hgvfs.stat()
580 584 except FileNotFoundError:
581 585 pass
582 586 except ValueError as e:
583 587 # Can be raised on Python 3.8 when path is invalid.
584 588 raise error.Abort(
585 589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
586 590 )
587 591
588 592 raise error.RepoError(_(b'repository %s not found') % path)
589 593
590 594 requirements = _readrequires(hgvfs, True)
591 595 shared = (
592 596 requirementsmod.SHARED_REQUIREMENT in requirements
593 597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
594 598 )
595 599 storevfs = None
596 600 if shared:
597 601 # This is a shared repo
598 602 sharedvfs = _getsharedvfs(hgvfs, requirements)
599 603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
600 604 else:
601 605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
602 606
603 607 # if .hg/requires contains the sharesafe requirement, it means
604 608 # there exists a `.hg/store/requires` too and we should read it
605 609 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
606 610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
607 611 # is not present; refer to checkrequirementscompat() for that
608 612 #
609 613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
610 614 # repository was shared the old way. We check the share source .hg/requires
611 615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
612 616 # to be reshared
613 617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
614 618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
615 619
616 620 if (
617 621 shared
618 622 and requirementsmod.SHARESAFE_REQUIREMENT
619 623 not in _readrequires(sharedvfs, True)
620 624 ):
621 625 mismatch_warn = ui.configbool(
622 626 b'share', b'safe-mismatch.source-not-safe.warn'
623 627 )
624 628 mismatch_config = ui.config(
625 629 b'share', b'safe-mismatch.source-not-safe'
626 630 )
627 631 mismatch_verbose_upgrade = ui.configbool(
628 632 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
629 633 )
630 634 if mismatch_config in (
631 635 b'downgrade-allow',
632 636 b'allow',
633 637 b'downgrade-abort',
634 638 ):
635 639 # prevent cyclic import localrepo -> upgrade -> localrepo
636 640 from . import upgrade
637 641
638 642 upgrade.downgrade_share_to_non_safe(
639 643 ui,
640 644 hgvfs,
641 645 sharedvfs,
642 646 requirements,
643 647 mismatch_config,
644 648 mismatch_warn,
645 649 mismatch_verbose_upgrade,
646 650 )
647 651 elif mismatch_config == b'abort':
648 652 raise error.Abort(
649 653 _(b"share source does not support share-safe requirement"),
650 654 hint=hint,
651 655 )
652 656 else:
653 657 raise error.Abort(
654 658 _(
655 659 b"share-safe mismatch with source.\nUnrecognized"
656 660 b" value '%s' of `share.safe-mismatch.source-not-safe`"
657 661 b" set."
658 662 )
659 663 % mismatch_config,
660 664 hint=hint,
661 665 )
662 666 else:
663 667 requirements |= _readrequires(storevfs, False)
664 668 elif shared:
665 669 sourcerequires = _readrequires(sharedvfs, False)
666 670 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
667 671 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
668 672 mismatch_warn = ui.configbool(
669 673 b'share', b'safe-mismatch.source-safe.warn'
670 674 )
671 675 mismatch_verbose_upgrade = ui.configbool(
672 676 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
673 677 )
674 678 if mismatch_config in (
675 679 b'upgrade-allow',
676 680 b'allow',
677 681 b'upgrade-abort',
678 682 ):
679 683 # prevent cyclic import localrepo -> upgrade -> localrepo
680 684 from . import upgrade
681 685
682 686 upgrade.upgrade_share_to_safe(
683 687 ui,
684 688 hgvfs,
685 689 storevfs,
686 690 requirements,
687 691 mismatch_config,
688 692 mismatch_warn,
689 693 mismatch_verbose_upgrade,
690 694 )
691 695 elif mismatch_config == b'abort':
692 696 raise error.Abort(
693 697 _(
694 698 b'version mismatch: source uses share-safe'
695 699 b' functionality while the current share does not'
696 700 ),
697 701 hint=hint,
698 702 )
699 703 else:
700 704 raise error.Abort(
701 705 _(
702 706 b"share-safe mismatch with source.\nUnrecognized"
703 707 b" value '%s' of `share.safe-mismatch.source-safe` set."
704 708 )
705 709 % mismatch_config,
706 710 hint=hint,
707 711 )
708 712
709 713 # The .hg/hgrc file may load extensions or contain config options
710 714 # that influence repository construction. Attempt to load it and
711 715 # process any new extensions that it may have pulled in.
712 716 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
713 717 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
714 718 extensions.loadall(ui)
715 719 extensions.populateui(ui)
716 720
717 721 # Set of module names of extensions loaded for this repository.
718 722 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
719 723
720 724 supportedrequirements = gathersupportedrequirements(ui)
721 725
722 726 # We first validate the requirements are known.
723 727 ensurerequirementsrecognized(requirements, supportedrequirements)
724 728
725 729 # Then we validate that the known set is reasonable to use together.
726 730 ensurerequirementscompatible(ui, requirements)
727 731
728 732 # TODO there are unhandled edge cases related to opening repositories with
729 733 # shared storage. If storage is shared, we should also test for requirements
730 734 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
731 735 # that repo, as that repo may load extensions needed to open it. This is a
732 736 # bit complicated because we don't want the other hgrc to overwrite settings
733 737 # in this hgrc.
734 738 #
735 739 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
736 740 # file when sharing repos. But if a requirement is added after the share is
737 741 # performed, thereby introducing a new requirement for the opener, we
738 742 # may not see that and could encounter a run-time error interacting with
739 743 # that shared store since it has an unknown-to-us requirement.
740 744
741 745 # At this point, we know we should be capable of opening the repository.
742 746 # Now get on with doing that.
743 747
744 748 features = set()
745 749
746 750 # The "store" part of the repository holds versioned data. How it is
747 751 # accessed is determined by various requirements. If `shared` or
748 752 # `relshared` requirements are present, this indicates the current repository
749 753 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
750 754 if shared:
751 755 storebasepath = sharedvfs.base
752 756 cachepath = sharedvfs.join(b'cache')
753 757 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
754 758 else:
755 759 storebasepath = hgvfs.base
756 760 cachepath = hgvfs.join(b'cache')
757 761 wcachepath = hgvfs.join(b'wcache')
758 762
759 763 # The store has changed over time and the exact layout is dictated by
760 764 # requirements. The store interface abstracts differences across all
761 765 # of them.
762 766 store = makestore(
763 767 requirements,
764 768 storebasepath,
765 769 lambda base: vfsmod.vfs(base, cacheaudited=True),
766 770 )
767 771 hgvfs.createmode = store.createmode
768 772
769 773 storevfs = store.vfs
770 774 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
771 775
772 776 if (
773 777 requirementsmod.REVLOGV2_REQUIREMENT in requirements
774 778 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
775 779 ):
776 780 features.add(repository.REPO_FEATURE_SIDE_DATA)
777 781 # the revlogv2 docket introduced a race condition that we need to fix
778 782 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
779 783
780 784 # The cache vfs is used to manage cache files.
781 785 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
782 786 cachevfs.createmode = store.createmode
783 787 # The cache vfs is used to manage cache files related to the working copy
784 788 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
785 789 wcachevfs.createmode = store.createmode
786 790
787 791 # Now resolve the type for the repository object. We do this by repeatedly
788 792 # calling a factory function to produce types for specific aspects of the
789 793 # repo's operation. The aggregate returned types are used as base classes
790 794 # for a dynamically-derived type, which will represent our new repository.
791 795
792 796 bases = []
793 797 extrastate = {}
794 798
795 799 for iface, fn in REPO_INTERFACES:
796 800 # We pass all potentially useful state to give extensions tons of
797 801 # flexibility.
798 802 typ = fn()(
799 803 ui=ui,
800 804 intents=intents,
801 805 requirements=requirements,
802 806 features=features,
803 807 wdirvfs=wdirvfs,
804 808 hgvfs=hgvfs,
805 809 store=store,
806 810 storevfs=storevfs,
807 811 storeoptions=storevfs.options,
808 812 cachevfs=cachevfs,
809 813 wcachevfs=wcachevfs,
810 814 extensionmodulenames=extensionmodulenames,
811 815 extrastate=extrastate,
812 816 baseclasses=bases,
813 817 )
814 818
815 819 if not isinstance(typ, type):
816 820 raise error.ProgrammingError(
817 821 b'unable to construct type for %s' % iface
818 822 )
819 823
820 824 bases.append(typ)
821 825
822 826 # type() allows you to use characters in type names that wouldn't be
823 827 # recognized as Python symbols in source code. We abuse that to add
824 828 # rich information about our constructed repo.
825 829 name = pycompat.sysstr(
826 830 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
827 831 )
828 832
829 833 cls = type(name, tuple(bases), {})
830 834
831 835 return cls(
832 836 baseui=baseui,
833 837 ui=ui,
834 838 origroot=path,
835 839 wdirvfs=wdirvfs,
836 840 hgvfs=hgvfs,
837 841 requirements=requirements,
838 842 supportedrequirements=supportedrequirements,
839 843 sharedpath=storebasepath,
840 844 store=store,
841 845 cachevfs=cachevfs,
842 846 wcachevfs=wcachevfs,
843 847 features=features,
844 848 intents=intents,
845 849 )
846 850
847 851
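A hedged sketch of the extension hook described in the docstring above: wrap one of the REPO_INTERFACES factory functions, and no-op unless this extension was actually loaded for the repo being constructed (class and module names are illustrative):

    # in an extension module:
    from mercurial import extensions, localrepo

    def wrapped(orig, requirements, features, **kwargs):
        base = orig(requirements, features, **kwargs)
        if __name__ not in kwargs['extensionmodulenames']:
            return base  # extension not loaded for this repo: no-op
        class extendedfilestorage(base):
            pass  # override or extend storage methods here
        return extendedfilestorage

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makefilestorage', wrapped)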
848 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
852 def loadhgrc(
853 ui,
854 wdirvfs: vfsmod.vfs,
855 hgvfs: vfsmod.vfs,
856 requirements,
857 sharedvfs: Optional[vfsmod.vfs] = None,
858 ):
849 859 """Load hgrc files/content into a ui instance.
850 860
851 861 This is called during repository opening to load any additional
852 862 config files or settings relevant to the current repository.
853 863
854 864 Returns a bool indicating whether any additional configs were loaded.
855 865
856 866 Extensions should monkeypatch this function to modify how per-repo
857 867 configs are loaded. For example, an extension may wish to pull in
858 868 configs from alternate files or sources.
859 869
860 870 sharedvfs is a vfs object pointing to the source repo if the current
861 871 one is a shared one
862 872 """
863 873 if not rcutil.use_repo_hgrc():
864 874 return False
865 875
866 876 ret = False
867 877 # first load config from the shared source if we have to
868 878 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
869 879 try:
870 880 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
871 881 ret = True
872 882 except IOError:
873 883 pass
874 884
875 885 try:
876 886 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
877 887 ret = True
878 888 except IOError:
879 889 pass
880 890
881 891 try:
882 892 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
883 893 ret = True
884 894 except IOError:
885 895 pass
886 896
887 897 return ret
888 898
889 899
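A hedged sketch of the monkeypatching this docstring invites; the extra file name is purely illustrative:

    # in an extension module:
    from mercurial import extensions, localrepo

    def extraloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
        ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
        try:
            # pull in one additional per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', extraloadhgrc)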
890 900 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
891 901 """Perform additional actions after .hg/hgrc is loaded.
892 902
893 903 This function is called during repository loading immediately after
894 904 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
895 905
896 906 The function can be used to validate configs, automatically add
897 907 options (including extensions) based on requirements, etc.
898 908 """
899 909
900 910 # Map of requirements to list of extensions to load automatically when
901 911 # the requirement is present.
902 912 autoextensions = {
903 913 b'git': [b'git'],
904 914 b'largefiles': [b'largefiles'],
905 915 b'lfs': [b'lfs'],
906 916 }
907 917
908 918 for requirement, names in sorted(autoextensions.items()):
909 919 if requirement not in requirements:
910 920 continue
911 921
912 922 for name in names:
913 923 if not ui.hasconfig(b'extensions', name):
914 924 ui.setconfig(b'extensions', name, b'', source=b'autoload')
915 925
916 926
917 927 def gathersupportedrequirements(ui):
918 928 """Determine the complete set of recognized requirements."""
919 929 # Start with all requirements supported by this file.
920 930 supported = set(localrepository._basesupported)
921 931
922 932 # Execute ``featuresetupfuncs`` entries if they belong to an extension
923 933 # relevant to this ui instance.
924 934 modules = {m.__name__ for n, m in extensions.extensions(ui)}
925 935
926 936 for fn in featuresetupfuncs:
927 937 if fn.__module__ in modules:
928 938 fn(ui, supported)
929 939
930 940 # Add derived requirements from registered compression engines.
931 941 for name in util.compengines:
932 942 engine = util.compengines[name]
933 943 if engine.available() and engine.revlogheader():
934 944 supported.add(b'exp-compression-%s' % name)
935 945 if engine.name() == b'zstd':
936 946 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
937 947
938 948 return supported
939 949
940 950
941 951 def ensurerequirementsrecognized(requirements, supported):
942 952 """Validate that a set of local requirements is recognized.
943 953
944 954 Receives a set of requirements. Raises an ``error.RepoError`` if there
945 955 exists any requirement in that set that currently loaded code doesn't
946 956 recognize.
947 957
948 958 Returns a set of supported requirements.
949 959 """
950 960 missing = set()
951 961
952 962 for requirement in requirements:
953 963 if requirement in supported:
954 964 continue
955 965
956 966 if not requirement or not requirement[0:1].isalnum():
957 967 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
958 968
959 969 missing.add(requirement)
960 970
961 971 if missing:
962 972 raise error.RequirementError(
963 973 _(b'repository requires features unknown to this Mercurial: %s')
964 974 % b' '.join(sorted(missing)),
965 975 hint=_(
966 976 b'see https://mercurial-scm.org/wiki/MissingRequirement '
967 977 b'for more information'
968 978 ),
969 979 )
970 980
971 981
972 982 def ensurerequirementscompatible(ui, requirements):
973 983 """Validates that a set of recognized requirements is mutually compatible.
974 984
975 985 Some requirements may not be compatible with others or require
976 986 config options that aren't enabled. This function is called during
977 987 repository opening to ensure that the set of requirements needed
978 988 to open a repository is sane and compatible with config options.
979 989
980 990 Extensions can monkeypatch this function to perform additional
981 991 checking.
982 992
983 993 ``error.RepoError`` should be raised on failure.
984 994 """
985 995 if (
986 996 requirementsmod.SPARSE_REQUIREMENT in requirements
987 997 and not sparse.enabled
988 998 ):
989 999 raise error.RepoError(
990 1000 _(
991 1001 b'repository is using sparse feature but '
992 1002 b'sparse is not enabled; enable the '
993 1003 b'"sparse" extensions to access'
994 1004 )
995 1005 )
996 1006
997 1007
998 1008 def makestore(requirements, path, vfstype):
999 1009 """Construct a storage object for a repository."""
1000 1010 if requirementsmod.STORE_REQUIREMENT in requirements:
1001 1011 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1002 1012 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1003 1013 return storemod.fncachestore(path, vfstype, dotencode)
1004 1014
1005 1015 return storemod.encodedstore(path, vfstype)
1006 1016
1007 1017 return storemod.basicstore(path, vfstype)
1008 1018
1009 1019
1010 1020 def resolvestorevfsoptions(ui, requirements, features):
1011 1021 """Resolve the options to pass to the store vfs opener.
1012 1022
1013 1023 The returned dict is used to influence behavior of the storage layer.
1014 1024 """
1015 1025 options = {}
1016 1026
1017 1027 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1018 1028 options[b'treemanifest'] = True
1019 1029
1020 1030 # experimental config: format.manifestcachesize
1021 1031 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1022 1032 if manifestcachesize is not None:
1023 1033 options[b'manifestcachesize'] = manifestcachesize
1024 1034
1025 1035 # In the absence of another requirement superseding a revlog-related
1026 1036 # requirement, we have to assume the repo is using revlog version 0.
1027 1037 # This revlog format is super old and we don't bother trying to parse
1028 1038 # opener options for it because those options wouldn't do anything
1029 1039 # meaningful on such old repos.
1030 1040 if (
1031 1041 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1032 1042 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1033 1043 ):
1034 1044 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1035 1045 else: # explicitly mark repo as using revlogv0
1036 1046 options[b'revlogv0'] = True
1037 1047
1038 1048 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1039 1049 options[b'copies-storage'] = b'changeset-sidedata'
1040 1050 else:
1041 1051 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1042 1052 copiesextramode = (b'changeset-only', b'compatibility')
1043 1053 if writecopiesto in copiesextramode:
1044 1054 options[b'copies-storage'] = b'extra'
1045 1055
1046 1056 return options
1047 1057
1048 1058
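For orientation, a hedged example of what the returned mapping might look like for a typical modern repository (exact keys depend on requirements and config; the values here are illustrative):

    options = resolvestorevfsoptions(ui, requirements, features)
    # e.g. {b'revlogv1': True, b'generaldelta': True,
    #       b'sparse-revlog': True, b'copies-storage': b'extra', ...}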
1049 1059 def resolverevlogstorevfsoptions(ui, requirements, features):
1050 1060 """Resolve opener options specific to revlogs."""
1051 1061
1052 1062 options = {}
1053 1063 options[b'flagprocessors'] = {}
1054 1064
1055 1065 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1056 1066 options[b'revlogv1'] = True
1057 1067 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1058 1068 options[b'revlogv2'] = True
1059 1069 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1060 1070 options[b'changelogv2'] = True
1061 1071
1062 1072 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1063 1073 options[b'generaldelta'] = True
1064 1074
1065 1075 # experimental config: format.chunkcachesize
1066 1076 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1067 1077 if chunkcachesize is not None:
1068 1078 options[b'chunkcachesize'] = chunkcachesize
1069 1079
1070 1080 deltabothparents = ui.configbool(
1071 1081 b'storage', b'revlog.optimize-delta-parent-choice'
1072 1082 )
1073 1083 options[b'deltabothparents'] = deltabothparents
1074 1084 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1075 1085
1076 1086 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1077 1087 options[b'issue6528.fix-incoming'] = issue6528
1078 1088
1079 1089 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1080 1090 lazydeltabase = False
1081 1091 if lazydelta:
1082 1092 lazydeltabase = ui.configbool(
1083 1093 b'storage', b'revlog.reuse-external-delta-parent'
1084 1094 )
1085 1095 if lazydeltabase is None:
1086 1096 lazydeltabase = not scmutil.gddeltaconfig(ui)
1087 1097 options[b'lazydelta'] = lazydelta
1088 1098 options[b'lazydeltabase'] = lazydeltabase
1089 1099
1090 1100 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1091 1101 if 0 <= chainspan:
1092 1102 options[b'maxdeltachainspan'] = chainspan
1093 1103
1094 1104 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1095 1105 if mmapindexthreshold is not None:
1096 1106 options[b'mmapindexthreshold'] = mmapindexthreshold
1097 1107
1098 1108 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1099 1109 srdensitythres = float(
1100 1110 ui.config(b'experimental', b'sparse-read.density-threshold')
1101 1111 )
1102 1112 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1103 1113 options[b'with-sparse-read'] = withsparseread
1104 1114 options[b'sparse-read-density-threshold'] = srdensitythres
1105 1115 options[b'sparse-read-min-gap-size'] = srmingapsize
1106 1116
1107 1117 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1108 1118 options[b'sparse-revlog'] = sparserevlog
1109 1119 if sparserevlog:
1110 1120 options[b'generaldelta'] = True
1111 1121
1112 1122 maxchainlen = None
1113 1123 if sparserevlog:
1114 1124 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1115 1125 # experimental config: format.maxchainlen
1116 1126 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1117 1127 if maxchainlen is not None:
1118 1128 options[b'maxchainlen'] = maxchainlen
1119 1129
1120 1130 for r in requirements:
1121 1131 # we allow multiple compression engine requirements to co-exist because
1122 1132 # strictly speaking, revlog seems to support mixed compression style.
1123 1133 #
1124 1134 # The compression used for new entries will be "the last one"
1125 1135 prefix = r.startswith
1126 1136 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1127 1137 options[b'compengine'] = r.split(b'-', 2)[2]
1128 1138
1129 1139 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1130 1140 if options[b'zlib.level'] is not None:
1131 1141 if not (0 <= options[b'zlib.level'] <= 9):
1132 1142 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1133 1143 raise error.Abort(msg % options[b'zlib.level'])
1134 1144 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1135 1145 if options[b'zstd.level'] is not None:
1136 1146 if not (0 <= options[b'zstd.level'] <= 22):
1137 1147 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1138 1148 raise error.Abort(msg % options[b'zstd.level'])
1139 1149
1140 1150 if requirementsmod.NARROW_REQUIREMENT in requirements:
1141 1151 options[b'enableellipsis'] = True
1142 1152
1143 1153 if ui.configbool(b'experimental', b'rust.index'):
1144 1154 options[b'rust.index'] = True
1145 1155 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1146 1156 slow_path = ui.config(
1147 1157 b'storage', b'revlog.persistent-nodemap.slow-path'
1148 1158 )
1149 1159 if slow_path not in (b'allow', b'warn', b'abort'):
1150 1160 default = ui.config_default(
1151 1161 b'storage', b'revlog.persistent-nodemap.slow-path'
1152 1162 )
1153 1163 msg = _(
1154 1164 b'unknown value for config '
1155 1165 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1156 1166 )
1157 1167 ui.warn(msg % slow_path)
1158 1168 if not ui.quiet:
1159 1169 ui.warn(_(b'falling back to default value: %s\n') % default)
1160 1170 slow_path = default
1161 1171
1162 1172 msg = _(
1163 1173 b"accessing `persistent-nodemap` repository without associated "
1164 1174 b"fast implementation."
1165 1175 )
1166 1176 hint = _(
1167 1177 b"check `hg help config.format.use-persistent-nodemap` "
1168 1178 b"for details"
1169 1179 )
1170 1180 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1171 1181 if slow_path == b'warn':
1172 1182 msg = b"warning: " + msg + b'\n'
1173 1183 ui.warn(msg)
1174 1184 if not ui.quiet:
1175 1185 hint = b'(' + hint + b')\n'
1176 1186 ui.warn(hint)
1177 1187 if slow_path == b'abort':
1178 1188 raise error.Abort(msg, hint=hint)
1179 1189 options[b'persistent-nodemap'] = True
1180 1190 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1181 1191 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1182 1192 if slow_path not in (b'allow', b'warn', b'abort'):
1183 1193 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1184 1194 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1185 1195 ui.warn(msg % slow_path)
1186 1196 if not ui.quiet:
1187 1197 ui.warn(_(b'falling back to default value: %s\n') % default)
1188 1198 slow_path = default
1189 1199
1190 1200 msg = _(
1191 1201 b"accessing `dirstate-v2` repository without associated "
1192 1202 b"fast implementation."
1193 1203 )
1194 1204 hint = _(
1195 1205 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1196 1206 )
1197 1207 if not dirstate.HAS_FAST_DIRSTATE_V2:
1198 1208 if slow_path == b'warn':
1199 1209 msg = b"warning: " + msg + b'\n'
1200 1210 ui.warn(msg)
1201 1211 if not ui.quiet:
1202 1212 hint = b'(' + hint + b')\n'
1203 1213 ui.warn(hint)
1204 1214 if slow_path == b'abort':
1205 1215 raise error.Abort(msg, hint=hint)
1206 1216 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1207 1217 options[b'persistent-nodemap.mmap'] = True
1208 1218 if ui.configbool(b'devel', b'persistent-nodemap'):
1209 1219 options[b'devel-force-nodemap'] = True
1210 1220
1211 1221 return options
1212 1222
1213 1223
1214 1224 def makemain(**kwargs):
1215 1225 """Produce a type conforming to ``ilocalrepositorymain``."""
1216 1226 return localrepository
1217 1227
1218 1228
1219 1229 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1220 1230 class revlogfilestorage:
1221 1231 """File storage when using revlogs."""
1222 1232
1223 1233 def file(self, path):
1224 1234 if path.startswith(b'/'):
1225 1235 path = path[1:]
1226 1236
1227 1237 return filelog.filelog(self.svfs, path)
1228 1238
1229 1239
1230 1240 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1231 1241 class revlognarrowfilestorage:
1232 1242 """File storage when using revlogs and narrow files."""
1233 1243
1234 1244 def file(self, path):
1235 1245 if path.startswith(b'/'):
1236 1246 path = path[1:]
1237 1247
1238 1248 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1239 1249
1240 1250
1241 1251 def makefilestorage(requirements, features, **kwargs):
1242 1252 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1243 1253 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1244 1254 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1245 1255
1246 1256 if requirementsmod.NARROW_REQUIREMENT in requirements:
1247 1257 return revlognarrowfilestorage
1248 1258 else:
1249 1259 return revlogfilestorage
1250 1260
1251 1261
1252 1262 # List of repository interfaces and factory functions for them. Each
1253 1263 # will be called in order during ``makelocalrepository()`` to iteratively
1254 1264 # derive the final type for a local repository instance. We capture the
1255 1265 # function as a lambda so we don't hold a reference and the module-level
1256 1266 # functions can be wrapped.
1257 1267 REPO_INTERFACES = [
1258 1268 (repository.ilocalrepositorymain, lambda: makemain),
1259 1269 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1260 1270 ]
1261 1271
1262 1272
1263 1273 @interfaceutil.implementer(repository.ilocalrepositorymain)
1264 1274 class localrepository:
1265 1275 """Main class for representing local repositories.
1266 1276
1267 1277 All local repositories are instances of this class.
1268 1278
1269 1279 Constructed on its own, instances of this class are not usable as
1270 1280 repository objects. To obtain a usable repository object, call
1271 1281 ``hg.repository()``, ``localrepo.instance()``, or
1272 1282 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1273 1283 ``instance()`` adds support for creating new repositories.
1274 1284 ``hg.repository()`` adds more extension integration, including calling
1275 1285 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1276 1286 used.
1277 1287 """
1278 1288
1279 1289 _basesupported = {
1280 1290 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1281 1291 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1282 1292 requirementsmod.CHANGELOGV2_REQUIREMENT,
1283 1293 requirementsmod.COPIESSDC_REQUIREMENT,
1284 1294 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1285 1295 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1286 1296 requirementsmod.DOTENCODE_REQUIREMENT,
1287 1297 requirementsmod.FNCACHE_REQUIREMENT,
1288 1298 requirementsmod.GENERALDELTA_REQUIREMENT,
1289 1299 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1290 1300 requirementsmod.NODEMAP_REQUIREMENT,
1291 1301 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1292 1302 requirementsmod.REVLOGV1_REQUIREMENT,
1293 1303 requirementsmod.REVLOGV2_REQUIREMENT,
1294 1304 requirementsmod.SHARED_REQUIREMENT,
1295 1305 requirementsmod.SHARESAFE_REQUIREMENT,
1296 1306 requirementsmod.SPARSE_REQUIREMENT,
1297 1307 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1298 1308 requirementsmod.STORE_REQUIREMENT,
1299 1309 requirementsmod.TREEMANIFEST_REQUIREMENT,
1300 1310 }
1301 1311
1302 1312 # list of prefixes for files which can be written without 'wlock'
1303 1313 # Extensions should extend this list when needed
1304 1314 _wlockfreeprefix = {
1305 1315 # We might consider requiring 'wlock' for the next
1306 1316 # two, but pretty much all the existing code assumes
1307 1317 # wlock is not needed so we keep them excluded for
1308 1318 # now.
1309 1319 b'hgrc',
1310 1320 b'requires',
1311 1321 # XXX cache is a complicated business; someone
1312 1322 # should investigate this in depth at some point
1313 1323 b'cache/',
1314 1324 # XXX shouldn't dirstate be covered by the wlock?
1315 1325 b'dirstate',
1316 1326 # XXX bisect was still a bit too messy at the time
1317 1327 # this changeset was introduced. Someone should fix
1318 1328 # the remaining bit and drop this line
1319 1329 b'bisect.state',
1320 1330 }
1321 1331
1322 1332 def __init__(
1323 1333 self,
1324 1334 baseui,
1325 1335 ui,
1326 origroot,
1327 wdirvfs,
1328 hgvfs,
1336 origroot: bytes,
1337 wdirvfs: vfsmod.vfs,
1338 hgvfs: vfsmod.vfs,
1329 1339 requirements,
1330 1340 supportedrequirements,
1331 sharedpath,
1341 sharedpath: bytes,
1332 1342 store,
1333 cachevfs,
1334 wcachevfs,
1343 cachevfs: vfsmod.vfs,
1344 wcachevfs: vfsmod.vfs,
1335 1345 features,
1336 1346 intents=None,
1337 1347 ):
1338 1348 """Create a new local repository instance.
1339 1349
1340 1350 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1341 1351 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1342 1352 object.
1343 1353
1344 1354 Arguments:
1345 1355
1346 1356 baseui
1347 1357 ``ui.ui`` instance that ``ui`` argument was based off of.
1348 1358
1349 1359 ui
1350 1360 ``ui.ui`` instance for use by the repository.
1351 1361
1352 1362 origroot
1353 1363 ``bytes`` path to working directory root of this repository.
1354 1364
1355 1365 wdirvfs
1356 1366 ``vfs.vfs`` rooted at the working directory.
1357 1367
1358 1368 hgvfs
1359 1369 ``vfs.vfs`` rooted at .hg/
1360 1370
1361 1371 requirements
1362 1372 ``set`` of bytestrings representing repository opening requirements.
1363 1373
1364 1374 supportedrequirements
1365 1375 ``set`` of bytestrings representing repository requirements that we
1366 1376 know how to open. May be a superset of ``requirements``.
1367 1377
1368 1378 sharedpath
1369 1379 ``bytes`` defining the path to the storage base directory. Points to a
1370 1380 ``.hg/`` directory somewhere.
1371 1381
1372 1382 store
1373 1383 ``store.basicstore`` (or derived) instance providing access to
1374 1384 versioned storage.
1375 1385
1376 1386 cachevfs
1377 1387 ``vfs.vfs`` used for cache files.
1378 1388
1379 1389 wcachevfs
1380 1390 ``vfs.vfs`` used for cache files related to the working copy.
1381 1391
1382 1392 features
1383 1393 ``set`` of bytestrings defining features/capabilities of this
1384 1394 instance.
1385 1395
1386 1396 intents
1387 1397 ``set`` of system strings indicating what this repo will be used
1388 1398 for.
1389 1399 """
1390 1400 self.baseui = baseui
1391 1401 self.ui = ui
1392 1402 self.origroot = origroot
1393 1403 # vfs rooted at working directory.
1394 1404 self.wvfs = wdirvfs
1395 1405 self.root = wdirvfs.base
1396 1406 # vfs rooted at .hg/. Used to access most non-store paths.
1397 1407 self.vfs = hgvfs
1398 1408 self.path = hgvfs.base
1399 1409 self.requirements = requirements
1400 1410 self.nodeconstants = sha1nodeconstants
1401 1411 self.nullid = self.nodeconstants.nullid
1402 1412 self.supported = supportedrequirements
1403 1413 self.sharedpath = sharedpath
1404 1414 self.store = store
1405 1415 self.cachevfs = cachevfs
1406 1416 self.wcachevfs = wcachevfs
1407 1417 self.features = features
1408 1418
1409 1419 self.filtername = None
1410 1420
1411 1421 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1412 1422 b'devel', b'check-locks'
1413 1423 ):
1414 1424 self.vfs.audit = self._getvfsward(self.vfs.audit)
1415 1425 # A list of callbacks to shape the phase if no data were found.
1416 1426 # Callbacks are in the form: func(repo, roots) --> processed root.
1417 1427 # This list is to be filled by extensions during repo setup
1418 1428 self._phasedefaults = []
1419 1429
1420 1430 color.setup(self.ui)
1421 1431
1422 1432 self.spath = self.store.path
1423 1433 self.svfs = self.store.vfs
1424 1434 self.sjoin = self.store.join
1425 1435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1426 1436 b'devel', b'check-locks'
1427 1437 ):
1428 1438 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1429 1439 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1430 1440 else: # standard vfs
1431 1441 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1432 1442
1433 1443 self._dirstatevalidatewarned = False
1434 1444
1435 1445 self._branchcaches = branchmap.BranchMapCache()
1436 1446 self._revbranchcache = None
1437 1447 self._filterpats = {}
1438 1448 self._datafilters = {}
1439 1449 self._transref = self._lockref = self._wlockref = None
1440 1450
1441 1451 # A cache for various files under .hg/ that tracks file changes,
1442 1452 # (used by the filecache decorator)
1443 1453 #
1444 1454 # Maps a property name to its util.filecacheentry
1445 1455 self._filecache = {}
1446 1456
1447 1457 # holds sets of revisions to be filtered
1448 1458 # should be cleared when something might have changed the filter value:
1449 1459 # - new changesets,
1450 1460 # - phase change,
1451 1461 # - new obsolescence marker,
1452 1462 # - working directory parent change,
1453 1463 # - bookmark changes
1454 1464 self.filteredrevcache = {}
1455 1465
1456 1466 # post-dirstate-status hooks
1457 1467 self._postdsstatus = []
1458 1468
1459 1469 # generic mapping between names and nodes
1460 1470 self.names = namespaces.namespaces()
1461 1471
1462 1472 # Key to signature value.
1463 1473 self._sparsesignaturecache = {}
1464 1474 # Signature to cached matcher instance.
1465 1475 self._sparsematchercache = {}
1466 1476
1467 1477 self._extrafilterid = repoview.extrafilter(ui)
1468 1478
1469 1479 self.filecopiesmode = None
1470 1480 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1471 1481 self.filecopiesmode = b'changeset-sidedata'
1472 1482
1473 1483 self._wanted_sidedata = set()
1474 1484 self._sidedata_computers = {}
1475 1485 sidedatamod.set_sidedata_spec_for_repo(self)
1476 1486
1477 1487 def _getvfsward(self, origfunc):
1478 1488 """build a ward for self.vfs"""
1479 1489 rref = weakref.ref(self)
1480 1490
1481 1491 def checkvfs(path, mode=None):
1482 1492 ret = origfunc(path, mode=mode)
1483 1493 repo = rref()
1484 1494 if (
1485 1495 repo is None
1486 1496 or not util.safehasattr(repo, b'_wlockref')
1487 1497 or not util.safehasattr(repo, b'_lockref')
1488 1498 ):
1489 1499 return
1490 1500 if mode in (None, b'r', b'rb'):
1491 1501 return
1492 1502 if path.startswith(repo.path):
1493 1503 # truncate name relative to the repository (.hg)
1494 1504 path = path[len(repo.path) + 1 :]
1495 1505 if path.startswith(b'cache/'):
1496 1506 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1497 1507 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1498 1508 # path prefixes covered by 'lock'
1499 1509 vfs_path_prefixes = (
1500 1510 b'journal.',
1501 1511 b'undo.',
1502 1512 b'strip-backup/',
1503 1513 b'cache/',
1504 1514 )
1505 1515 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1506 1516 if repo._currentlock(repo._lockref) is None:
1507 1517 repo.ui.develwarn(
1508 1518 b'write with no lock: "%s"' % path,
1509 1519 stacklevel=3,
1510 1520 config=b'check-locks',
1511 1521 )
1512 1522 elif repo._currentlock(repo._wlockref) is None:
1513 1523 # rest of vfs files are covered by 'wlock'
1514 1524 #
1515 1525 # exclude special files
1516 1526 for prefix in self._wlockfreeprefix:
1517 1527 if path.startswith(prefix):
1518 1528 return
1519 1529 repo.ui.develwarn(
1520 1530 b'write with no wlock: "%s"' % path,
1521 1531 stacklevel=3,
1522 1532 config=b'check-locks',
1523 1533 )
1524 1534 return ret
1525 1535
1526 1536 return checkvfs
1527 1537
1528 1538 def _getsvfsward(self, origfunc):
1529 1539 """build a ward for self.svfs"""
1530 1540 rref = weakref.ref(self)
1531 1541
1532 1542 def checksvfs(path, mode=None):
1533 1543 ret = origfunc(path, mode=mode)
1534 1544 repo = rref()
1535 1545 if repo is None or not util.safehasattr(repo, b'_lockref'):
1536 1546 return
1537 1547 if mode in (None, b'r', b'rb'):
1538 1548 return
1539 1549 if path.startswith(repo.sharedpath):
1540 1550 # truncate name relative to the repository (.hg)
1541 1551 path = path[len(repo.sharedpath) + 1 :]
1542 1552 if repo._currentlock(repo._lockref) is None:
1543 1553 repo.ui.develwarn(
1544 1554 b'write with no lock: "%s"' % path, stacklevel=4
1545 1555 )
1546 1556 return ret
1547 1557
1548 1558 return checksvfs
1549 1559
1550 1560 def close(self):
1551 1561 self._writecaches()
1552 1562
1553 1563 def _writecaches(self):
1554 1564 if self._revbranchcache:
1555 1565 self._revbranchcache.write()
1556 1566
1557 1567 def _restrictcapabilities(self, caps):
1558 1568 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1559 1569 caps = set(caps)
1560 1570 capsblob = bundle2.encodecaps(
1561 1571 bundle2.getrepocaps(self, role=b'client')
1562 1572 )
1563 1573 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1564 1574 if self.ui.configbool(b'experimental', b'narrow'):
1565 1575 caps.add(wireprototypes.NARROWCAP)
1566 1576 return caps
1567 1577
1568 1578 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1569 1579 # self -> auditor -> self._checknested -> self
1570 1580
1571 1581 @property
1572 1582 def auditor(self):
1573 1583 # This is only used by context.workingctx.match in order to
1574 1584 # detect files in subrepos.
1575 1585 return pathutil.pathauditor(self.root, callback=self._checknested)
1576 1586
1577 1587 @property
1578 1588 def nofsauditor(self):
1579 1589 # This is only used by context.basectx.match in order to detect
1580 1590 # files in subrepos.
1581 1591 return pathutil.pathauditor(
1582 1592 self.root, callback=self._checknested, realfs=False, cached=True
1583 1593 )
1584 1594
1585 1595 def _checknested(self, path):
1586 1596 """Determine if path is a legal nested repository."""
1587 1597 if not path.startswith(self.root):
1588 1598 return False
1589 1599 subpath = path[len(self.root) + 1 :]
1590 1600 normsubpath = util.pconvert(subpath)
1591 1601
1592 1602 # XXX: Checking against the current working copy is wrong in
1593 1603 # the sense that it can reject things like
1594 1604 #
1595 1605 # $ hg cat -r 10 sub/x.txt
1596 1606 #
1597 1607 # if sub/ is no longer a subrepository in the working copy
1598 1608 # parent revision.
1599 1609 #
1600 1610 # However, it can of course also allow things that would have
1601 1611 # been rejected before, such as the above cat command if sub/
1602 1612 # is a subrepository now, but was a normal directory before.
1603 1613 # The old path auditor would have rejected by mistake since it
1604 1614 # panics when it sees sub/.hg/.
1605 1615 #
1606 1616 # All in all, checking against the working copy seems sensible
1607 1617 # since we want to prevent access to nested repositories on
1608 1618 # the filesystem *now*.
1609 1619 ctx = self[None]
1610 1620 parts = util.splitpath(subpath)
1611 1621 while parts:
1612 1622 prefix = b'/'.join(parts)
1613 1623 if prefix in ctx.substate:
1614 1624 if prefix == normsubpath:
1615 1625 return True
1616 1626 else:
1617 1627 sub = ctx.sub(prefix)
1618 1628 return sub.checknested(subpath[len(prefix) + 1 :])
1619 1629 else:
1620 1630 parts.pop()
1621 1631 return False
1622 1632
1623 1633 def peer(self):
1624 1634 return localpeer(self) # not cached to avoid reference cycle
1625 1635
1626 1636 def unfiltered(self):
1627 1637 """Return unfiltered version of the repository
1628 1638
1629 1639 Intended to be overwritten by filtered repo."""
1630 1640 return self
1631 1641
1632 1642 def filtered(self, name, visibilityexceptions=None):
1633 1643 """Return a filtered version of a repository
1634 1644
1635 1645 The `name` parameter is the identifier of the requested view. This
1636 1646 will return a repoview object set "exactly" to the specified view.
1637 1647
1638 1648 This function does not apply recursive filtering to a repository. For
1639 1649 example calling `repo.filtered("served")` will return a repoview using
1640 1650 the "served" view, regardless of the initial view used by `repo`.
1641 1651
1642 1652 In other words, there is only ever one level of `repoview` "filtering".
1643 1653 """
1644 1654 if self._extrafilterid is not None and b'%' not in name:
1645 1655 name = name + b'%' + self._extrafilterid
1646 1656
1647 1657 cls = repoview.newtype(self.unfiltered().__class__)
1648 1658 return cls(self, name, visibilityexceptions)
1649 1659
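# A minimal usage sketch (hedged; the filter names come from
# repoview.filtertable, where b'visible' and b'served' are standard views):
#
#     served = repo.filtered(b'served')    # view advertised to peers
#     visible = repo.filtered(b'visible')  # view without hidden changesets
#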
1650 1660 @mixedrepostorecache(
1651 1661 (b'bookmarks', b'plain'),
1652 1662 (b'bookmarks.current', b'plain'),
1653 1663 (b'bookmarks', b''),
1654 1664 (b'00changelog.i', b''),
1655 1665 )
1656 1666 def _bookmarks(self):
1657 1667 # Since the multiple files involved in the transaction cannot be
1658 1668 # written atomically (with the current repository format), there is a race
1659 1669 # condition here.
1660 1670 #
1661 1671 # 1) changelog content A is read
1662 1672 # 2) outside transaction update changelog to content B
1663 1673 # 3) outside transaction update bookmark file referring to content B
1664 1674 # 4) bookmarks file content is read and filtered against changelog-A
1665 1675 #
1666 1676 # When this happens, bookmarks against nodes missing from A are dropped.
1667 1677 #
1668 1678 # Having this happen during a read is not great, but it becomes worse
1669 1679 # when it happens during a write, because the bookmarks on the "unknown"
1670 1680 # nodes will be dropped for good. However, writes happen within locks.
1671 1681 # This locking makes it possible to have a race-free consistent read.
1672 1682 # For this purpose, data read from disk before locking is
1673 1683 # "invalidated" right after the locks are taken. These invalidations are
1674 1684 # "light"; the `filecache` mechanism keeps the data in memory and will
1675 1685 # reuse it if the underlying files did not change. Not parsing the
1676 1686 # same data multiple times helps performance.
1677 1687 #
1678 1688 # Unfortunately, in the case described above, the files tracked by the
1679 1689 # bookmarks file cache might not have changed, but the in-memory
1680 1690 # content is still "wrong" because we used an older changelog content
1681 1691 # to process the on-disk data. So after locking, the changelog would be
1682 1692 # refreshed but `_bookmarks` would be preserved.
1683 1693 # Adding `00changelog.i` to the list of tracked files is not
1684 1694 # enough, because at the time we build the content for `_bookmarks` in
1685 1695 # (4), the changelog file has already diverged from the content used
1686 1696 # for loading `changelog` in (1).
1687 1697 #
1688 1698 # To prevent the issue, we force the changelog to be explicitly
1689 1699 # reloaded while computing `_bookmarks`. The data race can still happen
1690 1700 # without the lock (with a narrower window), but it would no longer go
1691 1701 # undetected during the lock-time refresh.
1692 1702 #
1693 1703 # The new schedule is as follows:
1694 1704 #
1695 1705 # 1) filecache logic detect that `_bookmarks` needs to be computed
1696 1706 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1697 1707 # 3) We force `changelog` filecache to be tested
1698 1708 # 4) cachestat for `changelog` are captured (for changelog)
1699 1709 # 5) `_bookmarks` is computed and cached
1700 1710 #
1701 1711 # The step in (3) ensures we have a changelog at least as recent as the
1702 1712 # cache stat computed in (1). As a result, at locking time:
1703 1713 # * if the changelog did not change since (1) -> we can reuse the data
1704 1714 # * otherwise -> the bookmarks get refreshed.
1705 1715 self._refreshchangelog()
1706 1716 return bookmarks.bmstore(self)
1707 1717
1708 1718 def _refreshchangelog(self):
1709 1719 """make sure the in memory changelog match the on-disk one"""
1710 1720 if 'changelog' in vars(self) and self.currenttransaction() is None:
1711 1721 del self.changelog
1712 1722
1713 1723 @property
1714 1724 def _activebookmark(self):
1715 1725 return self._bookmarks.active
1716 1726
1717 1727 # _phasesets depend on the changelog. What we need is to call
1718 1728 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1719 1729 # can't be easily expressed in the filecache mechanism.
1720 1730 @storecache(b'phaseroots', b'00changelog.i')
1721 1731 def _phasecache(self):
1722 1732 return phases.phasecache(self, self._phasedefaults)
1723 1733
1724 1734 @storecache(b'obsstore')
1725 1735 def obsstore(self):
1726 1736 return obsolete.makestore(self.ui, self)
1727 1737
1728 1738 @changelogcache()
1729 1739 def changelog(repo):
1730 1740 # load dirstate before changelog to avoid a race; see issue6303
1731 1741 repo.dirstate.prefetch_parents()
1732 1742 return repo.store.changelog(
1733 1743 txnutil.mayhavepending(repo.root),
1734 1744 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1735 1745 )
1736 1746
1737 1747 @manifestlogcache()
1738 1748 def manifestlog(self):
1739 1749 return self.store.manifestlog(self, self._storenarrowmatch)
1740 1750
1741 1751 @repofilecache(b'dirstate')
1742 1752 def dirstate(self):
1743 1753 return self._makedirstate()
1744 1754
1745 1755 def _makedirstate(self):
1746 1756 """Extension point for wrapping the dirstate per-repo."""
1747 1757 sparsematchfn = None
1748 1758 if sparse.use_sparse(self):
1749 1759 sparsematchfn = lambda: sparse.matcher(self)
1750 1760 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1751 1761 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1752 1762 use_dirstate_v2 = v2_req in self.requirements
1753 1763 use_tracked_hint = th in self.requirements
1754 1764
1755 1765 return dirstate.dirstate(
1756 1766 self.vfs,
1757 1767 self.ui,
1758 1768 self.root,
1759 1769 self._dirstatevalidate,
1760 1770 sparsematchfn,
1761 1771 self.nodeconstants,
1762 1772 use_dirstate_v2,
1763 1773 use_tracked_hint=use_tracked_hint,
1764 1774 )
1765 1775
1766 1776 def _dirstatevalidate(self, node):
1767 1777 try:
1768 1778 self.changelog.rev(node)
1769 1779 return node
1770 1780 except error.LookupError:
1771 1781 if not self._dirstatevalidatewarned:
1772 1782 self._dirstatevalidatewarned = True
1773 1783 self.ui.warn(
1774 1784 _(b"warning: ignoring unknown working parent %s!\n")
1775 1785 % short(node)
1776 1786 )
1777 1787 return self.nullid
1778 1788
1779 1789 @storecache(narrowspec.FILENAME)
1780 1790 def narrowpats(self):
1781 1791 """matcher patterns for this repository's narrowspec
1782 1792
1783 1793 A tuple of (includes, excludes).
1784 1794 """
1785 1795 return narrowspec.load(self)
1786 1796
1787 1797 @storecache(narrowspec.FILENAME)
1788 1798 def _storenarrowmatch(self):
1789 1799 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1790 1800 return matchmod.always()
1791 1801 include, exclude = self.narrowpats
1792 1802 return narrowspec.match(self.root, include=include, exclude=exclude)
1793 1803
1794 1804 @storecache(narrowspec.FILENAME)
1795 1805 def _narrowmatch(self):
1796 1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1797 1807 return matchmod.always()
1798 1808 narrowspec.checkworkingcopynarrowspec(self)
1799 1809 include, exclude = self.narrowpats
1800 1810 return narrowspec.match(self.root, include=include, exclude=exclude)
1801 1811
1802 1812 def narrowmatch(self, match=None, includeexact=False):
1803 1813 """matcher corresponding the the repo's narrowspec
1804 1814
1805 1815 If `match` is given, then that will be intersected with the narrow
1806 1816 matcher.
1807 1817
1808 1818 If `includeexact` is True, then any exact matches from `match` will
1809 1819 be included even if they're outside the narrowspec.
1810 1820 """
1811 1821 if match:
1812 1822 if includeexact and not self._narrowmatch.always():
1813 1823 # do not exclude explicitly-specified paths so that they can
1814 1824 # be warned about later on
1815 1825 em = matchmod.exact(match.files())
1816 1826 nm = matchmod.unionmatcher([self._narrowmatch, em])
1817 1827 return matchmod.intersectmatchers(match, nm)
1818 1828 return matchmod.intersectmatchers(match, self._narrowmatch)
1819 1829 return self._narrowmatch
1820 1830
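# An illustrative sketch of combining a matcher with the narrowspec
# (the glob pattern below is made up for the example):
#
#     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#     nm = repo.narrowmatch(m)      # intersected with the narrowspec
#     nm(b'src/foo.py')  # True only if both matchers accept the file
#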
1821 1831 def setnarrowpats(self, newincludes, newexcludes):
1822 1832 narrowspec.save(self, newincludes, newexcludes)
1823 1833 self.invalidate(clearfilecache=True)
1824 1834
1825 1835 @unfilteredpropertycache
1826 1836 def _quick_access_changeid_null(self):
1827 1837 return {
1828 1838 b'null': (nullrev, self.nodeconstants.nullid),
1829 1839 nullrev: (nullrev, self.nodeconstants.nullid),
1830 1840 self.nullid: (nullrev, self.nullid),
1831 1841 }
1832 1842
1833 1843 @unfilteredpropertycache
1834 1844 def _quick_access_changeid_wc(self):
1835 1845 # also fast-path access to the working copy parents;
1836 1846 # however, only do it for filters that ensure the wc is visible.
1837 1847 quick = self._quick_access_changeid_null.copy()
1838 1848 cl = self.unfiltered().changelog
1839 1849 for node in self.dirstate.parents():
1840 1850 if node == self.nullid:
1841 1851 continue
1842 1852 rev = cl.index.get_rev(node)
1843 1853 if rev is None:
1844 1854 # unknown working copy parent case:
1845 1855 #
1846 1856 # skip the fast path and let higher code deal with it
1847 1857 continue
1848 1858 pair = (rev, node)
1849 1859 quick[rev] = pair
1850 1860 quick[node] = pair
1851 1861 # also add the parents of the parents
1852 1862 for r in cl.parentrevs(rev):
1853 1863 if r == nullrev:
1854 1864 continue
1855 1865 n = cl.node(r)
1856 1866 pair = (r, n)
1857 1867 quick[r] = pair
1858 1868 quick[n] = pair
1859 1869 p1node = self.dirstate.p1()
1860 1870 if p1node != self.nullid:
1861 1871 quick[b'.'] = quick[p1node]
1862 1872 return quick
1863 1873
1864 1874 @unfilteredmethod
1865 1875 def _quick_access_changeid_invalidate(self):
1866 1876 if '_quick_access_changeid_wc' in vars(self):
1867 1877 del self.__dict__['_quick_access_changeid_wc']
1868 1878
1869 1879 @property
1870 1880 def _quick_access_changeid(self):
1871 1881 """an helper dictionnary for __getitem__ calls
1872 1882
1873 1883 This contains a list of symbol we can recognise right away without
1874 1884 further processing.
1875 1885 """
1876 1886 if self.filtername in repoview.filter_has_wc:
1877 1887 return self._quick_access_changeid_wc
1878 1888 return self._quick_access_changeid_null
1879 1889
1880 1890 def __getitem__(self, changeid):
1881 1891 # dealing with special cases
1882 1892 if changeid is None:
1883 1893 return context.workingctx(self)
1884 1894 if isinstance(changeid, context.basectx):
1885 1895 return changeid
1886 1896
1887 1897 # dealing with multiple revisions
1888 1898 if isinstance(changeid, slice):
1889 1899 # wdirrev isn't contiguous so the slice shouldn't include it
1890 1900 return [
1891 1901 self[i]
1892 1902 for i in range(*changeid.indices(len(self)))
1893 1903 if i not in self.changelog.filteredrevs
1894 1904 ]
1895 1905
1896 1906 # dealing with some special values
1897 1907 quick_access = self._quick_access_changeid.get(changeid)
1898 1908 if quick_access is not None:
1899 1909 rev, node = quick_access
1900 1910 return context.changectx(self, rev, node, maybe_filtered=False)
1901 1911 if changeid == b'tip':
1902 1912 node = self.changelog.tip()
1903 1913 rev = self.changelog.rev(node)
1904 1914 return context.changectx(self, rev, node)
1905 1915
1906 1916 # dealing with arbitrary values
1907 1917 try:
1908 1918 if isinstance(changeid, int):
1909 1919 node = self.changelog.node(changeid)
1910 1920 rev = changeid
1911 1921 elif changeid == b'.':
1912 1922 # this is a hack to delay/avoid loading obsmarkers
1913 1923 # when we know that '.' won't be hidden
1914 1924 node = self.dirstate.p1()
1915 1925 rev = self.unfiltered().changelog.rev(node)
1916 1926 elif len(changeid) == self.nodeconstants.nodelen:
1917 1927 try:
1918 1928 node = changeid
1919 1929 rev = self.changelog.rev(changeid)
1920 1930 except error.FilteredLookupError:
1921 1931 changeid = hex(changeid) # for the error message
1922 1932 raise
1923 1933 except LookupError:
1924 1934 # check if it might have come from damaged dirstate
1925 1935 #
1926 1936 # XXX we could avoid the unfiltered if we had a recognizable
1927 1937 # exception for filtered changeset access
1928 1938 if (
1929 1939 self.local()
1930 1940 and changeid in self.unfiltered().dirstate.parents()
1931 1941 ):
1932 1942 msg = _(b"working directory has unknown parent '%s'!")
1933 1943 raise error.Abort(msg % short(changeid))
1934 1944 changeid = hex(changeid) # for the error message
1935 1945 raise
1936 1946
1937 1947 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1938 1948 node = bin(changeid)
1939 1949 rev = self.changelog.rev(node)
1940 1950 else:
1941 1951 raise error.ProgrammingError(
1942 1952 b"unsupported changeid '%s' of type %s"
1943 1953 % (changeid, pycompat.bytestr(type(changeid)))
1944 1954 )
1945 1955
1946 1956 return context.changectx(self, rev, node)
1947 1957
1948 1958 except (error.FilteredIndexError, error.FilteredLookupError):
1949 1959 raise error.FilteredRepoLookupError(
1950 1960 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1951 1961 )
1952 1962 except (IndexError, LookupError):
1953 1963 raise error.RepoLookupError(
1954 1964 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1955 1965 )
1956 1966 except error.WdirUnsupported:
1957 1967 return context.workingctx(self)
1958 1968
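# A short sketch of the lookup forms accepted by __getitem__ above:
#
#     repo[None]      # workingctx for the working directory
#     repo[b'tip']    # changectx for the tip changeset
#     repo[0]         # changectx for revision 0
#     repo[node]      # 20-byte binary node or 40-byte hex node
#     repo[0:5]       # list of changectx, filtered revisions skipped
#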
1959 1969 def __contains__(self, changeid):
1960 1970 """True if the given changeid exists"""
1961 1971 try:
1962 1972 self[changeid]
1963 1973 return True
1964 1974 except error.RepoLookupError:
1965 1975 return False
1966 1976
1967 1977 def __nonzero__(self):
1968 1978 return True
1969 1979
1970 1980 __bool__ = __nonzero__
1971 1981
1972 1982 def __len__(self):
1973 1983 # no need to pay the cost of repoview.changelog
1974 1984 unfi = self.unfiltered()
1975 1985 return len(unfi.changelog)
1976 1986
1977 1987 def __iter__(self):
1978 1988 return iter(self.changelog)
1979 1989
1980 def revs(self, expr, *args):
1990 def revs(self, expr: bytes, *args):
1981 1991 """Find revisions matching a revset.
1982 1992
1983 1993 The revset is specified as a string ``expr`` that may contain
1984 1994 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1985 1995
1986 1996 Revset aliases from the configuration are not expanded. To expand
1987 1997 user aliases, consider calling ``scmutil.revrange()`` or
1988 1998 ``repo.anyrevs([expr], user=True)``.
1989 1999
1990 2000 Returns a smartset.abstractsmartset, which is a list-like interface
1991 2001 that contains integer revisions.
1992 2002 """
1993 2003 tree = revsetlang.spectree(expr, *args)
1994 2004 return revset.makematcher(tree)(self)
1995 2005
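# A hedged usage sketch; the %-escapes are those documented for
# revsetlang.formatspec (e.g. %d for an int rev, %s for a string,
# %ld for a list of ints):
#
#     for r in repo.revs(b'ancestors(%d) and user(%s)', 42, b'alice'):
#         repo.ui.write(b'%d\n' % r)
#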
1996 def set(self, expr, *args):
2006 def set(self, expr: bytes, *args):
1997 2007 """Find revisions matching a revset and emit changectx instances.
1998 2008
1999 2009 This is a convenience wrapper around ``revs()`` that iterates the
2000 2010 result and is a generator of changectx instances.
2001 2011
2002 2012 Revset aliases from the configuration are not expanded. To expand
2003 2013 user aliases, consider calling ``scmutil.revrange()``.
2004 2014 """
2005 2015 for r in self.revs(expr, *args):
2006 2016 yield self[r]
2007 2017
2008 def anyrevs(self, specs, user=False, localalias=None):
2018 def anyrevs(self, specs: bytes, user=False, localalias=None):
2009 2019 """Find revisions matching one of the given revsets.
2010 2020
2011 2021 Revset aliases from the configuration are not expanded by default. To
2012 2022 expand user aliases, specify ``user=True``. To provide some local
2013 2023 definitions overriding user aliases, set ``localalias`` to
2014 2024 ``{name: definitionstring}``.
2015 2025 """
2016 2026 if specs == [b'null']:
2017 2027 return revset.baseset([nullrev])
2018 2028 if specs == [b'.']:
2019 2029 quick_data = self._quick_access_changeid.get(b'.')
2020 2030 if quick_data is not None:
2021 2031 return revset.baseset([quick_data[0]])
2022 2032 if user:
2023 2033 m = revset.matchany(
2024 2034 self.ui,
2025 2035 specs,
2026 2036 lookup=revset.lookupfn(self),
2027 2037 localalias=localalias,
2028 2038 )
2029 2039 else:
2030 2040 m = revset.matchany(None, specs, localalias=localalias)
2031 2041 return m(self)
2032 2042
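# Hedged examples of the alias handling (the alias names are made up):
#
#     repo.anyrevs([b'tip'], user=False)      # plain revset only
#     repo.anyrevs([b'myalias'], user=True)   # honors [revsetalias]
#     repo.anyrevs([b'release'], user=True,
#                  localalias={b'release': b'tag() and public()'})
#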
2033 def url(self):
2043 def url(self) -> bytes:
2034 2044 return b'file:' + self.root
2035 2045
2036 2046 def hook(self, name, throw=False, **args):
2037 2047 """Call a hook, passing this repo instance.
2038 2048
2039 2049 This a convenience method to aid invoking hooks. Extensions likely
2040 2050 won't call this unless they have registered a custom hook or are
2041 2051 replacing code that is expected to call a hook.
2042 2052 """
2043 2053 return hook.hook(self.ui, self, name, throw, **args)
2044 2054
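# A hedged example of invoking a custom hook from an extension (the
# hook name and extra argument below are hypothetical):
#
#     repo.hook(b'myext-postsync', throw=False, source=b'myext')
#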
2045 2055 @filteredpropertycache
2046 2056 def _tagscache(self):
2047 2057 """Returns a tagscache object that contains various tags related
2048 2058 caches."""
2049 2059
2050 2060 # This simplifies its cache management by having one decorated
2051 2061 # function (this one) and the rest simply fetch things from it.
2052 2062 class tagscache:
2053 2063 def __init__(self):
2054 2064 # These two define the set of tags for this repository. tags
2055 2065 # maps tag name to node; tagtypes maps tag name to 'global' or
2056 2066 # 'local'. (Global tags are defined by .hgtags across all
2057 2067 # heads, and local tags are defined in .hg/localtags.)
2058 2068 # They constitute the in-memory cache of tags.
2059 2069 self.tags = self.tagtypes = None
2060 2070
2061 2071 self.nodetagscache = self.tagslist = None
2062 2072
2063 2073 cache = tagscache()
2064 2074 cache.tags, cache.tagtypes = self._findtags()
2065 2075
2066 2076 return cache
2067 2077
2068 2078 def tags(self):
2069 2079 '''return a mapping of tag to node'''
2070 2080 t = {}
2071 2081 if self.changelog.filteredrevs:
2072 2082 tags, tt = self._findtags()
2073 2083 else:
2074 2084 tags = self._tagscache.tags
2075 2085 rev = self.changelog.rev
2076 2086 for k, v in tags.items():
2077 2087 try:
2078 2088 # ignore tags to unknown nodes
2079 2089 rev(v)
2080 2090 t[k] = v
2081 2091 except (error.LookupError, ValueError):
2082 2092 pass
2083 2093 return t
2084 2094
2085 2095 def _findtags(self):
2086 2096 """Do the hard work of finding tags. Return a pair of dicts
2087 2097 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2088 2098 maps tag name to a string like \'global\' or \'local\'.
2089 2099 Subclasses or extensions are free to add their own tags, but
2090 2100 should be aware that the returned dicts will be retained for the
2091 2101 duration of the localrepo object."""
2092 2102
2093 2103 # XXX what tagtype should subclasses/extensions use? Currently
2094 2104 # mq and bookmarks add tags, but do not set the tagtype at all.
2095 2105 # Should each extension invent its own tag type? Should there
2096 2106 # be one tagtype for all such "virtual" tags? Or is the status
2097 2107 # quo fine?
2098 2108
2099 2109 # map tag name to (node, hist)
2100 2110 alltags = tagsmod.findglobaltags(self.ui, self)
2101 2111 # map tag name to tag type
2102 2112 tagtypes = {tag: b'global' for tag in alltags}
2103 2113
2104 2114 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2105 2115
2106 2116 # Build the return dicts. Have to re-encode tag names because
2107 2117 # the tags module always uses UTF-8 (in order not to lose info
2108 2118 # writing to the cache), but the rest of Mercurial wants them in
2109 2119 # local encoding.
2110 2120 tags = {}
2111 2121 for (name, (node, hist)) in alltags.items():
2112 2122 if node != self.nullid:
2113 2123 tags[encoding.tolocal(name)] = node
2114 2124 tags[b'tip'] = self.changelog.tip()
2115 2125 tagtypes = {
2116 2126 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2117 2127 }
2118 2128 return (tags, tagtypes)
2119 2129
2120 2130 def tagtype(self, tagname):
2121 2131 """
2122 2132 return the type of the given tag. result can be:
2123 2133
2124 2134 'local' : a local tag
2125 2135 'global' : a global tag
2126 2136 None : tag does not exist
2127 2137 """
2128 2138
2129 2139 return self._tagscache.tagtypes.get(tagname)
2130 2140
2131 2141 def tagslist(self):
2132 2142 '''return a list of tags ordered by revision'''
2133 2143 if not self._tagscache.tagslist:
2134 2144 l = []
2135 2145 for t, n in self.tags().items():
2136 2146 l.append((self.changelog.rev(n), t, n))
2137 2147 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2138 2148
2139 2149 return self._tagscache.tagslist
2140 2150
2141 2151 def nodetags(self, node):
2142 2152 '''return the tags associated with a node'''
2143 2153 if not self._tagscache.nodetagscache:
2144 2154 nodetagscache = {}
2145 2155 for t, n in self._tagscache.tags.items():
2146 2156 nodetagscache.setdefault(n, []).append(t)
2147 2157 for tags in nodetagscache.values():
2148 2158 tags.sort()
2149 2159 self._tagscache.nodetagscache = nodetagscache
2150 2160 return self._tagscache.nodetagscache.get(node, [])
2151 2161
2152 2162 def nodebookmarks(self, node):
2153 2163 """return the list of bookmarks pointing to the specified node"""
2154 2164 return self._bookmarks.names(node)
2155 2165
2156 2166 def branchmap(self):
2157 2167 """returns a dictionary {branch: [branchheads]} with branchheads
2158 2168 ordered by increasing revision number"""
2159 2169 return self._branchcaches[self]
2160 2170
2161 2171 @unfilteredmethod
2162 2172 def revbranchcache(self):
2163 2173 if not self._revbranchcache:
2164 2174 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2165 2175 return self._revbranchcache
2166 2176
2167 2177 def register_changeset(self, rev, changelogrevision):
2168 2178 self.revbranchcache().setdata(rev, changelogrevision)
2169 2179
2170 2180 def branchtip(self, branch, ignoremissing=False):
2171 2181 """return the tip node for a given branch
2172 2182
2173 2183 If ignoremissing is True, then this method will not raise an error.
2174 2184 This is helpful for callers that only expect None for a missing branch
2175 2185 (e.g. namespace).
2176 2186
2177 2187 """
2178 2188 try:
2179 2189 return self.branchmap().branchtip(branch)
2180 2190 except KeyError:
2181 2191 if not ignoremissing:
2182 2192 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2183 2193 else:
2184 2194 pass
2185 2195
2186 2196 def lookup(self, key):
2187 2197 node = scmutil.revsymbol(self, key).node()
2188 2198 if node is None:
2189 2199 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2190 2200 return node
2191 2201
2192 2202 def lookupbranch(self, key):
2193 2203 if self.branchmap().hasbranch(key):
2194 2204 return key
2195 2205
2196 2206 return scmutil.revsymbol(self, key).branch()
2197 2207
2198 2208 def known(self, nodes):
2199 2209 cl = self.changelog
2200 2210 get_rev = cl.index.get_rev
2201 2211 filtered = cl.filteredrevs
2202 2212 result = []
2203 2213 for n in nodes:
2204 2214 r = get_rev(n)
2205 2215 resp = not (r is None or r in filtered)
2206 2216 result.append(resp)
2207 2217 return result
2208 2218
2209 2219 def local(self):
2210 2220 return self
2211 2221
2212 2222 def publishing(self):
2213 2223 # it's safe (and desirable) to trust the publish flag unconditionally
2214 2224 # so that we don't finalize changes shared between users via ssh or nfs
2215 2225 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2216 2226
2217 2227 def cancopy(self):
2218 2228 # so statichttprepo's override of local() works
2219 2229 if not self.local():
2220 2230 return False
2221 2231 if not self.publishing():
2222 2232 return True
2223 2233 # if publishing we can't copy if there is filtered content
2224 2234 return not self.filtered(b'visible').changelog.filteredrevs
2225 2235
2226 2236 def shared(self):
2227 2237 '''the type of shared repository (None if not shared)'''
2228 2238 if self.sharedpath != self.path:
2229 2239 return b'store'
2230 2240 return None
2231 2241
2232 def wjoin(self, f, *insidef):
2242 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2233 2243 return self.vfs.reljoin(self.root, f, *insidef)
2234 2244
2235 2245 def setparents(self, p1, p2=None):
2236 2246 if p2 is None:
2237 2247 p2 = self.nullid
2238 2248 self[None].setparents(p1, p2)
2239 2249 self._quick_access_changeid_invalidate()
2240 2250
2241 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2251 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2242 2252 """changeid must be a changeset revision, if specified.
2243 2253 fileid can be a file revision or node."""
2244 2254 return context.filectx(
2245 2255 self, path, changeid, fileid, changectx=changectx
2246 2256 )
2247 2257
2248 def getcwd(self):
2258 def getcwd(self) -> bytes:
2249 2259 return self.dirstate.getcwd()
2250 2260
2251 def pathto(self, f, cwd=None):
2261 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2252 2262 return self.dirstate.pathto(f, cwd)
2253 2263
2254 2264 def _loadfilter(self, filter):
2255 2265 if filter not in self._filterpats:
2256 2266 l = []
2257 2267 for pat, cmd in self.ui.configitems(filter):
2258 2268 if cmd == b'!':
2259 2269 continue
2260 2270 mf = matchmod.match(self.root, b'', [pat])
2261 2271 fn = None
2262 2272 params = cmd
2263 2273 for name, filterfn in self._datafilters.items():
2264 2274 if cmd.startswith(name):
2265 2275 fn = filterfn
2266 2276 params = cmd[len(name) :].lstrip()
2267 2277 break
2268 2278 if not fn:
2269 2279 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2270 2280 fn.__name__ = 'commandfilter'
2271 2281 # Wrap old filters not supporting keyword arguments
2272 2282 if not pycompat.getargspec(fn)[2]:
2273 2283 oldfn = fn
2274 2284 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2275 2285 fn.__name__ = 'compat-' + oldfn.__name__
2276 2286 l.append((mf, fn, params))
2277 2287 self._filterpats[filter] = l
2278 2288 return self._filterpats[filter]
2279 2289
2280 2290 def _filter(self, filterpats, filename, data):
2281 2291 for mf, fn, cmd in filterpats:
2282 2292 if mf(filename):
2283 2293 self.ui.debug(
2284 2294 b"filtering %s through %s\n"
2285 2295 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2286 2296 )
2287 2297 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2288 2298 break
2289 2299
2290 2300 return data
2291 2301
2292 2302 @unfilteredpropertycache
2293 2303 def _encodefilterpats(self):
2294 2304 return self._loadfilter(b'encode')
2295 2305
2296 2306 @unfilteredpropertycache
2297 2307 def _decodefilterpats(self):
2298 2308 return self._loadfilter(b'decode')
2299 2309
2300 2310 def adddatafilter(self, name, filter):
2301 2311 self._datafilters[name] = filter
2302 2312
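# A hedged sketch of registering a data filter (the name b'crlfnorm:'
# is made up); the callable receives the file data plus the keyword
# arguments passed by _filter() above:
#
#     def crlfnorm(data, cmd, **kwargs):
#         # normalize line endings on checkin
#         return data.replace(b'\r\n', b'\n')
#
#     repo.adddatafilter(b'crlfnorm:', crlfnorm)
#
# paired with an [encode] pattern whose command starts with the
# registered name, e.g. "**.txt = crlfnorm:".
#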
2303 def wread(self, filename):
2313 def wread(self, filename: bytes) -> bytes:
2304 2314 if self.wvfs.islink(filename):
2305 2315 data = self.wvfs.readlink(filename)
2306 2316 else:
2307 2317 data = self.wvfs.read(filename)
2308 2318 return self._filter(self._encodefilterpats, filename, data)
2309 2319
2310 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2320 def wwrite(
2321 self,
2322 filename: bytes,
2323 data: bytes,
2324 flags: bytes,
2325 backgroundclose=False,
2326 **kwargs
2327 ) -> int:
2311 2328 """write ``data`` into ``filename`` in the working directory
2312 2329
2313 2330 This returns the length of the written (possibly decoded) data.
2314 2331 """
2315 2332 data = self._filter(self._decodefilterpats, filename, data)
2316 2333 if b'l' in flags:
2317 2334 self.wvfs.symlink(data, filename)
2318 2335 else:
2319 2336 self.wvfs.write(
2320 2337 filename, data, backgroundclose=backgroundclose, **kwargs
2321 2338 )
2322 2339 if b'x' in flags:
2323 2340 self.wvfs.setflags(filename, False, True)
2324 2341 else:
2325 2342 self.wvfs.setflags(filename, False, False)
2326 2343 return len(data)
2327 2344
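# The flag semantics above, as a short sketch:
#
#     repo.wwrite(b'plain.txt', b'data\n', b'')     # regular file
#     repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')  # executable bit set
#     repo.wwrite(b'alias', b'plain.txt', b'l')     # symlink to target
#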
2328 def wwritedata(self, filename, data):
2345 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2329 2346 return self._filter(self._decodefilterpats, filename, data)
2330 2347
2331 2348 def currenttransaction(self):
2332 2349 """return the current transaction or None if non exists"""
2333 2350 if self._transref:
2334 2351 tr = self._transref()
2335 2352 else:
2336 2353 tr = None
2337 2354
2338 2355 if tr and tr.running():
2339 2356 return tr
2340 2357 return None
2341 2358
2342 2359 def transaction(self, desc, report=None):
2343 2360 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2344 2361 b'devel', b'check-locks'
2345 2362 ):
2346 2363 if self._currentlock(self._lockref) is None:
2347 2364 raise error.ProgrammingError(b'transaction requires locking')
2348 2365 tr = self.currenttransaction()
2349 2366 if tr is not None:
2350 2367 return tr.nest(name=desc)
2351 2368
2352 2369 # abort here if the journal already exists
2353 2370 if self.svfs.exists(b"journal"):
2354 2371 raise error.RepoError(
2355 2372 _(b"abandoned transaction found"),
2356 2373 hint=_(b"run 'hg recover' to clean up transaction"),
2357 2374 )
2358 2375
2359 2376 idbase = b"%.40f#%f" % (random.random(), time.time())
2360 2377 ha = hex(hashutil.sha1(idbase).digest())
2361 2378 txnid = b'TXN:' + ha
2362 2379 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2363 2380
2364 2381 self._writejournal(desc)
2365 2382 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2366 2383 if report:
2367 2384 rp = report
2368 2385 else:
2369 2386 rp = self.ui.warn
2370 2387 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2371 2388 # we must avoid a cyclic reference between repo and transaction.
2372 2389 reporef = weakref.ref(self)
2373 2390 # Code to track tag movement
2374 2391 #
2375 2392 # Since tags are all handled as file content, it is actually quite hard
2376 2393 # to track these movements from a code perspective. So we fall back to
2377 2394 # tracking at the repository level. One could envision tracking changes
2378 2395 # to the '.hgtags' file through changegroup apply, but that fails to
2379 2396 # cope with cases where a transaction exposes new heads without a
2380 2397 # changegroup being involved (e.g. phase movement).
2381 2398 #
2382 2399 # For now, we gate the feature behind a flag since it likely comes
2383 2400 # with performance impacts. The current code runs more often than needed
2384 2401 # and does not use caches as much as it could. The current focus is on
2385 2402 # the behavior of the feature, so we disable it by default. The flag
2386 2403 # will be removed when we are happy with the performance impact.
2387 2404 #
2388 2405 # Once this feature is no longer experimental move the following
2389 2406 # documentation to the appropriate help section:
2390 2407 #
2391 2408 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2392 2409 # tags (new or changed or deleted tags). In addition the details of
2393 2410 # these changes are made available in a file at:
2394 2411 # ``REPOROOT/.hg/changes/tags.changes``.
2395 2412 # Make sure you check for HG_TAG_MOVED before reading that file as it
2396 2413 # might exist from a previous transaction even if no tags were touched
2397 2414 # in this one. Changes are recorded in a line-based format::
2398 2415 #
2399 2416 # <action> <hex-node> <tag-name>\n
2400 2417 #
2401 2418 # Actions are defined as follows:
2402 2419 # "-R": tag is removed,
2403 2420 # "+A": tag is added,
2404 2421 # "-M": tag is moved (old value),
2405 2422 # "+M": tag is moved (new value),
2406 2423 tracktags = lambda x: None
2407 2424 # experimental config: experimental.hook-track-tags
2408 2425 shouldtracktags = self.ui.configbool(
2409 2426 b'experimental', b'hook-track-tags'
2410 2427 )
2411 2428 if desc != b'strip' and shouldtracktags:
2412 2429 oldheads = self.changelog.headrevs()
2413 2430
2414 2431 def tracktags(tr2):
2415 2432 repo = reporef()
2416 2433 assert repo is not None # help pytype
2417 2434 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2418 2435 newheads = repo.changelog.headrevs()
2419 2436 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2420 2437 # note: we compare lists here.
2421 2438 # As we only do it once, building a set would not be cheaper
2422 2439 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2423 2440 if changes:
2424 2441 tr2.hookargs[b'tag_moved'] = b'1'
2425 2442 with repo.vfs(
2426 2443 b'changes/tags.changes', b'w', atomictemp=True
2427 2444 ) as changesfile:
2428 2445 # note: we do not register the file with the transaction
2429 2446 # because we need it to still exist when the transaction
2430 2447 # is closed (for txnclose hooks)
2431 2448 tagsmod.writediff(changesfile, changes)
2432 2449
2433 2450 def validate(tr2):
2434 2451 """will run pre-closing hooks"""
2435 2452 # XXX the transaction API is a bit lacking here so we take a hacky
2436 2453 # path for now
2437 2454 #
2438 2455 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2439 2456 # dict is copied before these run. In addition, we need the data
2440 2457 # available to in-memory hooks too.
2441 2458 #
2442 2459 # Moreover, we also need to make sure this runs before txnclose
2443 2460 # hooks and there is no "pending" mechanism that would execute
2444 2461 # logic only if hooks are about to run.
2445 2462 #
2446 2463 # Fixing this limitation of the transaction is also needed to track
2447 2464 # other families of changes (bookmarks, phases, obsolescence).
2448 2465 #
2449 2466 # This will have to be fixed before we remove the experimental
2450 2467 # gating.
2451 2468 tracktags(tr2)
2452 2469 repo = reporef()
2453 2470 assert repo is not None # help pytype
2454 2471
2455 2472 singleheadopt = (b'experimental', b'single-head-per-branch')
2456 2473 singlehead = repo.ui.configbool(*singleheadopt)
2457 2474 if singlehead:
2458 2475 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2459 2476 accountclosed = singleheadsub.get(
2460 2477 b"account-closed-heads", False
2461 2478 )
2462 2479 if singleheadsub.get(b"public-changes-only", False):
2463 2480 filtername = b"immutable"
2464 2481 else:
2465 2482 filtername = b"visible"
2466 2483 scmutil.enforcesinglehead(
2467 2484 repo, tr2, desc, accountclosed, filtername
2468 2485 )
2469 2486 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2470 2487 for name, (old, new) in sorted(
2471 2488 tr.changes[b'bookmarks'].items()
2472 2489 ):
2473 2490 args = tr.hookargs.copy()
2474 2491 args.update(bookmarks.preparehookargs(name, old, new))
2475 2492 repo.hook(
2476 2493 b'pretxnclose-bookmark',
2477 2494 throw=True,
2478 2495 **pycompat.strkwargs(args)
2479 2496 )
2480 2497 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2481 2498 cl = repo.unfiltered().changelog
2482 2499 for revs, (old, new) in tr.changes[b'phases']:
2483 2500 for rev in revs:
2484 2501 args = tr.hookargs.copy()
2485 2502 node = hex(cl.node(rev))
2486 2503 args.update(phases.preparehookargs(node, old, new))
2487 2504 repo.hook(
2488 2505 b'pretxnclose-phase',
2489 2506 throw=True,
2490 2507 **pycompat.strkwargs(args)
2491 2508 )
2492 2509
2493 2510 repo.hook(
2494 2511 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2495 2512 )
2496 2513
2497 2514 def releasefn(tr, success):
2498 2515 repo = reporef()
2499 2516 if repo is None:
2500 2517 # If the repo has been GC'd (and this release function is being
2501 2518 # called from transaction.__del__), there's not much we can do,
2502 2519 # so just leave the unfinished transaction there and let the
2503 2520 # user run `hg recover`.
2504 2521 return
2505 2522 if success:
2506 2523 # this should be explicitly invoked here, because
2507 2524 # in-memory changes aren't written out when closing the
2508 2525 # transaction if tr.addfilegenerator (via
2509 2526 # dirstate.write or so) isn't invoked while the
2510 2527 # transaction is running
2511 2528 repo.dirstate.write(None)
2512 2529 else:
2513 2530 # discard all changes (including ones already written
2514 2531 # out) in this transaction
2515 2532 narrowspec.restorebackup(self, b'journal.narrowspec')
2516 2533 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2517 2534 repo.dirstate.restorebackup(None, b'journal.dirstate')
2518 2535
2519 2536 repo.invalidate(clearfilecache=True)
2520 2537
2521 2538 tr = transaction.transaction(
2522 2539 rp,
2523 2540 self.svfs,
2524 2541 vfsmap,
2525 2542 b"journal",
2526 2543 b"undo",
2527 2544 aftertrans(renames),
2528 2545 self.store.createmode,
2529 2546 validator=validate,
2530 2547 releasefn=releasefn,
2531 2548 checkambigfiles=_cachedfiles,
2532 2549 name=desc,
2533 2550 )
2534 2551 tr.changes[b'origrepolen'] = len(self)
2535 2552 tr.changes[b'obsmarkers'] = set()
2536 2553 tr.changes[b'phases'] = []
2537 2554 tr.changes[b'bookmarks'] = {}
2538 2555
2539 2556 tr.hookargs[b'txnid'] = txnid
2540 2557 tr.hookargs[b'txnname'] = desc
2541 2558 tr.hookargs[b'changes'] = tr.changes
2542 2559 # note: writing the fncache only during finalize means that the file is
2543 2560 # outdated when running hooks. As fncache is used for streaming clone,
2544 2561 # this is not expected to break anything that happens during the hooks.
2545 2562 tr.addfinalize(b'flush-fncache', self.store.write)
2546 2563
2547 2564 def txnclosehook(tr2):
2548 2565 """To be run if transaction is successful, will schedule a hook run"""
2549 2566 # Don't reference tr2 in hook() so we don't hold a reference.
2550 2567 # This reduces memory consumption when there are multiple
2551 2568 # transactions per lock. This can likely go away if issue5045
2552 2569 # fixes the function accumulation.
2553 2570 hookargs = tr2.hookargs
2554 2571
2555 2572 def hookfunc(unused_success):
2556 2573 repo = reporef()
2557 2574 assert repo is not None # help pytype
2558 2575
2559 2576 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2560 2577 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2561 2578 for name, (old, new) in bmchanges:
2562 2579 args = tr.hookargs.copy()
2563 2580 args.update(bookmarks.preparehookargs(name, old, new))
2564 2581 repo.hook(
2565 2582 b'txnclose-bookmark',
2566 2583 throw=False,
2567 2584 **pycompat.strkwargs(args)
2568 2585 )
2569 2586
2570 2587 if hook.hashook(repo.ui, b'txnclose-phase'):
2571 2588 cl = repo.unfiltered().changelog
2572 2589 phasemv = sorted(
2573 2590 tr.changes[b'phases'], key=lambda r: r[0][0]
2574 2591 )
2575 2592 for revs, (old, new) in phasemv:
2576 2593 for rev in revs:
2577 2594 args = tr.hookargs.copy()
2578 2595 node = hex(cl.node(rev))
2579 2596 args.update(phases.preparehookargs(node, old, new))
2580 2597 repo.hook(
2581 2598 b'txnclose-phase',
2582 2599 throw=False,
2583 2600 **pycompat.strkwargs(args)
2584 2601 )
2585 2602
2586 2603 repo.hook(
2587 2604 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2588 2605 )
2589 2606
2590 2607 repo = reporef()
2591 2608 assert repo is not None # help pytype
2592 2609 repo._afterlock(hookfunc)
2593 2610
2594 2611 tr.addfinalize(b'txnclose-hook', txnclosehook)
2595 2612 # Include a leading "-" to make it happen before the transaction summary
2596 2613 # reports registered via scmutil.registersummarycallback() whose names
2597 2614 # are 00-txnreport etc. That way, the caches will be warm when the
2598 2615 # callbacks run.
2599 2616 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2600 2617
2601 2618 def txnaborthook(tr2):
2602 2619 """To be run if transaction is aborted"""
2603 2620 repo = reporef()
2604 2621 assert repo is not None # help pytype
2605 2622 repo.hook(
2606 2623 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2607 2624 )
2608 2625
2609 2626 tr.addabort(b'txnabort-hook', txnaborthook)
2610 2627 # avoid eager cache invalidation. in-memory data should be identical
2611 2628 # to stored data if the transaction has no error.
2612 2629 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2613 2630 self._transref = weakref.ref(tr)
2614 2631 scmutil.registersummarycallback(self, tr, desc)
2615 2632 return tr
2616 2633
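# Hedged sketch of typical use; both the lock and the returned
# transaction support the context-manager protocol:
#
#     with repo.lock():
#         with repo.transaction(b'my-operation') as tr:
#             ...  # closed on success, aborted on exception
#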
2617 2634 def _journalfiles(self):
2618 2635 first = (
2619 2636 (self.svfs, b'journal'),
2620 2637 (self.svfs, b'journal.narrowspec'),
2621 2638 (self.vfs, b'journal.narrowspec.dirstate'),
2622 2639 (self.vfs, b'journal.dirstate'),
2623 2640 )
2624 2641 middle = []
2625 2642 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2626 2643 if dirstate_data is not None:
2627 2644 middle.append((self.vfs, dirstate_data))
2628 2645 end = (
2629 2646 (self.vfs, b'journal.branch'),
2630 2647 (self.vfs, b'journal.desc'),
2631 2648 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2632 2649 (self.svfs, b'journal.phaseroots'),
2633 2650 )
2634 2651 return first + tuple(middle) + end
2635 2652
2636 2653 def undofiles(self):
2637 2654 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2638 2655
2639 2656 @unfilteredmethod
2640 2657 def _writejournal(self, desc):
2641 2658 self.dirstate.savebackup(None, b'journal.dirstate')
2642 2659 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2643 2660 narrowspec.savebackup(self, b'journal.narrowspec')
2644 2661 self.vfs.write(
2645 2662 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2646 2663 )
2647 2664 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2648 2665 bookmarksvfs = bookmarks.bookmarksvfs(self)
2649 2666 bookmarksvfs.write(
2650 2667 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2651 2668 )
2652 2669 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2653 2670
2654 2671 def recover(self):
2655 2672 with self.lock():
2656 2673 if self.svfs.exists(b"journal"):
2657 2674 self.ui.status(_(b"rolling back interrupted transaction\n"))
2658 2675 vfsmap = {
2659 2676 b'': self.svfs,
2660 2677 b'plain': self.vfs,
2661 2678 }
2662 2679 transaction.rollback(
2663 2680 self.svfs,
2664 2681 vfsmap,
2665 2682 b"journal",
2666 2683 self.ui.warn,
2667 2684 checkambigfiles=_cachedfiles,
2668 2685 )
2669 2686 self.invalidate()
2670 2687 return True
2671 2688 else:
2672 2689 self.ui.warn(_(b"no interrupted transaction available\n"))
2673 2690 return False
2674 2691
2675 2692 def rollback(self, dryrun=False, force=False):
2676 2693 wlock = lock = dsguard = None
2677 2694 try:
2678 2695 wlock = self.wlock()
2679 2696 lock = self.lock()
2680 2697 if self.svfs.exists(b"undo"):
2681 2698 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2682 2699
2683 2700 return self._rollback(dryrun, force, dsguard)
2684 2701 else:
2685 2702 self.ui.warn(_(b"no rollback information available\n"))
2686 2703 return 1
2687 2704 finally:
2688 2705 release(dsguard, lock, wlock)
2689 2706
2690 2707 @unfilteredmethod # Until we get smarter cache management
2691 2708 def _rollback(self, dryrun, force, dsguard):
2692 2709 ui = self.ui
2693 2710 try:
2694 2711 args = self.vfs.read(b'undo.desc').splitlines()
2695 2712 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2696 2713 if len(args) >= 3:
2697 2714 detail = args[2]
2698 2715 oldtip = oldlen - 1
2699 2716
2700 2717 if detail and ui.verbose:
2701 2718 msg = _(
2702 2719 b'repository tip rolled back to revision %d'
2703 2720 b' (undo %s: %s)\n'
2704 2721 ) % (oldtip, desc, detail)
2705 2722 else:
2706 2723 msg = _(
2707 2724 b'repository tip rolled back to revision %d (undo %s)\n'
2708 2725 ) % (oldtip, desc)
2709 2726 except IOError:
2710 2727 msg = _(b'rolling back unknown transaction\n')
2711 2728 desc = None
2712 2729
2713 2730 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2714 2731 raise error.Abort(
2715 2732 _(
2716 2733 b'rollback of last commit while not checked out '
2717 2734 b'may lose data'
2718 2735 ),
2719 2736 hint=_(b'use -f to force'),
2720 2737 )
2721 2738
2722 2739 ui.status(msg)
2723 2740 if dryrun:
2724 2741 return 0
2725 2742
2726 2743 parents = self.dirstate.parents()
2727 2744 self.destroying()
2728 2745 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2729 2746 transaction.rollback(
2730 2747 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2731 2748 )
2732 2749 bookmarksvfs = bookmarks.bookmarksvfs(self)
2733 2750 if bookmarksvfs.exists(b'undo.bookmarks'):
2734 2751 bookmarksvfs.rename(
2735 2752 b'undo.bookmarks', b'bookmarks', checkambig=True
2736 2753 )
2737 2754 if self.svfs.exists(b'undo.phaseroots'):
2738 2755 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2739 2756 self.invalidate()
2740 2757
2741 2758 has_node = self.changelog.index.has_node
2742 2759 parentgone = any(not has_node(p) for p in parents)
2743 2760 if parentgone:
2744 2761 # prevent dirstateguard from overwriting the already restored one
2745 2762 dsguard.close()
2746 2763
2747 2764 narrowspec.restorebackup(self, b'undo.narrowspec')
2748 2765 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2749 2766 self.dirstate.restorebackup(None, b'undo.dirstate')
2750 2767 try:
2751 2768 branch = self.vfs.read(b'undo.branch')
2752 2769 self.dirstate.setbranch(encoding.tolocal(branch))
2753 2770 except IOError:
2754 2771 ui.warn(
2755 2772 _(
2756 2773 b'named branch could not be reset: '
2757 2774 b'current branch is still \'%s\'\n'
2758 2775 )
2759 2776 % self.dirstate.branch()
2760 2777 )
2761 2778
2762 2779 parents = tuple([p.rev() for p in self[None].parents()])
2763 2780 if len(parents) > 1:
2764 2781 ui.status(
2765 2782 _(
2766 2783 b'working directory now based on '
2767 2784 b'revisions %d and %d\n'
2768 2785 )
2769 2786 % parents
2770 2787 )
2771 2788 else:
2772 2789 ui.status(
2773 2790 _(b'working directory now based on revision %d\n') % parents
2774 2791 )
2775 2792 mergestatemod.mergestate.clean(self)
2776 2793
2777 2794 # TODO: if we know which new heads may result from this rollback, pass
2778 2795 # them to destroy(), which will prevent the branchhead cache from being
2779 2796 # invalidated.
2780 2797 self.destroyed()
2781 2798 return 0
2782 2799
2783 2800 def _buildcacheupdater(self, newtransaction):
2784 2801 """called during transaction to build the callback updating cache
2785 2802
2786 2803 Lives on the repository to help extension who might want to augment
2787 2804 this logic. For this purpose, the created transaction is passed to the
2788 2805 method.
2789 2806 """
2790 2807 # we must avoid a cyclic reference between repo and transaction.
2791 2808 reporef = weakref.ref(self)
2792 2809
2793 2810 def updater(tr):
2794 2811 repo = reporef()
2795 2812 assert repo is not None # help pytype
2796 2813 repo.updatecaches(tr)
2797 2814
2798 2815 return updater
2799 2816
2800 2817 @unfilteredmethod
2801 2818 def updatecaches(self, tr=None, full=False, caches=None):
2802 2819 """warm appropriate caches
2803 2820
2804 2821 If this function is called after a transaction has closed, the
2805 2822 transaction will be available in the 'tr' argument. This can be used
2806 2823 to selectively update caches relevant to the changes in that transaction.
2807 2824
2808 2825 If 'full' is set, make sure all caches the function knows about have
2809 2826 up-to-date data, even the ones usually loaded more lazily.
2810 2827
2811 2828 The `full` argument can take a special "post-clone" value. In this case
2812 2829 the cache warming is done after a clone, and some of the slower caches
2813 2830 might be skipped, namely the `.fnodetags` one. This argument is 5.8
2814 2831 specific as we plan for a cleaner way to deal with this in 5.9.
2815 2832 """
2816 2833 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2817 2834 # During strip, many caches are invalid, but a
2818 2835 # later call to `destroyed` will refresh them.
2819 2836 return
2820 2837
2821 2838 unfi = self.unfiltered()
2822 2839
2823 2840 if full:
2824 2841 msg = (
2825 2842 "`full` argument for `repo.updatecaches` is deprecated\n"
2826 2843 "(use `caches=repository.CACHE_ALL` instead)"
2827 2844 )
2828 2845 self.ui.deprecwarn(msg, b"5.9")
2829 2846 caches = repository.CACHES_ALL
2830 2847 if full == b"post-clone":
2831 2848 caches = repository.CACHES_POST_CLONE
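2831 2848 # note: the next assignment overrides the post-clone value just
2831 2848 # computed, so any truthy `full` currently warms all caches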
2832 2849 caches = repository.CACHES_ALL
2833 2850 elif caches is None:
2834 2851 caches = repository.CACHES_DEFAULT
2835 2852
2836 2853 if repository.CACHE_BRANCHMAP_SERVED in caches:
2837 2854 if tr is None or tr.changes[b'origrepolen'] < len(self):
2838 2855 # accessing the 'served' branchmap should refresh all the others,
2839 2856 self.ui.debug(b'updating the branch cache\n')
2840 2857 self.filtered(b'served').branchmap()
2841 2858 self.filtered(b'served.hidden').branchmap()
2842 2859 # flush all possibly delayed write.
2843 2860 self._branchcaches.write_delayed(self)
2844 2861
2845 2862 if repository.CACHE_CHANGELOG_CACHE in caches:
2846 2863 self.changelog.update_caches(transaction=tr)
2847 2864
2848 2865 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2849 2866 self.manifestlog.update_caches(transaction=tr)
2850 2867
2851 2868 if repository.CACHE_REV_BRANCH in caches:
2852 2869 rbc = unfi.revbranchcache()
2853 2870 for r in unfi.changelog:
2854 2871 rbc.branchinfo(r)
2855 2872 rbc.write()
2856 2873
2857 2874 if repository.CACHE_FULL_MANIFEST in caches:
2858 2875 # ensure the working copy parents are in the manifestfulltextcache
2859 2876 for ctx in self[b'.'].parents():
2860 2877 ctx.manifest() # accessing the manifest is enough
2861 2878
2862 2879 if repository.CACHE_FILE_NODE_TAGS in caches:
2863 2880 # accessing fnode cache warms the cache
2864 2881 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2865 2882
2866 2883 if repository.CACHE_TAGS_DEFAULT in caches:
2867 2884 # accessing tags warms the cache
2868 2885 self.tags()
2869 2886 if repository.CACHE_TAGS_SERVED in caches:
2870 2887 self.filtered(b'served').tags()
2871 2888
2872 2889 if repository.CACHE_BRANCHMAP_ALL in caches:
2873 2890 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2874 2891 # so we're forcing a write to cause these caches to be warmed up
2875 2892 # even if they haven't explicitly been requested yet (if they've
2876 2893 # never been used by hg, they won't ever have been written, even if
2877 2894 # they're a subset of another kind of cache that *has* been used).
2878 2895 for filt in repoview.filtertable.keys():
2879 2896 filtered = self.filtered(filt)
2880 2897 filtered.branchmap().write(filtered)
2881 2898
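# A hedged example of warming caches explicitly from an external caller
# (this module already imports `repository` from .interfaces):
#
#     from mercurial.interfaces import repository
#     repo.updatecaches(caches=repository.CACHES_POST_CLONE)
#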
2882 2899 def invalidatecaches(self):
2883 2900
2884 2901 if '_tagscache' in vars(self):
2885 2902 # can't use delattr on proxy
2886 2903 del self.__dict__['_tagscache']
2887 2904
2888 2905 self._branchcaches.clear()
2889 2906 self.invalidatevolatilesets()
2890 2907 self._sparsesignaturecache.clear()
2891 2908
2892 2909 def invalidatevolatilesets(self):
2893 2910 self.filteredrevcache.clear()
2894 2911 obsolete.clearobscaches(self)
2895 2912 self._quick_access_changeid_invalidate()
2896 2913
2897 2914 def invalidatedirstate(self):
2898 2915 """Invalidates the dirstate, causing the next call to dirstate
2899 2916 to check if it was modified since the last time it was read,
2900 2917 rereading it if it has.
2901 2918
2902 2919 This is different from dirstate.invalidate() in that it doesn't always
2903 2920 reread the dirstate. Use dirstate.invalidate() if you want to
2904 2921 explicitly read the dirstate again (i.e. restoring it to a previous
2905 2922 known good state)."""
2906 2923 if hasunfilteredcache(self, 'dirstate'):
2907 2924 for k in self.dirstate._filecache:
2908 2925 try:
2909 2926 delattr(self.dirstate, k)
2910 2927 except AttributeError:
2911 2928 pass
2912 2929 delattr(self.unfiltered(), 'dirstate')
2913 2930
2914 2931 def invalidate(self, clearfilecache=False):
2915 2932 """Invalidates both store and non-store parts other than dirstate
2916 2933
2917 2934 If a transaction is running, invalidation of the store is omitted,
2918 2935 because discarding in-memory changes might cause inconsistency
2919 2936 (e.g. an incomplete fncache causes unintentional failure, but a
2920 2937 redundant one doesn't).
2921 2938 """
2922 2939 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2923 2940 for k in list(self._filecache.keys()):
2924 2941 # dirstate is invalidated separately in invalidatedirstate()
2925 2942 if k == b'dirstate':
2926 2943 continue
2927 2944 if (
2928 2945 k == b'changelog'
2929 2946 and self.currenttransaction()
2930 2947 and self.changelog._delayed
2931 2948 ):
2932 2949 # The changelog object may store unwritten revisions. We don't
2933 2950 # want to lose them.
2934 2951 # TODO: Solve the problem instead of working around it.
2935 2952 continue
2936 2953
2937 2954 if clearfilecache:
2938 2955 del self._filecache[k]
2939 2956 try:
2940 2957 delattr(unfiltered, k)
2941 2958 except AttributeError:
2942 2959 pass
2943 2960 self.invalidatecaches()
2944 2961 if not self.currenttransaction():
2945 2962 # TODO: Changing contents of store outside transaction
2946 2963 # causes inconsistency. We should make in-memory store
2947 2964 # changes detectable, and abort if changed.
2948 2965 self.store.invalidatecaches()
2949 2966
2950 2967 def invalidateall(self):
2951 2968 """Fully invalidates both store and non-store parts, causing the
2952 2969 subsequent operation to reread any outside changes."""
2953 2970 # extension should hook this to invalidate its caches
2954 2971 self.invalidate()
2955 2972 self.invalidatedirstate()
2956 2973
2957 2974 @unfilteredmethod
2958 2975 def _refreshfilecachestats(self, tr):
2959 2976 """Reload stats of cached files so that they are flagged as valid"""
2960 2977 for k, ce in self._filecache.items():
2961 2978 k = pycompat.sysstr(k)
2962 2979 if k == 'dirstate' or k not in self.__dict__:
2963 2980 continue
2964 2981 ce.refresh()
2965 2982
2966 2983 def _lock(
2967 2984 self,
2968 2985 vfs,
2969 2986 lockname,
2970 2987 wait,
2971 2988 releasefn,
2972 2989 acquirefn,
2973 2990 desc,
2974 2991 ):
2975 2992 timeout = 0
2976 2993 warntimeout = 0
2977 2994 if wait:
2978 2995 timeout = self.ui.configint(b"ui", b"timeout")
2979 2996 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2980 2997 # internal config: ui.signal-safe-lock
2981 2998 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2982 2999
2983 3000 l = lockmod.trylock(
2984 3001 self.ui,
2985 3002 vfs,
2986 3003 lockname,
2987 3004 timeout,
2988 3005 warntimeout,
2989 3006 releasefn=releasefn,
2990 3007 acquirefn=acquirefn,
2991 3008 desc=desc,
2992 3009 signalsafe=signalsafe,
2993 3010 )
2994 3011 return l
2995 3012
2996 3013 def _afterlock(self, callback):
2997 3014 """add a callback to be run when the repository is fully unlocked
2998 3015
2999 3016 The callback will be executed when the outermost lock is released
3000 3017 (with wlock being higher level than 'lock')."""
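# A minimal sketch of such a callback (hypothetical name): it receives a
# single boolean success flag, matching the callback(True) fallback below
# and commithook(unused_success) in commit():
#
#   def on_unlock(success):
#       pass  # e.g. send a notification
#   repo._afterlock(on_unlock)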
3001 3018 for ref in (self._wlockref, self._lockref):
3002 3019 l = ref and ref()
3003 3020 if l and l.held:
3004 3021 l.postrelease.append(callback)
3005 3022 break
3006 3023 else: # no lock has been found.
3007 3024 callback(True)
3008 3025
3009 3026 def lock(self, wait=True):
3010 3027 """Lock the repository store (.hg/store) and return a weak reference
3011 3028 to the lock. Use this before modifying the store (e.g. committing or
3012 3029 stripping). If you are opening a transaction, get a lock as well.)
3013 3030
3014 3031 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3015 3032 'wlock' first to avoid a dead-lock hazard."""
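# Typical usage, as done in commit() below (illustrative):
#
#   with repo.wlock(), repo.lock():
#       ...  # modify the store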
3016 3033 l = self._currentlock(self._lockref)
3017 3034 if l is not None:
3018 3035 l.lock()
3019 3036 return l
3020 3037
3021 3038 l = self._lock(
3022 3039 vfs=self.svfs,
3023 3040 lockname=b"lock",
3024 3041 wait=wait,
3025 3042 releasefn=None,
3026 3043 acquirefn=self.invalidate,
3027 3044 desc=_(b'repository %s') % self.origroot,
3028 3045 )
3029 3046 self._lockref = weakref.ref(l)
3030 3047 return l
3031 3048
3032 3049 def wlock(self, wait=True):
3033 3050 """Lock the non-store parts of the repository (everything under
3034 3051 .hg except .hg/store) and return the lock (a weak reference to it is kept internally).
3035 3052
3036 3053 Use this before modifying files in .hg.
3037 3054
3038 3055 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3039 3056 'wlock' first to avoid a dead-lock hazard."""
3040 3057 l = self._wlockref() if self._wlockref else None
3041 3058 if l is not None and l.held:
3042 3059 l.lock()
3043 3060 return l
3044 3061
3045 3062 # We do not need to check for non-waiting lock acquisition. Such
3046 3063 # acquisition would not cause a deadlock, as it would just fail.
3047 3064 if wait and (
3048 3065 self.ui.configbool(b'devel', b'all-warnings')
3049 3066 or self.ui.configbool(b'devel', b'check-locks')
3050 3067 ):
3051 3068 if self._currentlock(self._lockref) is not None:
3052 3069 self.ui.develwarn(b'"wlock" acquired after "lock"')
3053 3070
3054 3071 def unlock():
3055 3072 if self.dirstate.pendingparentchange():
3056 3073 self.dirstate.invalidate()
3057 3074 else:
3058 3075 self.dirstate.write(None)
3059 3076
3060 3077 self._filecache[b'dirstate'].refresh()
3061 3078
3062 3079 l = self._lock(
3063 3080 self.vfs,
3064 3081 b"wlock",
3065 3082 wait,
3066 3083 unlock,
3067 3084 self.invalidatedirstate,
3068 3085 _(b'working directory of %s') % self.origroot,
3069 3086 )
3070 3087 self._wlockref = weakref.ref(l)
3071 3088 return l
3072 3089
3073 3090 def _currentlock(self, lockref):
3074 3091 """Returns the lock if it's held, or None if it's not."""
3075 3092 if lockref is None:
3076 3093 return None
3077 3094 l = lockref()
3078 3095 if l is None or not l.held:
3079 3096 return None
3080 3097 return l
3081 3098
3082 3099 def currentwlock(self):
3083 3100 """Returns the wlock if it's held, or None if it's not."""
3084 3101 return self._currentlock(self._wlockref)
3085 3102
3086 3103 def checkcommitpatterns(self, wctx, match, status, fail):
3087 3104 """check for commit arguments that aren't committable"""
3088 3105 if match.isexact() or match.prefix():
3089 3106 matched = set(status.modified + status.added + status.removed)
3090 3107
3091 3108 for f in match.files():
3092 3109 f = self.dirstate.normalize(f)
3093 3110 if f == b'.' or f in matched or f in wctx.substate:
3094 3111 continue
3095 3112 if f in status.deleted:
3096 3113 fail(f, _(b'file not found!'))
3097 3114 # Is it a directory that exists or used to exist?
3098 3115 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3099 3116 d = f + b'/'
3100 3117 for mf in matched:
3101 3118 if mf.startswith(d):
3102 3119 break
3103 3120 else:
3104 3121 fail(f, _(b"no match under directory!"))
3105 3122 elif f not in self.dirstate:
3106 3123 fail(f, _(b"file not tracked!"))
3107 3124
3108 3125 @unfilteredmethod
3109 3126 def commit(
3110 3127 self,
3111 3128 text=b"",
3112 3129 user=None,
3113 3130 date=None,
3114 3131 match=None,
3115 3132 force=False,
3116 3133 editor=None,
3117 3134 extra=None,
3118 3135 ):
3119 3136 """Add a new revision to current repository.
3120 3137
3121 3138 Revision information is gathered from the working directory,
3122 3139 match can be used to filter the committed files. If editor is
3123 3140 supplied, it is called to get a commit message.
3124 3141 """
3125 3142 if extra is None:
3126 3143 extra = {}
3127 3144
3128 3145 def fail(f, msg):
3129 3146 raise error.InputError(b'%s: %s' % (f, msg))
3130 3147
3131 3148 if not match:
3132 3149 match = matchmod.always()
3133 3150
3134 3151 if not force:
3135 3152 match.bad = fail
3136 3153
3137 3154 # lock() for recent changelog (see issue4368)
3138 3155 with self.wlock(), self.lock():
3139 3156 wctx = self[None]
3140 3157 merge = len(wctx.parents()) > 1
3141 3158
3142 3159 if not force and merge and not match.always():
3143 3160 raise error.Abort(
3144 3161 _(
3145 3162 b'cannot partially commit a merge '
3146 3163 b'(do not specify files or patterns)'
3147 3164 )
3148 3165 )
3149 3166
3150 3167 status = self.status(match=match, clean=force)
3151 3168 if force:
3152 3169 status.modified.extend(
3153 3170 status.clean
3154 3171 ) # mq may commit clean files
3155 3172
3156 3173 # check subrepos
3157 3174 subs, commitsubs, newstate = subrepoutil.precommit(
3158 3175 self.ui, wctx, status, match, force=force
3159 3176 )
3160 3177
3161 3178 # make sure all explicit patterns are matched
3162 3179 if not force:
3163 3180 self.checkcommitpatterns(wctx, match, status, fail)
3164 3181
3165 3182 cctx = context.workingcommitctx(
3166 3183 self, status, text, user, date, extra
3167 3184 )
3168 3185
3169 3186 ms = mergestatemod.mergestate.read(self)
3170 3187 mergeutil.checkunresolved(ms)
3171 3188
3172 3189 # internal config: ui.allowemptycommit
3173 3190 if cctx.isempty() and not self.ui.configbool(
3174 3191 b'ui', b'allowemptycommit'
3175 3192 ):
3176 3193 self.ui.debug(b'nothing to commit, clearing merge state\n')
3177 3194 ms.reset()
3178 3195 return None
3179 3196
3180 3197 if merge and cctx.deleted():
3181 3198 raise error.Abort(_(b"cannot commit merge with missing files"))
3182 3199
3183 3200 if editor:
3184 3201 cctx._text = editor(self, cctx, subs)
3185 3202 edited = text != cctx._text
3186 3203
3187 3204 # Save commit message in case this transaction gets rolled back
3188 3205 # (e.g. by a pretxncommit hook). Leave the content alone on
3189 3206 # the assumption that the user will use the same editor again.
3190 3207 msg_path = self.savecommitmessage(cctx._text)
3191 3208
3192 3209 # commit subs and write new state
3193 3210 if subs:
3194 3211 uipathfn = scmutil.getuipathfn(self)
3195 3212 for s in sorted(commitsubs):
3196 3213 sub = wctx.sub(s)
3197 3214 self.ui.status(
3198 3215 _(b'committing subrepository %s\n')
3199 3216 % uipathfn(subrepoutil.subrelpath(sub))
3200 3217 )
3201 3218 sr = sub.commit(cctx._text, user, date)
3202 3219 newstate[s] = (newstate[s][0], sr)
3203 3220 subrepoutil.writestate(self, newstate)
3204 3221
3205 3222 p1, p2 = self.dirstate.parents()
3206 3223 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3207 3224 try:
3208 3225 self.hook(
3209 3226 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3210 3227 )
3211 3228 with self.transaction(b'commit'):
3212 3229 ret = self.commitctx(cctx, True)
3213 3230 # update bookmarks, dirstate and mergestate
3214 3231 bookmarks.update(self, [p1, p2], ret)
3215 3232 cctx.markcommitted(ret)
3216 3233 ms.reset()
3217 3234 except: # re-raises
3218 3235 if edited:
3219 3236 self.ui.write(
3220 3237 _(b'note: commit message saved in %s\n') % msg_path
3221 3238 )
3222 3239 self.ui.write(
3223 3240 _(
3224 3241 b"note: use 'hg commit --logfile "
3225 3242 b"%s --edit' to reuse it\n"
3226 3243 )
3227 3244 % msg_path
3228 3245 )
3229 3246 raise
3230 3247
3231 3248 def commithook(unused_success):
3232 3249 # hack for commands that use a temporary commit (e.g. histedit):
3233 3250 # the temporary commit may have been stripped before the hook is run
3234 3251 if self.changelog.hasnode(ret):
3235 3252 self.hook(
3236 3253 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3237 3254 )
3238 3255
3239 3256 self._afterlock(commithook)
3240 3257 return ret
3241 3258
3242 3259 @unfilteredmethod
3243 3260 def commitctx(self, ctx, error=False, origctx=None):
3244 3261 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3245 3262
3246 3263 @unfilteredmethod
3247 3264 def destroying(self):
3248 3265 """Inform the repository that nodes are about to be destroyed.
3249 3266 Intended for use by strip and rollback, so there's a common
3250 3267 place for anything that has to be done before destroying history.
3251 3268
3252 3269 This is mostly useful for saving state that is in memory and waiting
3253 3270 to be flushed when the current lock is released. Because a call to
3254 3271 destroyed is imminent, the repo will be invalidated causing those
3255 3272 changes to stay in memory (waiting for the next unlock), or vanish
3256 3273 completely.
3257 3274 """
3258 3275 # When using the same lock to commit and strip, the phasecache is left
3259 3276 # dirty after committing. Then when we strip, the repo is invalidated,
3260 3277 # causing those changes to disappear.
3261 3278 if '_phasecache' in vars(self):
3262 3279 self._phasecache.write()
3263 3280
3264 3281 @unfilteredmethod
3265 3282 def destroyed(self):
3266 3283 """Inform the repository that nodes have been destroyed.
3267 3284 Intended for use by strip and rollback, so there's a common
3268 3285 place for anything that has to be done after destroying history.
3269 3286 """
3270 3287 # When one tries to:
3271 3288 # 1) destroy nodes thus calling this method (e.g. strip)
3272 3289 # 2) use phasecache somewhere (e.g. commit)
3273 3290 #
3274 3291 # then 2) will fail because the phasecache contains nodes that were
3275 3292 # removed. We can either remove phasecache from the filecache,
3276 3293 # causing it to reload next time it is accessed, or simply filter
3277 3294 # the removed nodes now and write the updated cache.
3278 3295 self._phasecache.filterunknown(self)
3279 3296 self._phasecache.write()
3280 3297
3281 3298 # refresh all repository caches
3282 3299 self.updatecaches()
3283 3300
3284 3301 # Ensure the persistent tag cache is updated. Doing it now
3285 3302 # means that the tag cache only has to worry about destroyed
3286 3303 # heads immediately after a strip/rollback. That in turn
3287 3304 # guarantees that "cachetip == currenttip" (comparing both rev
3288 3305 # and node) always means no nodes have been added or destroyed.
3289 3306
3290 3307 # XXX this is suboptimal when qrefresh'ing: we strip the current
3291 3308 # head, refresh the tag cache, then immediately add a new head.
3292 3309 # But I think doing it this way is necessary for the "instant
3293 3310 # tag cache retrieval" case to work.
3294 3311 self.invalidate()
3295 3312
3296 3313 def status(
3297 3314 self,
3298 3315 node1=b'.',
3299 3316 node2=None,
3300 3317 match=None,
3301 3318 ignored=False,
3302 3319 clean=False,
3303 3320 unknown=False,
3304 3321 listsubrepos=False,
3305 3322 ):
3306 3323 '''a convenience method that calls node1.status(node2)'''
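# With the defaults (node1=b'.', node2=None) this reports working
# directory changes, e.g. (illustrative):
#
#   st = repo.status()
#   changed = st.modified + st.added + st.removed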
3307 3324 return self[node1].status(
3308 3325 node2, match, ignored, clean, unknown, listsubrepos
3309 3326 )
3310 3327
3311 3328 def addpostdsstatus(self, ps):
3312 3329 """Add a callback to run within the wlock, at the point at which status
3313 3330 fixups happen.
3314 3331
3315 3332 On status completion, callback(wctx, status) will be called with the
3316 3333 wlock held, unless the dirstate has changed from underneath or the wlock
3317 3334 couldn't be grabbed.
3318 3335
3319 3336 Callbacks should not capture and use a cached copy of the dirstate --
3320 3337 it might change in the meantime. Instead, they should access the
3321 3338 dirstate via wctx.repo().dirstate.
3322 3339
3323 3340 This list is emptied out after each status run -- extensions should
3324 3341 make sure they add to this list each time dirstate.status is called.
3325 3342 Extensions should also make sure they don't call this for statuses
3326 3343 that don't involve the dirstate.
3327 3344 """
3328 3345
3329 3346 # The list is located here for uniqueness reasons -- it is actually
3330 3347 # managed by the workingctx, but that isn't unique per-repo.
3331 3348 self._postdsstatus.append(ps)
3332 3349
3333 3350 def postdsstatus(self):
3334 3351 """Used by workingctx to get the list of post-dirstate-status hooks."""
3335 3352 return self._postdsstatus
3336 3353
3337 3354 def clearpostdsstatus(self):
3338 3355 """Used by workingctx to clear post-dirstate-status hooks."""
3339 3356 del self._postdsstatus[:]
3340 3357
3341 3358 def heads(self, start=None):
3342 3359 if start is None:
3343 3360 cl = self.changelog
3344 3361 headrevs = reversed(cl.headrevs())
3345 3362 return [cl.node(rev) for rev in headrevs]
3346 3363
3347 3364 heads = self.changelog.heads(start)
3348 3365 # sort the output in rev descending order
3349 3366 return sorted(heads, key=self.changelog.rev, reverse=True)
3350 3367
3351 3368 def branchheads(self, branch=None, start=None, closed=False):
3352 3369 """return a (possibly filtered) list of heads for the given branch
3353 3370
3354 3371 Heads are returned in topological order, from newest to oldest.
3355 3372 If branch is None, use the dirstate branch.
3356 3373 If start is not None, return only heads reachable from start.
3357 3374 If closed is True, return heads that are marked as closed as well.
3358 3375 """
3359 3376 if branch is None:
3360 3377 branch = self[None].branch()
3361 3378 branches = self.branchmap()
3362 3379 if not branches.hasbranch(branch):
3363 3380 return []
3364 3381 # the cache returns heads ordered lowest to highest
3365 3382 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3366 3383 if start is not None:
3367 3384 # filter out the heads that cannot be reached from startrev
3368 3385 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3369 3386 bheads = [h for h in bheads if h in fbheads]
3370 3387 return bheads
3371 3388
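# branches() walks each given node down its first-parent chain until it
# reaches a merge or the root, collecting (chain_top, chain_bottom, p1, p2)
# tuples; it backs the legacy 'branches' wire protocol command.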
3372 3389 def branches(self, nodes):
3373 3390 if not nodes:
3374 3391 nodes = [self.changelog.tip()]
3375 3392 b = []
3376 3393 for n in nodes:
3377 3394 t = n
3378 3395 while True:
3379 3396 p = self.changelog.parents(n)
3380 3397 if p[1] != self.nullid or p[0] == self.nullid:
3381 3398 b.append((t, n, p[0], p[1]))
3382 3399 break
3383 3400 n = p[0]
3384 3401 return b
3385 3402
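# between() samples, for each (top, bottom) pair, the first-parent chain
# from top towards bottom at exponentially growing distances (1, 2, 4, ...);
# the legacy 'between' wire protocol command uses this to bisect for a
# common ancestor. Shape of the result (hypothetical nodes):
#
#   repo.between([(tip, root)]) -> [[node@dist1, node@dist2, node@dist4, ...]]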
3386 3403 def between(self, pairs):
3387 3404 r = []
3388 3405
3389 3406 for top, bottom in pairs:
3390 3407 n, l, i = top, [], 0
3391 3408 f = 1
3392 3409
3393 3410 while n != bottom and n != self.nullid:
3394 3411 p = self.changelog.parents(n)[0]
3395 3412 if i == f:
3396 3413 l.append(n)
3397 3414 f = f * 2
3398 3415 n = p
3399 3416 i += 1
3400 3417
3401 3418 r.append(l)
3402 3419
3403 3420 return r
3404 3421
3405 3422 def checkpush(self, pushop):
3406 3423 """Extensions can override this function if additional checks have
3407 3424 to be performed before pushing, or call it if they override push
3408 3425 command.
3409 3426 """
3410 3427
3411 3428 @unfilteredpropertycache
3412 3429 def prepushoutgoinghooks(self):
3413 3430 """Return util.hooks consists of a pushop with repo, remote, outgoing
3414 3431 methods, which are called before pushing changesets.
3415 3432 """
3416 3433 return util.hooks()
3417 3434
3418 3435 def pushkey(self, namespace, key, old, new):
3419 3436 try:
3420 3437 tr = self.currenttransaction()
3421 3438 hookargs = {}
3422 3439 if tr is not None:
3423 3440 hookargs.update(tr.hookargs)
3424 3441 hookargs = pycompat.strkwargs(hookargs)
3425 3442 hookargs['namespace'] = namespace
3426 3443 hookargs['key'] = key
3427 3444 hookargs['old'] = old
3428 3445 hookargs['new'] = new
3429 3446 self.hook(b'prepushkey', throw=True, **hookargs)
3430 3447 except error.HookAbort as exc:
3431 3448 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3432 3449 if exc.hint:
3433 3450 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3434 3451 return False
3435 3452 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3436 3453 ret = pushkey.push(self, namespace, key, old, new)
3437 3454
3438 3455 def runhook(unused_success):
3439 3456 self.hook(
3440 3457 b'pushkey',
3441 3458 namespace=namespace,
3442 3459 key=key,
3443 3460 old=old,
3444 3461 new=new,
3445 3462 ret=ret,
3446 3463 )
3447 3464
3448 3465 self._afterlock(runhook)
3449 3466 return ret
3450 3467
3451 3468 def listkeys(self, namespace):
3452 3469 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3453 3470 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3454 3471 values = pushkey.list(self, namespace)
3455 3472 self.hook(b'listkeys', namespace=namespace, values=values)
3456 3473 return values
3457 3474
3458 3475 def debugwireargs(self, one, two, three=None, four=None, five=None):
3459 3476 '''used to test argument passing over the wire'''
3460 3477 return b"%s %s %s %s %s" % (
3461 3478 one,
3462 3479 two,
3463 3480 pycompat.bytestr(three),
3464 3481 pycompat.bytestr(four),
3465 3482 pycompat.bytestr(five),
3466 3483 )
3467 3484
3468 3485 def savecommitmessage(self, text):
3469 3486 fp = self.vfs(b'last-message.txt', b'wb')
3470 3487 try:
3471 3488 fp.write(text)
3472 3489 finally:
3473 3490 fp.close()
3474 3491 return self.pathto(fp.name[len(self.root) + 1 :])
3475 3492
3476 3493 def register_wanted_sidedata(self, category):
3477 3494 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3478 3495 # Only revlogv2 repos can want sidedata.
3479 3496 return
3480 3497 self._wanted_sidedata.add(pycompat.bytestr(category))
3481 3498
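# A sidedata computer is registered per (revlog kind, category) and stored
# as a (keys, computer, flags) triple; replace=True is required to
# overwrite an existing registration, as enforced below.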
3482 3499 def register_sidedata_computer(
3483 3500 self, kind, category, keys, computer, flags, replace=False
3484 3501 ):
3485 3502 if kind not in revlogconst.ALL_KINDS:
3486 3503 msg = _(b"unexpected revlog kind '%s'.")
3487 3504 raise error.ProgrammingError(msg % kind)
3488 3505 category = pycompat.bytestr(category)
3489 3506 already_registered = category in self._sidedata_computers.get(kind, [])
3490 3507 if already_registered and not replace:
3491 3508 msg = _(
3492 3509 b"cannot register a sidedata computer twice for category '%s'."
3493 3510 )
3494 3511 raise error.ProgrammingError(msg % category)
3495 3512 if replace and not already_registered:
3496 3513 msg = _(
3497 3514 b"cannot replace a sidedata computer that isn't registered "
3498 3515 b"for category '%s'."
3499 3516 )
3500 3517 raise error.ProgrammingError(msg % category)
3501 3518 self._sidedata_computers.setdefault(kind, {})
3502 3519 self._sidedata_computers[kind][category] = (keys, computer, flags)
3503 3520
3504 3521
3505 3522 # used to avoid circular references so destructors work
3506 3523 def aftertrans(files):
3507 3524 renamefiles = [tuple(t) for t in files]
3508 3525
3509 3526 def a():
3510 3527 for vfs, src, dest in renamefiles:
3511 3528 # if src and dest refer to a same file, vfs.rename is a no-op,
3512 3529 # leaving both src and dest on disk. delete dest to make sure
3513 3530 # the rename couldn't be such a no-op.
3514 3531 vfs.tryunlink(dest)
3515 3532 try:
3516 3533 vfs.rename(src, dest)
3517 3534 except FileNotFoundError: # journal file does not yet exist
3518 3535 pass
3519 3536
3520 3537 return a
3521 3538
3522 3539
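# e.g. undoname(b'.hg/store/journal') == b'.hg/store/undo'; only the first
# occurrence of 'journal' in the basename is replaced.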
3523 def undoname(fn):
3540 def undoname(fn: bytes) -> bytes:
3524 3541 base, name = os.path.split(fn)
3525 3542 assert name.startswith(b'journal')
3526 3543 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3527 3544
3528 3545
3529 def instance(ui, path, create, intents=None, createopts=None):
3546 def instance(ui, path: bytes, create, intents=None, createopts=None):
3530 3547
3531 3548 # prevent cyclic import localrepo -> upgrade -> localrepo
3532 3549 from . import upgrade
3533 3550
3534 3551 localpath = urlutil.urllocalpath(path)
3535 3552 if create:
3536 3553 createrepository(ui, localpath, createopts=createopts)
3537 3554
3538 3555 def repo_maker():
3539 3556 return makelocalrepository(ui, localpath, intents=intents)
3540 3557
3541 3558 repo = repo_maker()
3542 3559 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3543 3560 return repo
3544 3561
3545 3562
3546 def islocal(path):
3563 def islocal(path: bytes) -> bool:
3547 3564 return True
3548 3565
3549 3566
3550 3567 def defaultcreateopts(ui, createopts=None):
3551 3568 """Populate the default creation options for a repository.
3552 3569
3553 3570 A dictionary of explicitly requested creation options can be passed
3554 3571 in. Missing keys will be populated.
3555 3572 """
3556 3573 createopts = dict(createopts or {})
3557 3574
3558 3575 if b'backend' not in createopts:
3559 3576 # experimental config: storage.new-repo-backend
3560 3577 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3561 3578
3562 3579 return createopts
3563 3580
3564 3581
3565 3582 def clone_requirements(ui, createopts, srcrepo):
3566 3583 """clone the requirements of a local repo for a local clone
3567 3584
3568 3585 The store requirements are unchanged while the working copy requirements
3569 3586 depend on the configuration.
3570 3587 """
3571 3588 target_requirements = set()
3572 3589 if not srcrepo.requirements:
3573 3590 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3574 3591 # with it.
3575 3592 return target_requirements
3576 3593 createopts = defaultcreateopts(ui, createopts=createopts)
3577 3594 for r in newreporequirements(ui, createopts):
3578 3595 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3579 3596 target_requirements.add(r)
3580 3597
3581 3598 for r in srcrepo.requirements:
3582 3599 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3583 3600 target_requirements.add(r)
3584 3601 return target_requirements
3585 3602
3586 3603
3587 3604 def newreporequirements(ui, createopts):
3588 3605 """Determine the set of requirements for a new local repository.
3589 3606
3590 3607 Extensions can wrap this function to specify custom requirements for
3591 3608 new repositories.
3592 3609 """
3593 3610
3594 3611 if b'backend' not in createopts:
3595 3612 raise error.ProgrammingError(
3596 3613 b'backend key not present in createopts; '
3597 3614 b'was defaultcreateopts() called?'
3598 3615 )
3599 3616
3600 3617 if createopts[b'backend'] != b'revlogv1':
3601 3618 raise error.Abort(
3602 3619 _(
3603 3620 b'unable to determine repository requirements for '
3604 3621 b'storage backend: %s'
3605 3622 )
3606 3623 % createopts[b'backend']
3607 3624 )
3608 3625
3609 3626 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3610 3627 if ui.configbool(b'format', b'usestore'):
3611 3628 requirements.add(requirementsmod.STORE_REQUIREMENT)
3612 3629 if ui.configbool(b'format', b'usefncache'):
3613 3630 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3614 3631 if ui.configbool(b'format', b'dotencode'):
3615 3632 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3616 3633
3617 3634 compengines = ui.configlist(b'format', b'revlog-compression')
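# The first configured engine that is both available and usable in revlog
# headers wins; the for/else below aborts when none qualifies.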
3618 3635 for compengine in compengines:
3619 3636 if compengine in util.compengines:
3620 3637 engine = util.compengines[compengine]
3621 3638 if engine.available() and engine.revlogheader():
3622 3639 break
3623 3640 else:
3624 3641 raise error.Abort(
3625 3642 _(
3626 3643 b'compression engines %s defined by '
3627 3644 b'format.revlog-compression not available'
3628 3645 )
3629 3646 % b', '.join(b'"%s"' % e for e in compengines),
3630 3647 hint=_(
3631 3648 b'run "hg debuginstall" to list available '
3632 3649 b'compression engines'
3633 3650 ),
3634 3651 )
3635 3652
3636 3653 # zlib is the historical default and doesn't need an explicit requirement.
3637 3654 if compengine == b'zstd':
3638 3655 requirements.add(b'revlog-compression-zstd')
3639 3656 elif compengine != b'zlib':
3640 3657 requirements.add(b'exp-compression-%s' % compengine)
3641 3658
3642 3659 if scmutil.gdinitconfig(ui):
3643 3660 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3644 3661 if ui.configbool(b'format', b'sparse-revlog'):
3645 3662 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3646 3663
3647 3664 # experimental config: format.use-dirstate-v2
3648 3665 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3649 3666 if ui.configbool(b'format', b'use-dirstate-v2'):
3650 3667 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3651 3668
3652 3669 # experimental config: format.exp-use-copies-side-data-changeset
3653 3670 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3654 3671 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3655 3672 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3656 3673 if ui.configbool(b'experimental', b'treemanifest'):
3657 3674 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3658 3675
3659 3676 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3660 3677 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3661 3678 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3662 3679
3663 3680 revlogv2 = ui.config(b'experimental', b'revlogv2')
3664 3681 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3665 3682 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3666 3683 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3667 3684 # experimental config: format.internal-phase
3668 3685 if ui.configbool(b'format', b'use-internal-phase'):
3669 3686 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3670 3687
3671 3688 # experimental config: format.exp-archived-phase
3672 3689 if ui.configbool(b'format', b'exp-archived-phase'):
3673 3690 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3674 3691
3675 3692 if createopts.get(b'narrowfiles'):
3676 3693 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3677 3694
3678 3695 if createopts.get(b'lfs'):
3679 3696 requirements.add(b'lfs')
3680 3697
3681 3698 if ui.configbool(b'format', b'bookmarks-in-store'):
3682 3699 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3683 3700
3684 3701 if ui.configbool(b'format', b'use-persistent-nodemap'):
3685 3702 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3686 3703
3687 3704 # if share-safe is enabled, let's create the new repository with the new
3688 3705 # requirement
3689 3706 if ui.configbool(b'format', b'use-share-safe'):
3690 3707 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3691 3708
3692 3709 # if we are creating a share-repo¹ we have to handle requirements
3693 3710 # differently.
3694 3711 #
3695 3712 # [1] (i.e. reusing the store from another repository, just having a
3696 3713 # working copy)
3697 3714 if b'sharedrepo' in createopts:
3698 3715 source_requirements = set(createopts[b'sharedrepo'].requirements)
3699 3716
3700 3717 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3701 3718 # share to an old school repository, we have to copy the
3702 3719 # requirements and hope for the best.
3703 3720 requirements = source_requirements
3704 3721 else:
3705 3722 # We have control on the working copy only, so "copy" the non
3706 3723 # working copy part over, ignoring previous logic.
3707 3724 to_drop = set()
3708 3725 for req in requirements:
3709 3726 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3710 3727 continue
3711 3728 if req in source_requirements:
3712 3729 continue
3713 3730 to_drop.add(req)
3714 3731 requirements -= to_drop
3715 3732 requirements |= source_requirements
3716 3733
3717 3734 if createopts.get(b'sharedrelative'):
3718 3735 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3719 3736 else:
3720 3737 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3721 3738
3722 3739 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3723 3740 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3724 3741 msg = _(b"ignoring unknown tracked key version: %d\n")
3725 3742 hint = _(b"see `hg help config.format.use-dirstate-tracked-hint-version`")
3726 3743 if version != 1:
3727 3744 ui.warn(msg % version, hint=hint)
3728 3745 else:
3729 3746 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3730 3747
3731 3748 return requirements
3732 3749
3733 3750
3734 3751 def checkrequirementscompat(ui, requirements):
3735 3752 """Checks compatibility of repository requirements enabled and disabled.
3736 3753
3737 3754 Returns a set of requirements which need to be dropped because dependent
3738 3755 requirements are not enabled. Also warns users about it."""
3739 3756
3740 3757 dropped = set()
3741 3758
3742 3759 if requirementsmod.STORE_REQUIREMENT not in requirements:
3743 3760 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3744 3761 ui.warn(
3745 3762 _(
3746 3763 b'ignoring enabled \'format.bookmarks-in-store\' config '
3747 3764 b'because it is incompatible with disabled '
3748 3765 b'\'format.usestore\' config\n'
3749 3766 )
3750 3767 )
3751 3768 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3752 3769
3753 3770 if (
3754 3771 requirementsmod.SHARED_REQUIREMENT in requirements
3755 3772 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3756 3773 ):
3757 3774 raise error.Abort(
3758 3775 _(
3759 3776 b"cannot create shared repository as source was created"
3760 3777 b" with 'format.usestore' config disabled"
3761 3778 )
3762 3779 )
3763 3780
3764 3781 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3765 3782 if ui.hasconfig(b'format', b'use-share-safe'):
3766 3783 msg = _(
3767 3784 b"ignoring enabled 'format.use-share-safe' config because "
3768 3785 b"it is incompatible with disabled 'format.usestore'"
3769 3786 b" config\n"
3770 3787 )
3771 3788 ui.warn(msg)
3772 3789 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3773 3790
3774 3791 return dropped
3775 3792
3776 3793
3777 3794 def filterknowncreateopts(ui, createopts):
3778 3795 """Filters a dict of repo creation options against options that are known.
3779 3796
3780 3797 Receives a dict of repo creation options and returns a dict of those
3781 3798 options that we don't know how to handle.
3782 3799
3783 3800 This function is called as part of repository creation. If the
3784 3801 returned dict contains any items, repository creation will not
3785 3802 be allowed, as it means there was a request to create a repository
3786 3803 with options not recognized by loaded code.
3787 3804
3788 3805 Extensions can wrap this function to filter out creation options
3789 3806 they know how to handle.
3790 3807 """
3791 3808 known = {
3792 3809 b'backend',
3793 3810 b'lfs',
3794 3811 b'narrowfiles',
3795 3812 b'sharedrepo',
3796 3813 b'sharedrelative',
3797 3814 b'shareditems',
3798 3815 b'shallowfilestore',
3799 3816 }
3800 3817
3801 3818 return {k: v for k, v in createopts.items() if k not in known}
3802 3819
3803 3820
3804 def createrepository(ui, path, createopts=None, requirements=None):
3821 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3805 3822 """Create a new repository in a vfs.
3806 3823
3807 3824 ``path`` path to the new repo's working directory.
3808 3825 ``createopts`` options for the new repository.
3809 3826 ``requirements`` predefined set of requirements.
3810 3827 (incompatible with ``createopts``)
3811 3828
3812 3829 The following keys for ``createopts`` are recognized:
3813 3830
3814 3831 backend
3815 3832 The storage backend to use.
3816 3833 lfs
3817 3834 Repository will be created with ``lfs`` requirement. The lfs extension
3818 3835 will automatically be loaded when the repository is accessed.
3819 3836 narrowfiles
3820 3837 Set up repository to support narrow file storage.
3821 3838 sharedrepo
3822 3839 Repository object from which storage should be shared.
3823 3840 sharedrelative
3824 3841 Boolean indicating if the path to the shared repo should be
3825 3842 stored as relative. By default, the pointer to the "parent" repo
3826 3843 is stored as an absolute path.
3827 3844 shareditems
3828 3845 Set of items to share to the new repository (in addition to storage).
3829 3846 shallowfilestore
3830 3847 Indicates that storage for files should be shallow (not all ancestor
3831 3848 revisions are known).
3832 3849 """
3833 3850
3834 3851 if requirements is not None:
3835 3852 if createopts is not None:
3836 3853 msg = b'cannot specify both createopts and requirements'
3837 3854 raise error.ProgrammingError(msg)
3838 3855 createopts = {}
3839 3856 else:
3840 3857 createopts = defaultcreateopts(ui, createopts=createopts)
3841 3858
3842 3859 unknownopts = filterknowncreateopts(ui, createopts)
3843 3860
3844 3861 if not isinstance(unknownopts, dict):
3845 3862 raise error.ProgrammingError(
3846 3863 b'filterknowncreateopts() did not return a dict'
3847 3864 )
3848 3865
3849 3866 if unknownopts:
3850 3867 raise error.Abort(
3851 3868 _(
3852 3869 b'unable to create repository because of unknown '
3853 3870 b'creation option: %s'
3854 3871 )
3855 3872 % b', '.join(sorted(unknownopts)),
3856 3873 hint=_(b'is a required extension not loaded?'),
3857 3874 )
3858 3875
3859 3876 requirements = newreporequirements(ui, createopts=createopts)
3860 3877 requirements -= checkrequirementscompat(ui, requirements)
3861 3878
3862 3879 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3863 3880
3864 3881 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3865 3882 if hgvfs.exists():
3866 3883 raise error.RepoError(_(b'repository %s already exists') % path)
3867 3884
3868 3885 if b'sharedrepo' in createopts:
3869 3886 sharedpath = createopts[b'sharedrepo'].sharedpath
3870 3887
3871 3888 if createopts.get(b'sharedrelative'):
3872 3889 try:
3873 3890 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3874 3891 sharedpath = util.pconvert(sharedpath)
3875 3892 except (IOError, ValueError) as e:
3876 3893 # ValueError is raised on Windows if the drive letters differ
3877 3894 # on each path.
3878 3895 raise error.Abort(
3879 3896 _(b'cannot calculate relative path'),
3880 3897 hint=stringutil.forcebytestr(e),
3881 3898 )
3882 3899
3883 3900 if not wdirvfs.exists():
3884 3901 wdirvfs.makedirs()
3885 3902
3886 3903 hgvfs.makedir(notindexed=True)
3887 3904 if b'sharedrepo' not in createopts:
3888 3905 hgvfs.mkdir(b'cache')
3889 3906 hgvfs.mkdir(b'wcache')
3890 3907
3891 3908 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3892 3909 if has_store and b'sharedrepo' not in createopts:
3893 3910 hgvfs.mkdir(b'store')
3894 3911
3895 3912 # We create an invalid changelog outside the store so very old
3896 3913 # Mercurial versions (which didn't know about the requirements
3897 3914 # file) encounter an error on reading the changelog. This
3898 3915 # effectively locks out old clients and prevents them from
3899 3916 # mucking with a repo in an unknown format.
3900 3917 #
3901 3918 # The revlog header has version 65535, which won't be recognized by
3902 3919 # such old clients.
3903 3920 hgvfs.append(
3904 3921 b'00changelog.i',
3905 3922 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3906 3923 b'layout',
3907 3924 )
3908 3925
3909 3926 # Filter the requirements into working copy and store ones
3910 3927 wcreq, storereq = scmutil.filterrequirements(requirements)
3911 3928 # write working copy ones
3912 3929 scmutil.writerequires(hgvfs, wcreq)
3913 3930 # If there are store requirements and the current repository
3914 3931 # is not a shared one, write stored requirements
3915 3932 # For a new shared repository, we don't need to write the store
3916 3933 # requirements, as they are already present in the shared store's requires file
3917 3934 if storereq and b'sharedrepo' not in createopts:
3918 3935 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3919 3936 scmutil.writerequires(storevfs, storereq)
3920 3937
3921 3938 # Write out file telling readers where to find the shared store.
3922 3939 if b'sharedrepo' in createopts:
3923 3940 hgvfs.write(b'sharedpath', sharedpath)
3924 3941
3925 3942 if createopts.get(b'shareditems'):
3926 3943 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3927 3944 hgvfs.write(b'shared', shared)
3928 3945
3929 3946
3930 3947 def poisonrepository(repo):
3931 3948 """Poison a repository instance so it can no longer be used."""
3932 3949 # Perform any cleanup on the instance.
3933 3950 repo.close()
3934 3951
3935 3952 # Our strategy is to replace the type of the object with one that
3936 3953 # has all attribute lookups result in error.
3937 3954 #
3938 3955 # But we have to allow the close() method because some constructors
3939 3956 # of repos call close() on repo references.
3940 3957 class poisonedrepository:
3941 3958 def __getattribute__(self, item):
3942 3959 if item == 'close':
3943 3960 return object.__getattribute__(self, item)
3944 3961
3945 3962 raise error.ProgrammingError(
3946 3963 b'repo instances should not be used after unshare'
3947 3964 )
3948 3965
3949 3966 def close(self):
3950 3967 pass
3951 3968
3952 3969 # We may have a repoview, which intercepts __setattr__. So be sure
3953 3970 # we operate at the lowest level possible.
3954 3971 object.__setattr__(repo, '__class__', poisonedrepository)