branchcache: unconditionally write delayed branchmap
marmoute
r52350:c0d51565 default
@@ -1,4034 +1,4035 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from . import (
32 32 bookmarks,
33 33 branchmap,
34 34 bundle2,
35 35 bundlecaches,
36 36 changegroup,
37 37 color,
38 38 commit,
39 39 context,
40 40 dirstate,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 policy,
58 58 pushkey,
59 59 pycompat,
60 60 rcutil,
61 61 repoview,
62 62 requirements as requirementsmod,
63 63 revlog,
64 64 revset,
65 65 revsetlang,
66 66 scmutil,
67 67 sparse,
68 68 store as storemod,
69 69 subrepoutil,
70 70 tags as tagsmod,
71 71 transaction,
72 72 txnutil,
73 73 util,
74 74 vfs as vfsmod,
75 75 wireprototypes,
76 76 )
77 77
78 78 from .interfaces import (
79 79 repository,
80 80 util as interfaceutil,
81 81 )
82 82
83 83 from .utils import (
84 84 hashutil,
85 85 procutil,
86 86 stringutil,
87 87 urlutil,
88 88 )
89 89
90 90 from .revlogutils import (
91 91 concurrency_checker as revlogchecker,
92 92 constants as revlogconst,
93 93 sidedata as sidedatamod,
94 94 )
95 95
96 96 release = lockmod.release
97 97 urlerr = util.urlerr
98 98 urlreq = util.urlreq
99 99
100 100 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
101 101 b"^((dirstate|narrowspec.dirstate).*|branch$)"
102 102 )
103 103
104 104 # set of (path, vfs-location) tuples. vfs-location is:
105 105 # - 'plain' for vfs relative paths
106 106 # - '' for svfs relative paths
107 107 _cachedfiles = set()
108 108
109 109
110 110 class _basefilecache(scmutil.filecache):
111 111 """All filecache usage on repo are done for logic that should be unfiltered"""
112 112
113 113 def __get__(self, repo, type=None):
114 114 if repo is None:
115 115 return self
116 116 # proxy to unfiltered __dict__ since filtered repo has no entry
117 117 unfi = repo.unfiltered()
118 118 try:
119 119 return unfi.__dict__[self.sname]
120 120 except KeyError:
121 121 pass
122 122 return super(_basefilecache, self).__get__(unfi, type)
123 123
124 124 def set(self, repo, value):
125 125 return super(_basefilecache, self).set(repo.unfiltered(), value)
126 126
127 127
128 128 class repofilecache(_basefilecache):
129 129 """filecache for files in .hg but outside of .hg/store"""
130 130
131 131 def __init__(self, *paths):
132 132 super(repofilecache, self).__init__(*paths)
133 133 for path in paths:
134 134 _cachedfiles.add((path, b'plain'))
135 135
136 136 def join(self, obj, fname):
137 137 return obj.vfs.join(fname)
138 138
139 139
140 140 class storecache(_basefilecache):
141 141 """filecache for files in the store"""
142 142
143 143 def __init__(self, *paths):
144 144 super(storecache, self).__init__(*paths)
145 145 for path in paths:
146 146 _cachedfiles.add((path, b''))
147 147
148 148 def join(self, obj, fname):
149 149 return obj.sjoin(fname)
150 150
151 151
152 152 class changelogcache(storecache):
153 153 """filecache for the changelog"""
154 154
155 155 def __init__(self):
156 156 super(changelogcache, self).__init__()
157 157 _cachedfiles.add((b'00changelog.i', b''))
158 158 _cachedfiles.add((b'00changelog.n', b''))
159 159
160 160 def tracked_paths(self, obj):
161 161 paths = [self.join(obj, b'00changelog.i')]
162 162 if obj.store.opener.options.get(b'persistent-nodemap', False):
163 163 paths.append(self.join(obj, b'00changelog.n'))
164 164 return paths
165 165
166 166
167 167 class manifestlogcache(storecache):
168 168 """filecache for the manifestlog"""
169 169
170 170 def __init__(self):
171 171 super(manifestlogcache, self).__init__()
172 172 _cachedfiles.add((b'00manifest.i', b''))
173 173 _cachedfiles.add((b'00manifest.n', b''))
174 174
175 175 def tracked_paths(self, obj):
176 176 paths = [self.join(obj, b'00manifest.i')]
177 177 if obj.store.opener.options.get(b'persistent-nodemap', False):
178 178 paths.append(self.join(obj, b'00manifest.n'))
179 179 return paths
180 180
181 181
182 182 class mixedrepostorecache(_basefilecache):
183 183 """filecache for a mix files in .hg/store and outside"""
184 184
185 185 def __init__(self, *pathsandlocations):
186 186 # scmutil.filecache only uses the path for passing back into our
187 187 # join(), so we can safely pass a list of paths and locations
188 188 super(mixedrepostorecache, self).__init__(*pathsandlocations)
189 189 _cachedfiles.update(pathsandlocations)
190 190
191 191 def join(self, obj, fnameandlocation):
192 192 fname, location = fnameandlocation
193 193 if location == b'plain':
194 194 return obj.vfs.join(fname)
195 195 else:
196 196 if location != b'':
197 197 raise error.ProgrammingError(
198 198 b'unexpected location: %s' % location
199 199 )
200 200 return obj.sjoin(fname)
201 201
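# A hypothetical usage sketch (decorator target and paths are
# illustrative, not from this file): entries mix .hg/-relative and
# store-relative paths, and the location tag selects the vfs in join().
#
#     @mixedrepostorecache((b'bookmarks', b'plain'), (b'phaseroots', b''))
#     def _someproperty(self):
#         ...
#
# b'plain' resolves against repo.vfs (.hg/), b'' against the store.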
202 202
203 203 def isfilecached(repo, name):
204 204 """check if a repo has already cached "name" filecache-ed property
205 205
206 206 This returns (cachedobj-or-None, iscached) tuple.
207 207 """
208 208 cacheentry = repo.unfiltered()._filecache.get(name, None)
209 209 if not cacheentry:
210 210 return None, False
211 211 return cacheentry.obj, True
212 212
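# Usage sketch: callers can inspect a filecache-ed property such as
# b'changelog' without triggering its (potentially expensive)
# computation when it is not loaded yet.
#
#     obj, cached = isfilecached(repo, b'changelog')
#     if cached:
#         ...  # obj is the live cached value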
213 213
214 214 class unfilteredpropertycache(util.propertycache):
215 215 """propertycache that apply to unfiltered repo only"""
216 216
217 217 def __get__(self, repo, type=None):
218 218 unfi = repo.unfiltered()
219 219 if unfi is repo:
220 220 return super(unfilteredpropertycache, self).__get__(unfi)
221 221 return getattr(unfi, self.name)
222 222
223 223
224 224 class filteredpropertycache(util.propertycache):
225 225 """propertycache that must take filtering in account"""
226 226
227 227 def cachevalue(self, obj, value):
228 228 object.__setattr__(obj, self.name, value)
229 229
230 230
231 231 def hasunfilteredcache(repo, name):
232 232 """check if a repo has an unfilteredpropertycache value for <name>"""
233 233 return name in vars(repo.unfiltered())
234 234
235 235
236 236 def unfilteredmethod(orig):
237 237 """decorate method that always need to be run on unfiltered version"""
238 238
239 239 @functools.wraps(orig)
240 240 def wrapper(repo, *args, **kwargs):
241 241 return orig(repo.unfiltered(), *args, **kwargs)
242 242
243 243 return wrapper
244 244
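# Minimal sketch of the decorator in use (method body is illustrative):
# the wrapped method always receives repo.unfiltered() as its first
# argument, even when invoked on a filtered repoview.
#
#     class myrepo(localrepository):
#         @unfilteredmethod
#         def destroyed(self):
#             # 'self' is guaranteed to be the unfiltered repo here
#             ...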
245 245
246 246 moderncaps = {
247 247 b'lookup',
248 248 b'branchmap',
249 249 b'pushkey',
250 250 b'known',
251 251 b'getbundle',
252 252 b'unbundle',
253 253 }
254 254 legacycaps = moderncaps.union({b'changegroupsubset'})
255 255
256 256
257 257 @interfaceutil.implementer(repository.ipeercommandexecutor)
258 258 class localcommandexecutor:
259 259 def __init__(self, peer):
260 260 self._peer = peer
261 261 self._sent = False
262 262 self._closed = False
263 263
264 264 def __enter__(self):
265 265 return self
266 266
267 267 def __exit__(self, exctype, excvalue, exctb):
268 268 self.close()
269 269
270 270 def callcommand(self, command, args):
271 271 if self._sent:
272 272 raise error.ProgrammingError(
273 273 b'callcommand() cannot be used after sendcommands()'
274 274 )
275 275
276 276 if self._closed:
277 277 raise error.ProgrammingError(
278 278 b'callcommand() cannot be used after close()'
279 279 )
280 280
281 281 # We don't need to support anything fancy. Just call the named
282 282 # method on the peer and return a resolved future.
283 283 fn = getattr(self._peer, pycompat.sysstr(command))
284 284
285 285 f = futures.Future()
286 286
287 287 try:
288 288 result = fn(**pycompat.strkwargs(args))
289 289 except Exception:
290 290 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
291 291 else:
292 292 f.set_result(result)
293 293
294 294 return f
295 295
296 296 def sendcommands(self):
297 297 self._sent = True
298 298
299 299 def close(self):
300 300 self._closed = True
301 301
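# Usage sketch of the executor protocol (hypothetical caller code):
# commands are requested via callcommand() and resolve to futures; for
# this local executor the returned future is already resolved.
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()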
302 302
303 303 @interfaceutil.implementer(repository.ipeercommands)
304 304 class localpeer(repository.peer):
305 305 '''peer for a local repo; reflects only the most recent API'''
306 306
307 307 def __init__(self, repo, caps=None, path=None, remotehidden=False):
308 308 super(localpeer, self).__init__(
309 309 repo.ui, path=path, remotehidden=remotehidden
310 310 )
311 311
312 312 if caps is None:
313 313 caps = moderncaps.copy()
314 314 if remotehidden:
315 315 self._repo = repo.filtered(b'served.hidden')
316 316 else:
317 317 self._repo = repo.filtered(b'served')
318 318 if repo._wanted_sidedata:
319 319 formatted = bundle2.format_remote_wanted_sidedata(repo)
320 320 caps.add(b'exp-wanted-sidedata=' + formatted)
321 321
322 322 self._caps = repo._restrictcapabilities(caps)
323 323
324 324 # Begin of _basepeer interface.
325 325
326 326 def url(self):
327 327 return self._repo.url()
328 328
329 329 def local(self):
330 330 return self._repo
331 331
332 332 def canpush(self):
333 333 return True
334 334
335 335 def close(self):
336 336 self._repo.close()
337 337
338 338 # End of _basepeer interface.
339 339
340 340 # Begin of _basewirecommands interface.
341 341
342 342 def branchmap(self):
343 343 return self._repo.branchmap()
344 344
345 345 def capabilities(self):
346 346 return self._caps
347 347
348 348 def get_cached_bundle_inline(self, path):
349 349 # not needed with local peer
350 350 raise NotImplementedError
351 351
352 352 def clonebundles(self):
353 353 return bundlecaches.get_manifest(self._repo)
354 354
355 355 def debugwireargs(self, one, two, three=None, four=None, five=None):
356 356 """Used to test argument passing over the wire"""
357 357 return b"%s %s %s %s %s" % (
358 358 one,
359 359 two,
360 360 pycompat.bytestr(three),
361 361 pycompat.bytestr(four),
362 362 pycompat.bytestr(five),
363 363 )
364 364
365 365 def getbundle(
366 366 self,
367 367 source,
368 368 heads=None,
369 369 common=None,
370 370 bundlecaps=None,
371 371 remote_sidedata=None,
372 372 **kwargs,
373 373 ):
374 374 chunks = exchange.getbundlechunks(
375 375 self._repo,
376 376 source,
377 377 heads=heads,
378 378 common=common,
379 379 bundlecaps=bundlecaps,
380 380 remote_sidedata=remote_sidedata,
381 381 **kwargs,
382 382 )[1]
383 383 cb = util.chunkbuffer(chunks)
384 384
385 385 if exchange.bundle2requested(bundlecaps):
386 386 # When requesting a bundle2, getbundle returns a stream to make the
387 387 # wire level function happier. We need to build a proper object
388 388 # from it in local peer.
389 389 return bundle2.getunbundler(self.ui, cb)
390 390 else:
391 391 return changegroup.getunbundler(b'01', cb, None)
392 392
393 393 def heads(self):
394 394 return self._repo.heads()
395 395
396 396 def known(self, nodes):
397 397 return self._repo.known(nodes)
398 398
399 399 def listkeys(self, namespace):
400 400 return self._repo.listkeys(namespace)
401 401
402 402 def lookup(self, key):
403 403 return self._repo.lookup(key)
404 404
405 405 def pushkey(self, namespace, key, old, new):
406 406 return self._repo.pushkey(namespace, key, old, new)
407 407
408 408 def stream_out(self):
409 409 raise error.Abort(_(b'cannot perform stream clone against local peer'))
410 410
411 411 def unbundle(self, bundle, heads, url):
412 412 """apply a bundle on a repo
413 413
414 414 This function handles the repo locking itself."""
415 415 try:
416 416 try:
417 417 bundle = exchange.readbundle(self.ui, bundle, None)
418 418 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
419 419 if hasattr(ret, 'getchunks'):
420 420 # This is a bundle20 object, turn it into an unbundler.
421 421 # This little dance should be dropped eventually when the
422 422 # API is finally improved.
423 423 stream = util.chunkbuffer(ret.getchunks())
424 424 ret = bundle2.getunbundler(self.ui, stream)
425 425 return ret
426 426 except Exception as exc:
427 427 # If the exception contains output salvaged from a bundle2
428 428 # reply, we need to make sure it is printed before continuing
429 429 # to fail. So we build a bundle2 with such output and consume
430 430 # it directly.
431 431 #
432 432 # This is not very elegant but allows a "simple" solution for
433 433 # issue4594
434 434 output = getattr(exc, '_bundle2salvagedoutput', ())
435 435 if output:
436 436 bundler = bundle2.bundle20(self._repo.ui)
437 437 for out in output:
438 438 bundler.addpart(out)
439 439 stream = util.chunkbuffer(bundler.getchunks())
440 440 b = bundle2.getunbundler(self.ui, stream)
441 441 bundle2.processbundle(self._repo, b)
442 442 raise
443 443 except error.PushRaced as exc:
444 444 raise error.ResponseError(
445 445 _(b'push failed:'), stringutil.forcebytestr(exc)
446 446 )
447 447
448 448 # End of _basewirecommands interface.
449 449
450 450 # Begin of peer interface.
451 451
452 452 def commandexecutor(self):
453 453 return localcommandexecutor(self)
454 454
455 455 # End of peer interface.
456 456
457 457
458 458 @interfaceutil.implementer(repository.ipeerlegacycommands)
459 459 class locallegacypeer(localpeer):
460 460 """peer extension which implements legacy methods too; used for tests with
461 461 restricted capabilities"""
462 462
463 463 def __init__(self, repo, path=None, remotehidden=False):
464 464 super(locallegacypeer, self).__init__(
465 465 repo, caps=legacycaps, path=path, remotehidden=remotehidden
466 466 )
467 467
468 468 # Begin of baselegacywirecommands interface.
469 469
470 470 def between(self, pairs):
471 471 return self._repo.between(pairs)
472 472
473 473 def branches(self, nodes):
474 474 return self._repo.branches(nodes)
475 475
476 476 def changegroup(self, nodes, source):
477 477 outgoing = discovery.outgoing(
478 478 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
479 479 )
480 480 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
481 481
482 482 def changegroupsubset(self, bases, heads, source):
483 483 outgoing = discovery.outgoing(
484 484 self._repo, missingroots=bases, ancestorsof=heads
485 485 )
486 486 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
487 487
488 488 # End of baselegacywirecommands interface.
489 489
490 490
491 491 # Functions receiving (ui, features) that extensions can register to impact
492 492 # the ability to load repositories with custom requirements. Only
493 493 # functions defined in loaded extensions are called.
494 494 #
495 495 # Each function receives a set of requirement strings that the repository
496 496 # is capable of opening. Functions will typically add elements to the
497 497 # set to reflect that the extension knows how to handle those requirements.
498 498 featuresetupfuncs = set()
499 499
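# Sketch of how an extension registers such a function (hypothetical
# extension module; gathersupportedrequirements() only calls entries
# whose __module__ belongs to a loaded extension):
#
#     def featuresetup(ui, features):
#         features.add(b'exp-some-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)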
500 500
501 501 def _getsharedvfs(hgvfs, requirements):
502 502 """returns the vfs object pointing to root of shared source
503 503 repo for a shared repository
504 504
505 505 hgvfs is vfs pointing at .hg/ of current repo (shared one)
506 506 requirements is a set of requirements of current repo (shared one)
507 507 """
508 508 # The ``shared`` or ``relshared`` requirements indicate the
509 509 # store lives in the path contained in the ``.hg/sharedpath`` file.
510 510 # This is an absolute path for ``shared`` and relative to
511 511 # ``.hg/`` for ``relshared``.
512 512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
513 513 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
514 514 sharedpath = util.normpath(hgvfs.join(sharedpath))
515 515
516 516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
517 517
518 518 if not sharedvfs.exists():
519 519 raise error.RepoError(
520 520 _(b'.hg/sharedpath points to nonexistent directory %s')
521 521 % sharedvfs.base
522 522 )
523 523 return sharedvfs
524 524
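# Example (illustrative paths): with the plain `shared` requirement,
# .hg/sharedpath holds an absolute path such as /srv/main/.hg; with
# `relshared` it holds a relative path such as ../main/.hg, which is
# joined against this repository's .hg/ before use.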
525 525
526 526 def _readrequires(vfs, allowmissing):
527 527 """reads the require file present at root of this vfs
528 528 and return a set of requirements
529 529
530 530 If allowmissing is True, we suppress FileNotFoundError if raised"""
531 531 # requires file contains a newline-delimited list of
532 532 # features/capabilities the opener (us) must have in order to use
533 533 # the repository. This file was introduced in Mercurial 0.9.2,
534 534 # which means very old repositories may not have one. We assume
535 535 # a missing file translates to no requirements.
536 536 read = vfs.tryread if allowmissing else vfs.read
537 537 return set(read(b'requires').splitlines())
538 538
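# Example: a requires file is a plain newline-delimited list of feature
# names, e.g.
#
#     generaldelta
#     revlogv1
#     store
#
# for which _readrequires() returns
# {b'generaldelta', b'revlogv1', b'store'}.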
539 539
540 540 def makelocalrepository(baseui, path: bytes, intents=None):
541 541 """Create a local repository object.
542 542
543 543 Given arguments needed to construct a local repository, this function
544 544 performs various early repository loading functionality (such as
545 545 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
546 546 the repository can be opened, derives a type suitable for representing
547 547 that repository, and returns an instance of it.
548 548
549 549 The returned object conforms to the ``repository.completelocalrepository``
550 550 interface.
551 551
552 552 The repository type is derived by calling a series of factory functions
553 553 for each aspect/interface of the final repository. These are defined by
554 554 ``REPO_INTERFACES``.
555 555
556 556 Each factory function is called to produce a type implementing a specific
557 557 interface. The cumulative list of returned types will be combined into a
558 558 new type and that type will be instantiated to represent the local
559 559 repository.
560 560
561 561 The factory functions each receive various state that may be consulted
562 562 as part of deriving a type.
563 563
564 564 Extensions should wrap these factory functions to customize repository type
565 565 creation. Note that an extension's wrapped function may be called even if
566 566 that extension is not loaded for the repo being constructed. Extensions
567 567 should check if their ``__name__`` appears in the
568 568 ``extensionmodulenames`` set passed to the factory function and no-op if
569 569 not.
570 570 """
571 571 ui = baseui.copy()
572 572 # Prevent copying repo configuration.
573 573 ui.copy = baseui.copy
574 574
575 575 # Working directory VFS rooted at repository root.
576 576 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
577 577
578 578 # Main VFS for .hg/ directory.
579 579 hgpath = wdirvfs.join(b'.hg')
580 580 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
581 581 # Whether this repository is a shared one or not
582 582 shared = False
583 583 # If this repository is shared, the vfs pointing to the shared repo
584 584 sharedvfs = None
585 585
586 586 # The .hg/ path should exist and should be a directory. All other
587 587 # cases are errors.
588 588 if not hgvfs.isdir():
589 589 try:
590 590 hgvfs.stat()
591 591 except FileNotFoundError:
592 592 pass
593 593 except ValueError as e:
594 594 # Can be raised on Python 3.8 when path is invalid.
595 595 raise error.Abort(
596 596 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
597 597 )
598 598
599 599 raise error.RepoError(_(b'repository %s not found') % path)
600 600
601 601 requirements = _readrequires(hgvfs, True)
602 602 shared = (
603 603 requirementsmod.SHARED_REQUIREMENT in requirements
604 604 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
605 605 )
606 606 storevfs = None
607 607 if shared:
608 608 # This is a shared repo
609 609 sharedvfs = _getsharedvfs(hgvfs, requirements)
610 610 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
611 611 else:
612 612 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
613 613
614 614 # if .hg/requires contains the sharesafe requirement, it means
615 615 # there exists a `.hg/store/requires` too and we should read it
616 616 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
617 617 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
618 618 # is not present; see checkrequirementscompat() for that
619 619 #
620 620 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
621 621 # repository was shared the old way. We check the share source .hg/requires
622 622 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
623 623 # to be reshared
624 624 hint = _(b"see `hg help config.format.use-share-safe` for more information")
625 625 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
626 626 if (
627 627 shared
628 628 and requirementsmod.SHARESAFE_REQUIREMENT
629 629 not in _readrequires(sharedvfs, True)
630 630 ):
631 631 mismatch_warn = ui.configbool(
632 632 b'share', b'safe-mismatch.source-not-safe.warn'
633 633 )
634 634 mismatch_config = ui.config(
635 635 b'share', b'safe-mismatch.source-not-safe'
636 636 )
637 637 mismatch_verbose_upgrade = ui.configbool(
638 638 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
639 639 )
640 640 if mismatch_config in (
641 641 b'downgrade-allow',
642 642 b'allow',
643 643 b'downgrade-abort',
644 644 ):
645 645 # prevent cyclic import localrepo -> upgrade -> localrepo
646 646 from . import upgrade
647 647
648 648 upgrade.downgrade_share_to_non_safe(
649 649 ui,
650 650 hgvfs,
651 651 sharedvfs,
652 652 requirements,
653 653 mismatch_config,
654 654 mismatch_warn,
655 655 mismatch_verbose_upgrade,
656 656 )
657 657 elif mismatch_config == b'abort':
658 658 raise error.Abort(
659 659 _(b"share source does not support share-safe requirement"),
660 660 hint=hint,
661 661 )
662 662 else:
663 663 raise error.Abort(
664 664 _(
665 665 b"share-safe mismatch with source.\nUnrecognized"
666 666 b" value '%s' of `share.safe-mismatch.source-not-safe`"
667 667 b" set."
668 668 )
669 669 % mismatch_config,
670 670 hint=hint,
671 671 )
672 672 else:
673 673 requirements |= _readrequires(storevfs, False)
674 674 elif shared:
675 675 sourcerequires = _readrequires(sharedvfs, False)
676 676 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
677 677 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
678 678 mismatch_warn = ui.configbool(
679 679 b'share', b'safe-mismatch.source-safe.warn'
680 680 )
681 681 mismatch_verbose_upgrade = ui.configbool(
682 682 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
683 683 )
684 684 if mismatch_config in (
685 685 b'upgrade-allow',
686 686 b'allow',
687 687 b'upgrade-abort',
688 688 ):
689 689 # prevent cyclic import localrepo -> upgrade -> localrepo
690 690 from . import upgrade
691 691
692 692 upgrade.upgrade_share_to_safe(
693 693 ui,
694 694 hgvfs,
695 695 storevfs,
696 696 requirements,
697 697 mismatch_config,
698 698 mismatch_warn,
699 699 mismatch_verbose_upgrade,
700 700 )
701 701 elif mismatch_config == b'abort':
702 702 raise error.Abort(
703 703 _(
704 704 b'version mismatch: source uses share-safe'
705 705 b' functionality while the current share does not'
706 706 ),
707 707 hint=hint,
708 708 )
709 709 else:
710 710 raise error.Abort(
711 711 _(
712 712 b"share-safe mismatch with source.\nUnrecognized"
713 713 b" value '%s' of `share.safe-mismatch.source-safe` set."
714 714 )
715 715 % mismatch_config,
716 716 hint=hint,
717 717 )
718 718
719 719 # The .hg/hgrc file may load extensions or contain config options
720 720 # that influence repository construction. Attempt to load it and
721 721 # process any new extensions that it may have pulled in.
722 722 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
723 723 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
724 724 extensions.loadall(ui)
725 725 extensions.populateui(ui)
726 726
727 727 # Set of module names of extensions loaded for this repository.
728 728 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
729 729
730 730 supportedrequirements = gathersupportedrequirements(ui)
731 731
732 732 # We first validate the requirements are known.
733 733 ensurerequirementsrecognized(requirements, supportedrequirements)
734 734
735 735 # Then we validate that the known set is reasonable to use together.
736 736 ensurerequirementscompatible(ui, requirements)
737 737
738 738 # TODO there are unhandled edge cases related to opening repositories with
739 739 # shared storage. If storage is shared, we should also test for requirements
740 740 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
741 741 # that repo, as that repo may load extensions needed to open it. This is a
742 742 # bit complicated because we don't want the other hgrc to overwrite settings
743 743 # in this hgrc.
744 744 #
745 745 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
746 746 # file when sharing repos. But if a requirement is added after the share is
747 747 # performed, thereby introducing a new requirement for the opener, we will
748 748 # not see that and could encounter a run-time error interacting with
749 749 # that shared store since it has an unknown-to-us requirement.
750 750
751 751 # At this point, we know we should be capable of opening the repository.
752 752 # Now get on with doing that.
753 753
754 754 features = set()
755 755
756 756 # The "store" part of the repository holds versioned data. How it is
757 757 # accessed is determined by various requirements. If `shared` or
758 758 # `relshared` requirements are present, this indicates the current repository
759 759 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
760 760 if shared:
761 761 storebasepath = sharedvfs.base
762 762 cachepath = sharedvfs.join(b'cache')
763 763 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
764 764 else:
765 765 storebasepath = hgvfs.base
766 766 cachepath = hgvfs.join(b'cache')
767 767 wcachepath = hgvfs.join(b'wcache')
768 768
769 769 # The store has changed over time and the exact layout is dictated by
770 770 # requirements. The store interface abstracts differences across all
771 771 # of them.
772 772 store = makestore(
773 773 requirements,
774 774 storebasepath,
775 775 lambda base: vfsmod.vfs(base, cacheaudited=True),
776 776 )
777 777 hgvfs.createmode = store.createmode
778 778
779 779 storevfs = store.vfs
780 780 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
781 781
782 782 if (
783 783 requirementsmod.REVLOGV2_REQUIREMENT in requirements
784 784 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
785 785 ):
786 786 features.add(repository.REPO_FEATURE_SIDE_DATA)
787 787 # the revlogv2 docket introduced race condition that we need to fix
788 788 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
789 789
790 790 # The cache vfs is used to manage cache files.
791 791 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
792 792 cachevfs.createmode = store.createmode
793 793 # The cache vfs is used to manage cache files related to the working copy
794 794 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
795 795 wcachevfs.createmode = store.createmode
796 796
797 797 # Now resolve the type for the repository object. We do this by repeatedly
798 798 # calling a factory function to produce types for specific aspects of the
799 799 # repo's operation. The aggregate returned types are used as base classes
800 800 # for a dynamically-derived type, which will represent our new repository.
801 801
802 802 bases = []
803 803 extrastate = {}
804 804
805 805 for iface, fn in REPO_INTERFACES:
806 806 # We pass all potentially useful state to give extensions tons of
807 807 # flexibility.
808 808 typ = fn()(
809 809 ui=ui,
810 810 intents=intents,
811 811 requirements=requirements,
812 812 features=features,
813 813 wdirvfs=wdirvfs,
814 814 hgvfs=hgvfs,
815 815 store=store,
816 816 storevfs=storevfs,
817 817 storeoptions=storevfs.options,
818 818 cachevfs=cachevfs,
819 819 wcachevfs=wcachevfs,
820 820 extensionmodulenames=extensionmodulenames,
821 821 extrastate=extrastate,
822 822 baseclasses=bases,
823 823 )
824 824
825 825 if not isinstance(typ, type):
826 826 raise error.ProgrammingError(
827 827 b'unable to construct type for %s' % iface
828 828 )
829 829
830 830 bases.append(typ)
831 831
832 832 # type() allows you to use characters in type names that wouldn't be
833 833 # recognized as Python symbols in source code. We abuse that to add
834 834 # rich information about our constructed repo.
835 835 name = pycompat.sysstr(
836 836 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
837 837 )
838 838
839 839 cls = type(name, tuple(bases), {})
840 840
841 841 return cls(
842 842 baseui=baseui,
843 843 ui=ui,
844 844 origroot=path,
845 845 wdirvfs=wdirvfs,
846 846 hgvfs=hgvfs,
847 847 requirements=requirements,
848 848 supportedrequirements=supportedrequirements,
849 849 sharedpath=storebasepath,
850 850 store=store,
851 851 cachevfs=cachevfs,
852 852 wcachevfs=wcachevfs,
853 853 features=features,
854 854 intents=intents,
855 855 )
856 856
857 857
858 858 def loadhgrc(
859 859 ui,
860 860 wdirvfs: vfsmod.vfs,
861 861 hgvfs: vfsmod.vfs,
862 862 requirements,
863 863 sharedvfs: Optional[vfsmod.vfs] = None,
864 864 ):
865 865 """Load hgrc files/content into a ui instance.
866 866
867 867 This is called during repository opening to load any additional
868 868 config files or settings relevant to the current repository.
869 869
870 870 Returns a bool indicating whether any additional configs were loaded.
871 871
872 872 Extensions should monkeypatch this function to modify how per-repo
873 873 configs are loaded. For example, an extension may wish to pull in
874 874 configs from alternate files or sources.
875 875
876 876 sharedvfs is the vfs object pointing to the source repo if the current one is a
877 877 shared one
878 878 """
879 879 if not rcutil.use_repo_hgrc():
880 880 return False
881 881
882 882 ret = False
883 883 # first load config from the shared source if we have to
884 884 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
885 885 try:
886 886 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
887 887 ret = True
888 888 except IOError:
889 889 pass
890 890
891 891 try:
892 892 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
893 893 ret = True
894 894 except IOError:
895 895 pass
896 896
897 897 try:
898 898 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
899 899 ret = True
900 900 except IOError:
901 901 pass
902 902
903 903 return ret
904 904
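# Sketch of the monkeypatching described in the docstring (hypothetical
# extension code; the b'hgrc-extra' file name is made up):
#
#     def _loadhgrcwrapper(orig, ui, wdirvfs, hgvfs, requirements, *args):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcwrapper)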
905 905
906 906 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
907 907 """Perform additional actions after .hg/hgrc is loaded.
908 908
909 909 This function is called during repository loading immediately after
910 910 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
911 911
912 912 The function can be used to validate configs, automatically add
913 913 options (including extensions) based on requirements, etc.
914 914 """
915 915
916 916 # Map of requirements to list of extensions to load automatically when
917 917 # requirement is present.
918 918 autoextensions = {
919 919 b'git': [b'git'],
920 920 b'largefiles': [b'largefiles'],
921 921 b'lfs': [b'lfs'],
922 922 }
923 923
924 924 for requirement, names in sorted(autoextensions.items()):
925 925 if requirement not in requirements:
926 926 continue
927 927
928 928 for name in names:
929 929 if not ui.hasconfig(b'extensions', name):
930 930 ui.setconfig(b'extensions', name, b'', source=b'autoload')
931 931
932 932
933 933 def gathersupportedrequirements(ui):
934 934 """Determine the complete set of recognized requirements."""
935 935 # Start with all requirements supported by this file.
936 936 supported = set(localrepository._basesupported)
937 937
938 938 # Execute ``featuresetupfuncs`` entries if they belong to an extension
939 939 # relevant to this ui instance.
940 940 modules = {m.__name__ for n, m in extensions.extensions(ui)}
941 941
942 942 for fn in featuresetupfuncs:
943 943 if fn.__module__ in modules:
944 944 fn(ui, supported)
945 945
946 946 # Add derived requirements from registered compression engines.
947 947 for name in util.compengines:
948 948 engine = util.compengines[name]
949 949 if engine.available() and engine.revlogheader():
950 950 supported.add(b'exp-compression-%s' % name)
951 951 if engine.name() == b'zstd':
952 952 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
953 953
954 954 return supported
955 955
956 956
957 957 def ensurerequirementsrecognized(requirements, supported):
958 958 """Validate that a set of local requirements is recognized.
959 959
960 960 Receives a set of requirements. Raises an ``error.RequirementError`` if
961 961 there exists any requirement in that set that currently loaded code
962 962 doesn't recognize.
963 963
964 964 Returns ``None`` on success.
965 965 """
966 966 missing = set()
967 967
968 968 for requirement in requirements:
969 969 if requirement in supported:
970 970 continue
971 971
972 972 if not requirement or not requirement[0:1].isalnum():
973 973 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
974 974
975 975 missing.add(requirement)
976 976
977 977 if missing:
978 978 raise error.RequirementError(
979 979 _(b'repository requires features unknown to this Mercurial: %s')
980 980 % b' '.join(sorted(missing)),
981 981 hint=_(
982 982 b'see https://mercurial-scm.org/wiki/MissingRequirement '
983 983 b'for more information'
984 984 ),
985 985 )
986 986
987 987
988 988 def ensurerequirementscompatible(ui, requirements):
989 989 """Validates that a set of recognized requirements is mutually compatible.
990 990
991 991 Some requirements may not be compatible with others or require
992 992 config options that aren't enabled. This function is called during
993 993 repository opening to ensure that the set of requirements needed
994 994 to open a repository is sane and compatible with config options.
995 995
996 996 Extensions can monkeypatch this function to perform additional
997 997 checking.
998 998
999 999 ``error.RepoError`` should be raised on failure.
1000 1000 """
1001 1001 if (
1002 1002 requirementsmod.SPARSE_REQUIREMENT in requirements
1003 1003 and not sparse.enabled
1004 1004 ):
1005 1005 raise error.RepoError(
1006 1006 _(
1007 1007 b'repository is using sparse feature but '
1008 1008 b'sparse is not enabled; enable the '
1009 1009 b'"sparse" extensions to access'
1010 1010 )
1011 1011 )
1012 1012
1013 1013
1014 1014 def makestore(requirements, path, vfstype):
1015 1015 """Construct a storage object for a repository."""
1016 1016 if requirementsmod.STORE_REQUIREMENT in requirements:
1017 1017 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1018 1018 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1019 1019 return storemod.fncachestore(path, vfstype, dotencode)
1020 1020
1021 1021 return storemod.encodedstore(path, vfstype)
1022 1022
1023 1023 return storemod.basicstore(path, vfstype)
1024 1024
1025 1025
1026 1026 def resolvestorevfsoptions(ui, requirements, features):
1027 1027 """Resolve the options to pass to the store vfs opener.
1028 1028
1029 1029 The returned dict is used to influence behavior of the storage layer.
1030 1030 """
1031 1031 options = {}
1032 1032
1033 1033 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1034 1034 options[b'treemanifest'] = True
1035 1035
1036 1036 # experimental config: format.manifestcachesize
1037 1037 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1038 1038 if manifestcachesize is not None:
1039 1039 options[b'manifestcachesize'] = manifestcachesize
1040 1040
1041 1041 # In the absence of another requirement superseding a revlog-related
1042 1042 # requirement, we have to assume the repo is using revlog version 0.
1043 1043 # This revlog format is super old and we don't bother trying to parse
1044 1044 # opener options for it because those options wouldn't do anything
1045 1045 # meaningful on such old repos.
1046 1046 if (
1047 1047 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1048 1048 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1049 1049 ):
1050 1050 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1051 1051 else: # explicitly mark repo as using revlogv0
1052 1052 options[b'revlogv0'] = True
1053 1053
1054 1054 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1055 1055 options[b'copies-storage'] = b'changeset-sidedata'
1056 1056 else:
1057 1057 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1058 1058 copiesextramode = (b'changeset-only', b'compatibility')
1059 1059 if writecopiesto in copiesextramode:
1060 1060 options[b'copies-storage'] = b'extra'
1061 1061
1062 1062 return options
1063 1063
1064 1064
1065 1065 def resolverevlogstorevfsoptions(ui, requirements, features):
1066 1066 """Resolve opener options specific to revlogs."""
1067 1067
1068 1068 options = {}
1069 1069 options[b'flagprocessors'] = {}
1070 1070
1071 1071 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1072 1072 data_config = options[b'data-config'] = revlog.DataConfig()
1073 1073 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1074 1074
1075 1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1076 1076 options[b'revlogv1'] = True
1077 1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1078 1078 options[b'revlogv2'] = True
1079 1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1080 1080 options[b'changelogv2'] = True
1081 1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1082 1082 options[b'changelogv2.compute-rank'] = cmp_rank
1083 1083
1084 1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1085 1085 options[b'generaldelta'] = True
1086 1086
1087 1087 # experimental config: format.chunkcachesize
1088 1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1089 1089 if chunkcachesize is not None:
1090 1090 data_config.chunk_cache_size = chunkcachesize
1091 1091
1092 1092 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1093 1093 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1094 1094 data_config.uncompressed_cache_count = 10_000
1095 1095 data_config.uncompressed_cache_factor = 4
1096 1096 if memory_profile >= scmutil.RESOURCE_HIGH:
1097 1097 data_config.uncompressed_cache_factor = 10
1098 1098
1099 1099 delta_config.delta_both_parents = ui.configbool(
1100 1100 b'storage', b'revlog.optimize-delta-parent-choice'
1101 1101 )
1102 1102 delta_config.candidate_group_chunk_size = ui.configint(
1103 1103 b'storage',
1104 1104 b'revlog.delta-parent-search.candidate-group-chunk-size',
1105 1105 )
1106 1106 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1107 1107
1108 1108 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1109 1109 options[b'issue6528.fix-incoming'] = issue6528
1110 1110
1111 1111 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1112 1112 lazydeltabase = False
1113 1113 if lazydelta:
1114 1114 lazydeltabase = ui.configbool(
1115 1115 b'storage', b'revlog.reuse-external-delta-parent'
1116 1116 )
1117 1117 if lazydeltabase is None:
1118 1118 lazydeltabase = not scmutil.gddeltaconfig(ui)
1119 1119 delta_config.lazy_delta = lazydelta
1120 1120 delta_config.lazy_delta_base = lazydeltabase
1121 1121
1122 1122 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1123 1123 if 0 <= chainspan:
1124 1124 delta_config.max_deltachain_span = chainspan
1125 1125
1126 1126 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1127 1127 if mmapindexthreshold is not None:
1128 1128 data_config.mmap_index_threshold = mmapindexthreshold
1129 1129
1130 1130 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1131 1131 srdensitythres = float(
1132 1132 ui.config(b'experimental', b'sparse-read.density-threshold')
1133 1133 )
1134 1134 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1135 1135 data_config.with_sparse_read = withsparseread
1136 1136 data_config.sr_density_threshold = srdensitythres
1137 1137 data_config.sr_min_gap_size = srmingapsize
1138 1138
1139 1139 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1140 1140 delta_config.sparse_revlog = sparserevlog
1141 1141 if sparserevlog:
1142 1142 options[b'generaldelta'] = True
1143 1143 data_config.with_sparse_read = True
1144 1144
1145 1145 maxchainlen = None
1146 1146 if sparserevlog:
1147 1147 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1148 1148 # experimental config: format.maxchainlen
1149 1149 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1150 1150 if maxchainlen is not None:
1151 1151 delta_config.max_chain_len = maxchainlen
1152 1152
1153 1153 for r in requirements:
1154 1154 # we allow multiple compression engine requirements to co-exist because,
1155 1155 # strictly speaking, revlog seems to support mixed compression styles.
1156 1156 #
1157 1157 # The compression used for new entries will be "the last one"
1158 1158 prefix = r.startswith
1159 1159 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1160 1160 feature_config.compression_engine = r.split(b'-', 2)[2]
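# e.g. both b'revlog-compression-zstd' and b'exp-compression-zstd'
# split to [..., b'zstd'], selecting the b'zstd' engine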
1161 1161
1162 1162 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1163 1163 if zlib_level is not None:
1164 1164 if not (0 <= zlib_level <= 9):
1165 1165 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1166 1166 raise error.Abort(msg % zlib_level)
1167 1167 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1168 1168 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1169 1169 if zstd_level is not None:
1170 1170 if not (0 <= zstd_level <= 22):
1171 1171 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1172 1172 raise error.Abort(msg % zstd_level)
1173 1173 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1174 1174
1175 1175 if requirementsmod.NARROW_REQUIREMENT in requirements:
1176 1176 feature_config.enable_ellipsis = True
1177 1177
1178 1178 if ui.configbool(b'experimental', b'rust.index'):
1179 1179 options[b'rust.index'] = True
1180 1180 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1181 1181 slow_path = ui.config(
1182 1182 b'storage', b'revlog.persistent-nodemap.slow-path'
1183 1183 )
1184 1184 if slow_path not in (b'allow', b'warn', b'abort'):
1185 1185 default = ui.config_default(
1186 1186 b'storage', b'revlog.persistent-nodemap.slow-path'
1187 1187 )
1188 1188 msg = _(
1189 1189 b'unknown value for config '
1190 1190 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1191 1191 )
1192 1192 ui.warn(msg % slow_path)
1193 1193 if not ui.quiet:
1194 1194 ui.warn(_(b'falling back to default value: %s\n') % default)
1195 1195 slow_path = default
1196 1196
1197 1197 msg = _(
1198 1198 b"accessing `persistent-nodemap` repository without associated "
1199 1199 b"fast implementation."
1200 1200 )
1201 1201 hint = _(
1202 1202 b"check `hg help config.format.use-persistent-nodemap` "
1203 1203 b"for details"
1204 1204 )
1205 1205 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1206 1206 if slow_path == b'warn':
1207 1207 msg = b"warning: " + msg + b'\n'
1208 1208 ui.warn(msg)
1209 1209 if not ui.quiet:
1210 1210 hint = b'(' + hint + b')\n'
1211 1211 ui.warn(hint)
1212 1212 if slow_path == b'abort':
1213 1213 raise error.Abort(msg, hint=hint)
1214 1214 options[b'persistent-nodemap'] = True
1215 1215 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1216 1216 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1217 1217 if slow_path not in (b'allow', b'warn', b'abort'):
1218 1218 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1219 1219 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1220 1220 ui.warn(msg % slow_path)
1221 1221 if not ui.quiet:
1222 1222 ui.warn(_(b'falling back to default value: %s\n') % default)
1223 1223 slow_path = default
1224 1224
1225 1225 msg = _(
1226 1226 b"accessing `dirstate-v2` repository without associated "
1227 1227 b"fast implementation."
1228 1228 )
1229 1229 hint = _(
1230 1230 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1231 1231 )
1232 1232 if not dirstate.HAS_FAST_DIRSTATE_V2:
1233 1233 if slow_path == b'warn':
1234 1234 msg = b"warning: " + msg + b'\n'
1235 1235 ui.warn(msg)
1236 1236 if not ui.quiet:
1237 1237 hint = b'(' + hint + b')\n'
1238 1238 ui.warn(hint)
1239 1239 if slow_path == b'abort':
1240 1240 raise error.Abort(msg, hint=hint)
1241 1241 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1242 1242 options[b'persistent-nodemap.mmap'] = True
1243 1243 if ui.configbool(b'devel', b'persistent-nodemap'):
1244 1244 options[b'devel-force-nodemap'] = True
1245 1245
1246 1246 return options
1247 1247
1248 1248
1249 1249 def makemain(**kwargs):
1250 1250 """Produce a type conforming to ``ilocalrepositorymain``."""
1251 1251 return localrepository
1252 1252
1253 1253
1254 1254 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1255 1255 class revlogfilestorage:
1256 1256 """File storage when using revlogs."""
1257 1257
1258 1258 def file(self, path):
1259 1259 if path.startswith(b'/'):
1260 1260 path = path[1:]
1261 1261
1262 1262 try_split = (
1263 1263 self.currenttransaction() is not None
1264 1264 or txnutil.mayhavepending(self.root)
1265 1265 )
1266 1266
1267 1267 return filelog.filelog(self.svfs, path, try_split=try_split)
1268 1268
1269 1269
1270 1270 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1271 1271 class revlognarrowfilestorage:
1272 1272 """File storage when using revlogs and narrow files."""
1273 1273
1274 1274 def file(self, path):
1275 1275 if path.startswith(b'/'):
1276 1276 path = path[1:]
1277 1277
1278 1278 try_split = (
1279 1279 self.currenttransaction() is not None
1280 1280 or txnutil.mayhavepending(self.root)
1281 1281 )
1282 1282 return filelog.narrowfilelog(
1283 1283 self.svfs, path, self._storenarrowmatch, try_split=try_split
1284 1284 )
1285 1285
1286 1286
1287 1287 def makefilestorage(requirements, features, **kwargs):
1288 1288 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1289 1289 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1290 1290 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1291 1291
1292 1292 if requirementsmod.NARROW_REQUIREMENT in requirements:
1293 1293 return revlognarrowfilestorage
1294 1294 else:
1295 1295 return revlogfilestorage
1296 1296
1297 1297
1298 1298 # List of repository interfaces and factory functions for them. Each
1299 1299 # will be called in order during ``makelocalrepository()`` to iteratively
1300 1300 # derive the final type for a local repository instance. We capture the
1301 1301 # function as a lambda so we don't hold a reference and the module-level
1302 1302 # functions can be wrapped.
1303 1303 REPO_INTERFACES = [
1304 1304 (repository.ilocalrepositorymain, lambda: makemain),
1305 1305 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1306 1306 ]
1307 1307
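# Sketch of the wrapping this lambda indirection enables (hypothetical
# extension code): a replacement of the module-level factory is picked
# up the next time a repository type is derived.
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements, features, **kwargs)
#         # adjust or wrap cls based on a custom requirement here
#         return cls
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)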
1308 1308
1309 1309 @interfaceutil.implementer(repository.ilocalrepositorymain)
1310 1310 class localrepository:
1311 1311 """Main class for representing local repositories.
1312 1312
1313 1313 All local repositories are instances of this class.
1314 1314
1315 1315 Constructed on its own, instances of this class are not usable as
1316 1316 repository objects. To obtain a usable repository object, call
1317 1317 ``hg.repository()``, ``localrepo.instance()``, or
1318 1318 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1319 1319 ``instance()`` adds support for creating new repositories.
1320 1320 ``hg.repository()`` adds more extension integration, including calling
1321 1321 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1322 1322 used.
1323 1323 """
1324 1324
1325 1325 _basesupported = {
1326 1326 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1327 1327 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1328 1328 requirementsmod.CHANGELOGV2_REQUIREMENT,
1329 1329 requirementsmod.COPIESSDC_REQUIREMENT,
1330 1330 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1331 1331 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1332 1332 requirementsmod.DOTENCODE_REQUIREMENT,
1333 1333 requirementsmod.FNCACHE_REQUIREMENT,
1334 1334 requirementsmod.GENERALDELTA_REQUIREMENT,
1335 1335 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1336 1336 requirementsmod.NODEMAP_REQUIREMENT,
1337 1337 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1338 1338 requirementsmod.REVLOGV1_REQUIREMENT,
1339 1339 requirementsmod.REVLOGV2_REQUIREMENT,
1340 1340 requirementsmod.SHARED_REQUIREMENT,
1341 1341 requirementsmod.SHARESAFE_REQUIREMENT,
1342 1342 requirementsmod.SPARSE_REQUIREMENT,
1343 1343 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1344 1344 requirementsmod.STORE_REQUIREMENT,
1345 1345 requirementsmod.TREEMANIFEST_REQUIREMENT,
1346 1346 }
1347 1347
1348 1348 # list of prefixes for files which can be written without 'wlock'
1349 1349 # Extensions should extend this list when needed
1350 1350 _wlockfreeprefix = {
1351 1351 # We might consider requiring 'wlock' for the next
1352 1352 # two, but pretty much all the existing code assumes
1353 1353 # wlock is not needed so we keep them excluded for
1354 1354 # now.
1355 1355 b'hgrc',
1356 1356 b'requires',
1357 1357 # XXX cache is a complicated business; someone
1358 1358 # should investigate this in depth at some point
1359 1359 b'cache/',
1360 1360 # XXX bisect was still a bit too messy at the time
1361 1361 # this changeset was introduced. Someone should fix
1362 1362 # the remaining bit and drop this line
1363 1363 b'bisect.state',
1364 1364 }
1365 1365
1366 1366 def __init__(
1367 1367 self,
1368 1368 baseui,
1369 1369 ui,
1370 1370 origroot: bytes,
1371 1371 wdirvfs: vfsmod.vfs,
1372 1372 hgvfs: vfsmod.vfs,
1373 1373 requirements,
1374 1374 supportedrequirements,
1375 1375 sharedpath: bytes,
1376 1376 store,
1377 1377 cachevfs: vfsmod.vfs,
1378 1378 wcachevfs: vfsmod.vfs,
1379 1379 features,
1380 1380 intents=None,
1381 1381 ):
1382 1382 """Create a new local repository instance.
1383 1383
1384 1384 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1385 1385 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1386 1386 object.
1387 1387
1388 1388 Arguments:
1389 1389
1390 1390 baseui
1391 1391 ``ui.ui`` instance that ``ui`` argument was based off of.
1392 1392
1393 1393 ui
1394 1394 ``ui.ui`` instance for use by the repository.
1395 1395
1396 1396 origroot
1397 1397 ``bytes`` path to working directory root of this repository.
1398 1398
1399 1399 wdirvfs
1400 1400 ``vfs.vfs`` rooted at the working directory.
1401 1401
1402 1402 hgvfs
1403 1403 ``vfs.vfs`` rooted at .hg/
1404 1404
1405 1405 requirements
1406 1406 ``set`` of bytestrings representing repository opening requirements.
1407 1407
1408 1408 supportedrequirements
1409 1409 ``set`` of bytestrings representing repository requirements that we
1410 1410 know how to open. May be a superset of ``requirements``.
1411 1411
1412 1412 sharedpath
1413 1413 ``bytes`` defining the path to the storage base directory. Points to a
1414 1414 ``.hg/`` directory somewhere.
1415 1415
1416 1416 store
1417 1417 ``store.basicstore`` (or derived) instance providing access to
1418 1418 versioned storage.
1419 1419
1420 1420 cachevfs
1421 1421 ``vfs.vfs`` used for cache files.
1422 1422
1423 1423 wcachevfs
1424 1424 ``vfs.vfs`` used for cache files related to the working copy.
1425 1425
1426 1426 features
1427 1427 ``set`` of bytestrings defining features/capabilities of this
1428 1428 instance.
1429 1429
1430 1430 intents
1431 1431 ``set`` of system strings indicating what this repo will be used
1432 1432 for.
1433 1433 """
1434 1434 self.baseui = baseui
1435 1435 self.ui = ui
1436 1436 self.origroot = origroot
1437 1437 # vfs rooted at working directory.
1438 1438 self.wvfs = wdirvfs
1439 1439 self.root = wdirvfs.base
1440 1440 # vfs rooted at .hg/. Used to access most non-store paths.
1441 1441 self.vfs = hgvfs
1442 1442 self.path = hgvfs.base
1443 1443 self.requirements = requirements
1444 1444 self.nodeconstants = sha1nodeconstants
1445 1445 self.nullid = self.nodeconstants.nullid
1446 1446 self.supported = supportedrequirements
1447 1447 self.sharedpath = sharedpath
1448 1448 self.store = store
1449 1449 self.cachevfs = cachevfs
1450 1450 self.wcachevfs = wcachevfs
1451 1451 self.features = features
1452 1452
1453 1453 self.filtername = None
1454 1454
1455 1455 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1456 1456 b'devel', b'check-locks'
1457 1457 ):
1458 1458 self.vfs.audit = self._getvfsward(self.vfs.audit)
1459 1459 # A list of callbacks to shape the phase if no data were found.
1460 1460 # Callbacks are in the form: func(repo, roots) --> processed root.
1461 1461 # This list is to be filled by extensions during repo setup
1462 1462 self._phasedefaults = []
1463 1463
1464 1464 color.setup(self.ui)
1465 1465
1466 1466 self.spath = self.store.path
1467 1467 self.svfs = self.store.vfs
1468 1468 self.sjoin = self.store.join
1469 1469 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1470 1470 b'devel', b'check-locks'
1471 1471 ):
1472 1472 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1473 1473 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1474 1474 else: # standard vfs
1475 1475 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1476 1476
1477 1477 self._dirstatevalidatewarned = False
1478 1478
1479 1479 self._branchcaches = branchmap.BranchMapCache()
1480 1480 self._revbranchcache = None
1481 1481 self._filterpats = {}
1482 1482 self._datafilters = {}
1483 1483 self._transref = self._lockref = self._wlockref = None
1484 1484
1485 1485 # A cache for various files under .hg/ that tracks file changes,
1486 1486 # (used by the filecache decorator)
1487 1487 #
1488 1488 # Maps a property name to its util.filecacheentry
1489 1489 self._filecache = {}
1490 1490
1491 1491 # hold sets of revisions to be filtered
1492 1492 # should be cleared when something might have changed the filter value:
1493 1493 # - new changesets,
1494 1494 # - phase change,
1495 1495 # - new obsolescence marker,
1496 1496 # - working directory parent change,
1497 1497 # - bookmark changes
1498 1498 self.filteredrevcache = {}
1499 1499
1500 1500 self._dirstate = None
1501 1501 # post-dirstate-status hooks
1502 1502 self._postdsstatus = []
1503 1503
1504 1504 self._pending_narrow_pats = None
1505 1505 self._pending_narrow_pats_dirstate = None
1506 1506
1507 1507 # generic mapping between names and nodes
1508 1508 self.names = namespaces.namespaces()
1509 1509
1510 1510 # Key to signature value.
1511 1511 self._sparsesignaturecache = {}
1512 1512 # Signature to cached matcher instance.
1513 1513 self._sparsematchercache = {}
1514 1514
1515 1515 self._extrafilterid = repoview.extrafilter(ui)
1516 1516
1517 1517 self.filecopiesmode = None
1518 1518 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1519 1519 self.filecopiesmode = b'changeset-sidedata'
1520 1520
1521 1521 self._wanted_sidedata = set()
1522 1522 self._sidedata_computers = {}
1523 1523 sidedatamod.set_sidedata_spec_for_repo(self)
1524 1524
1525 1525 def _getvfsward(self, origfunc):
1526 1526 """build a ward for self.vfs"""
1527 1527 rref = weakref.ref(self)
1528 1528
1529 1529 def checkvfs(path, mode=None):
1530 1530 ret = origfunc(path, mode=mode)
1531 1531 repo = rref()
1532 1532 if (
1533 1533 repo is None
1534 1534 or not hasattr(repo, '_wlockref')
1535 1535 or not hasattr(repo, '_lockref')
1536 1536 ):
1537 1537 return
1538 1538 if mode in (None, b'r', b'rb'):
1539 1539 return
1540 1540 if path.startswith(repo.path):
1541 1541 # truncate name relative to the repository (.hg)
1542 1542 path = path[len(repo.path) + 1 :]
1543 1543 if path.startswith(b'cache/'):
1544 1544 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1545 1545 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1546 1546 # path prefixes covered by 'lock'
1547 1547 vfs_path_prefixes = (
1548 1548 b'journal.',
1549 1549 b'undo.',
1550 1550 b'strip-backup/',
1551 1551 b'cache/',
1552 1552 )
1553 1553 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1554 1554 if repo._currentlock(repo._lockref) is None:
1555 1555 repo.ui.develwarn(
1556 1556 b'write with no lock: "%s"' % path,
1557 1557 stacklevel=3,
1558 1558 config=b'check-locks',
1559 1559 )
1560 1560 elif repo._currentlock(repo._wlockref) is None:
1561 1561 # rest of vfs files are covered by 'wlock'
1562 1562 #
1563 1563 # exclude special files
1564 1564 for prefix in self._wlockfreeprefix:
1565 1565 if path.startswith(prefix):
1566 1566 return
1567 1567 repo.ui.develwarn(
1568 1568 b'write with no wlock: "%s"' % path,
1569 1569 stacklevel=3,
1570 1570 config=b'check-locks',
1571 1571 )
1572 1572 return ret
1573 1573
1574 1574 return checkvfs
1575 1575
1576 1576 def _getsvfsward(self, origfunc):
1577 1577 """build a ward for self.svfs"""
1578 1578 rref = weakref.ref(self)
1579 1579
1580 1580 def checksvfs(path, mode=None):
1581 1581 ret = origfunc(path, mode=mode)
1582 1582 repo = rref()
1583 1583 if repo is None or not hasattr(repo, '_lockref'):
1584 1584 return
1585 1585 if mode in (None, b'r', b'rb'):
1586 1586 return
1587 1587 if path.startswith(repo.sharedpath):
1588 1588 # truncate name relative to the repository (.hg)
1589 1589 path = path[len(repo.sharedpath) + 1 :]
1590 1590 if repo._currentlock(repo._lockref) is None:
1591 1591 repo.ui.develwarn(
1592 1592 b'write with no lock: "%s"' % path, stacklevel=4
1593 1593 )
1594 1594 return ret
1595 1595
1596 1596 return checksvfs
1597 1597
1598 1598 @property
1599 1599 def vfs_map(self):
1600 1600 return {
1601 1601 b'': self.svfs,
1602 1602 b'plain': self.vfs,
1603 1603 b'store': self.svfs,
1604 1604 }
1605 1605
1606 1606 def close(self):
1607 1607 self._writecaches()
1608 1608
1609 1609 def _writecaches(self):
1610 1610 if self._revbranchcache:
1611 1611 self._revbranchcache.write()
1612 1612
1613 1613 def _restrictcapabilities(self, caps):
1614 1614 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1615 1615 caps = set(caps)
1616 1616 capsblob = bundle2.encodecaps(
1617 1617 bundle2.getrepocaps(self, role=b'client')
1618 1618 )
1619 1619 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1620 1620 if self.ui.configbool(b'experimental', b'narrow'):
1621 1621 caps.add(wireprototypes.NARROWCAP)
1622 1622 return caps
1623 1623
1624 1624 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1625 1625 # self -> auditor -> self._checknested -> self
1626 1626
1627 1627 @property
1628 1628 def auditor(self):
1629 1629 # This is only used by context.workingctx.match in order to
1630 1630 # detect files in subrepos.
1631 1631 return pathutil.pathauditor(self.root, callback=self._checknested)
1632 1632
1633 1633 @property
1634 1634 def nofsauditor(self):
1635 1635 # This is only used by context.basectx.match in order to detect
1636 1636 # files in subrepos.
1637 1637 return pathutil.pathauditor(
1638 1638 self.root, callback=self._checknested, realfs=False, cached=True
1639 1639 )
1640 1640
1641 1641 def _checknested(self, path):
1642 1642 """Determine if path is a legal nested repository."""
1643 1643 if not path.startswith(self.root):
1644 1644 return False
1645 1645 subpath = path[len(self.root) + 1 :]
1646 1646 normsubpath = util.pconvert(subpath)
1647 1647
1648 1648 # XXX: Checking against the current working copy is wrong in
1649 1649 # the sense that it can reject things like
1650 1650 #
1651 1651 # $ hg cat -r 10 sub/x.txt
1652 1652 #
1653 1653 # if sub/ is no longer a subrepository in the working copy
1654 1654 # parent revision.
1655 1655 #
1656 1656 # However, it can of course also allow things that would have
1657 1657 # been rejected before, such as the above cat command if sub/
1658 1658 # is a subrepository now, but was a normal directory before.
1659 1659 # The old path auditor would have rejected by mistake since it
1660 1660 # panics when it sees sub/.hg/.
1661 1661 #
1662 1662 # All in all, checking against the working copy seems sensible
1663 1663 # since we want to prevent access to nested repositories on
1664 1664 # the filesystem *now*.
1665 1665 ctx = self[None]
1666 1666 parts = util.splitpath(subpath)
1667 1667 while parts:
1668 1668 prefix = b'/'.join(parts)
1669 1669 if prefix in ctx.substate:
1670 1670 if prefix == normsubpath:
1671 1671 return True
1672 1672 else:
1673 1673 sub = ctx.sub(prefix)
1674 1674 return sub.checknested(subpath[len(prefix) + 1 :])
1675 1675 else:
1676 1676 parts.pop()
1677 1677 return False
1678 1678
1679 1679 def peer(self, path=None, remotehidden=False):
1680 1680 return localpeer(
1681 1681 self, path=path, remotehidden=remotehidden
1682 1682 ) # not cached to avoid reference cycle
1683 1683
1684 1684 def unfiltered(self):
1685 1685 """Return unfiltered version of the repository
1686 1686
1687 1687 Intended to be overwritten by filtered repo."""
1688 1688 return self
1689 1689
1690 1690 def filtered(self, name, visibilityexceptions=None):
1691 1691 """Return a filtered version of a repository
1692 1692
1693 1693 The `name` parameter is the identifier of the requested view. This
1694 1694 will return a repoview object set "exactly" to the specified view.
1695 1695
1696 1696 This function does not apply recursive filtering to a repository. For
1697 1697 example calling `repo.filtered("served")` will return a repoview using
1698 1698 the "served" view, regardless of the initial view used by `repo`.
1699 1699
1700 1700 In other words, there is always only one level of `repoview` "filtering".
1701 1701 """
1702 1702 if self._extrafilterid is not None and b'%' not in name:
1703 1703 name = name + b'%' + self._extrafilterid
1704 1704
1705 1705 cls = repoview.newtype(self.unfiltered().__class__)
1706 1706 return cls(self, name, visibilityexceptions)
1707 1707
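# A hedged usage sketch (assuming `repo` is a localrepository instance,
# e.g. obtained via `mercurial.hg.repository(ui, path)`): because
# filtering is not recursive, re-filtering an already-filtered view is
# equivalent to filtering the original repo.
#
#     served = repo.filtered(b'served')
#     again = served.filtered(b'served')
#     assert served.filtername == again.filtername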
1708 1708 @mixedrepostorecache(
1709 1709 (b'bookmarks', b'plain'),
1710 1710 (b'bookmarks.current', b'plain'),
1711 1711 (b'bookmarks', b''),
1712 1712 (b'00changelog.i', b''),
1713 1713 )
1714 1714 def _bookmarks(self):
1715 1715 # Since the multiple files involved in the transaction cannot be
1716 1716 # written atomically (with current repository format), there is a race
1717 1717 # condition here.
1718 1718 #
1719 1719 # 1) changelog content A is read
1720 1720 # 2) outside transaction update changelog to content B
1721 1721 # 3) outside transaction update bookmark file referring to content B
1722 1722 # 4) bookmarks file content is read and filtered against changelog-A
1723 1723 #
1724 1724 # When this happens, bookmarks against nodes missing from A are dropped.
1725 1725 #
1726 1726 # Having this happen during a read is not great, but it becomes worse
1727 1727 # when it happens during a write, because the bookmarks referring to the
1728 1728 # "unknown" nodes will be dropped for good. However, writes happen within
1729 1729 # locks. This locking makes it possible to have a race-free consistent read.
1730 1730 # For this purpose, data read from disk before locking is
1731 1731 # "invalidated" right after the locks are taken. These invalidations are
1732 1732 # "light": the `filecache` mechanism keeps the data in memory and will
1733 1733 # reuse it if the underlying files did not change. Not parsing the
1734 1734 # same data multiple times helps performance.
1735 1735 #
1736 1736 # Unfortunately, in the case described above, the files tracked by the
1737 1737 # bookmarks file cache might not have changed, but the in-memory
1738 1738 # content is still "wrong" because we used an older changelog content
1739 1739 # to process the on-disk data. So after locking, the changelog would be
1740 1740 # refreshed but `_bookmarks` would be preserved.
1741 1741 # Adding `00changelog.i` to the list of tracked files is not
1742 1742 # enough, because at the time we build the content for `_bookmarks` in
1743 1743 # (4), the changelog file has already diverged from the content used
1744 1744 # for loading `changelog` in (1)
1745 1745 #
1746 1746 # To prevent the issue, we force the changelog to be explicitly
1747 1747 # reloaded while computing `_bookmarks`. The data race can still happen
1748 1748 # without the lock (with a narrower window), but it would no longer go
1749 1749 # undetected during the lock time refresh.
1750 1750 #
1751 1751 # The new schedule is as follows
1752 1752 #
1753 1753 # 1) filecache logic detect that `_bookmarks` needs to be computed
1754 1754 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1755 1755 # 3) We force `changelog` filecache to be tested
1756 1756 # 4) cachestat for `changelog` are captured (for changelog)
1757 1757 # 5) `_bookmarks` is computed and cached
1758 1758 #
1759 1759 # The step in (3) ensures we have a changelog at least as recent as the
1760 1760 # cache stat computed in (1). As a result, at locking time:
1761 1761 # * if the changelog did not change since (1) -> we can reuse the data
1762 1762 # * otherwise -> the bookmarks get refreshed.
1763 1763 self._refreshchangelog()
1764 1764 return bookmarks.bmstore(self)
1765 1765
1766 1766 def _refreshchangelog(self):
1767 1767 """make sure the in memory changelog match the on-disk one"""
1768 1768 if 'changelog' in vars(self) and self.currenttransaction() is None:
1769 1769 del self.changelog
1770 1770
1771 1771 @property
1772 1772 def _activebookmark(self):
1773 1773 return self._bookmarks.active
1774 1774
1775 1775 # _phasesets depend on changelog. what we need is to call
1776 1776 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1777 1777 # can't be easily expressed in filecache mechanism.
1778 1778 @storecache(b'phaseroots', b'00changelog.i')
1779 1779 def _phasecache(self):
1780 1780 return phases.phasecache(self, self._phasedefaults)
1781 1781
1782 1782 @storecache(b'obsstore')
1783 1783 def obsstore(self):
1784 1784 return obsolete.makestore(self.ui, self)
1785 1785
1786 1786 @changelogcache()
1787 1787 def changelog(repo):
1788 1788 # load dirstate before changelog to avoid race see issue6303
1789 1789 repo.dirstate.prefetch_parents()
1790 1790 return repo.store.changelog(
1791 1791 txnutil.mayhavepending(repo.root),
1792 1792 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1793 1793 )
1794 1794
1795 1795 @manifestlogcache()
1796 1796 def manifestlog(self):
1797 1797 return self.store.manifestlog(self, self._storenarrowmatch)
1798 1798
1799 1799 @unfilteredpropertycache
1800 1800 def dirstate(self):
1801 1801 if self._dirstate is None:
1802 1802 self._dirstate = self._makedirstate()
1803 1803 else:
1804 1804 self._dirstate.refresh()
1805 1805 return self._dirstate
1806 1806
1807 1807 def _makedirstate(self):
1808 1808 """Extension point for wrapping the dirstate per-repo."""
1809 1809 sparsematchfn = None
1810 1810 if sparse.use_sparse(self):
1811 1811 sparsematchfn = lambda: sparse.matcher(self)
1812 1812 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1813 1813 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1814 1814 use_dirstate_v2 = v2_req in self.requirements
1815 1815 use_tracked_hint = th in self.requirements
1816 1816
1817 1817 return dirstate.dirstate(
1818 1818 self.vfs,
1819 1819 self.ui,
1820 1820 self.root,
1821 1821 self._dirstatevalidate,
1822 1822 sparsematchfn,
1823 1823 self.nodeconstants,
1824 1824 use_dirstate_v2,
1825 1825 use_tracked_hint=use_tracked_hint,
1826 1826 )
1827 1827
1828 1828 def _dirstatevalidate(self, node):
1829 1829 okay = True
1830 1830 try:
1831 1831 self.changelog.rev(node)
1832 1832 except error.LookupError:
1833 1833 # If the parents are unknown, it might just be because the changelog
1834 1834 # in memory is lagging behind the dirstate in memory. So try to
1835 1835 # refresh the changelog first.
1836 1836 #
1837 1837 # We only do so if we don't hold the lock; if we do hold the lock,
1838 1838 # the invalidation at that time should have taken care of this and
1839 1839 # something is very fishy.
1840 1840 if self.currentlock() is None:
1841 1841 self.invalidate()
1842 1842 try:
1843 1843 self.changelog.rev(node)
1844 1844 except error.LookupError:
1845 1845 okay = False
1846 1846 else:
1847 1847 # XXX we should consider raising an error here.
1848 1848 okay = False
1849 1849 if okay:
1850 1850 return node
1851 1851 else:
1852 1852 if not self._dirstatevalidatewarned:
1853 1853 self._dirstatevalidatewarned = True
1854 1854 self.ui.warn(
1855 1855 _(b"warning: ignoring unknown working parent %s!\n")
1856 1856 % short(node)
1857 1857 )
1858 1858 return self.nullid
1859 1859
1860 1860 @storecache(narrowspec.FILENAME)
1861 1861 def narrowpats(self):
1862 1862 """matcher patterns for this repository's narrowspec
1863 1863
1864 1864 A tuple of (includes, excludes).
1865 1865 """
1866 1866 # the narrow management should probably move into its own object
1867 1867 val = self._pending_narrow_pats
1868 1868 if val is None:
1869 1869 val = narrowspec.load(self)
1870 1870 return val
1871 1871
1872 1872 @storecache(narrowspec.FILENAME)
1873 1873 def _storenarrowmatch(self):
1874 1874 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1875 1875 return matchmod.always()
1876 1876 include, exclude = self.narrowpats
1877 1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1878 1878
1879 1879 @storecache(narrowspec.FILENAME)
1880 1880 def _narrowmatch(self):
1881 1881 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1882 1882 return matchmod.always()
1883 1883 narrowspec.checkworkingcopynarrowspec(self)
1884 1884 include, exclude = self.narrowpats
1885 1885 return narrowspec.match(self.root, include=include, exclude=exclude)
1886 1886
1887 1887 def narrowmatch(self, match=None, includeexact=False):
1888 1888 """matcher corresponding the the repo's narrowspec
1889 1889
1890 1890 If `match` is given, then that will be intersected with the narrow
1891 1891 matcher.
1892 1892
1893 1893 If `includeexact` is True, then any exact matches from `match` will
1894 1894 be included even if they're outside the narrowspec.
1895 1895 """
1896 1896 if match:
1897 1897 if includeexact and not self._narrowmatch.always():
1898 1898 # do not exclude explicitly-specified paths so that they can
1899 1899 # be warned later on
1900 1900 em = matchmod.exact(match.files())
1901 1901 nm = matchmod.unionmatcher([self._narrowmatch, em])
1902 1902 return matchmod.intersectmatchers(match, nm)
1903 1903 return matchmod.intersectmatchers(match, self._narrowmatch)
1904 1904 return self._narrowmatch
1905 1905
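# A small sketch of the `includeexact` behavior documented above; the
# path is hypothetical and assumed to fall outside the narrowspec:
#
#     em = matchmod.exact([b'outside/readme.txt'])
#     nm = repo.narrowmatch(em, includeexact=True)
#     # nm(b'outside/readme.txt') is True, so callers can detect and
#     # warn about the explicitly-requested out-of-narrowspec file later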
1906 1906 def setnarrowpats(self, newincludes, newexcludes):
1907 1907 narrowspec.save(self, newincludes, newexcludes)
1908 1908 self.invalidate(clearfilecache=True)
1909 1909
1910 1910 @unfilteredpropertycache
1911 1911 def _quick_access_changeid_null(self):
1912 1912 return {
1913 1913 b'null': (nullrev, self.nodeconstants.nullid),
1914 1914 nullrev: (nullrev, self.nodeconstants.nullid),
1915 1915 self.nullid: (nullrev, self.nullid),
1916 1916 }
1917 1917
1918 1918 @unfilteredpropertycache
1919 1919 def _quick_access_changeid_wc(self):
1920 1920 # also fast path access to the working copy parents
1921 1921 # however, only do it for filters that ensure the wc is visible.
1922 1922 quick = self._quick_access_changeid_null.copy()
1923 1923 cl = self.unfiltered().changelog
1924 1924 for node in self.dirstate.parents():
1925 1925 if node == self.nullid:
1926 1926 continue
1927 1927 rev = cl.index.get_rev(node)
1928 1928 if rev is None:
1929 1929 # unknown working copy parent case:
1930 1930 #
1931 1931 # skip the fast path and let higher code deal with it
1932 1932 continue
1933 1933 pair = (rev, node)
1934 1934 quick[rev] = pair
1935 1935 quick[node] = pair
1936 1936 # also add the parents of the parents
1937 1937 for r in cl.parentrevs(rev):
1938 1938 if r == nullrev:
1939 1939 continue
1940 1940 n = cl.node(r)
1941 1941 pair = (r, n)
1942 1942 quick[r] = pair
1943 1943 quick[n] = pair
1944 1944 p1node = self.dirstate.p1()
1945 1945 if p1node != self.nullid:
1946 1946 quick[b'.'] = quick[p1node]
1947 1947 return quick
1948 1948
1949 1949 @unfilteredmethod
1950 1950 def _quick_access_changeid_invalidate(self):
1951 1951 if '_quick_access_changeid_wc' in vars(self):
1952 1952 del self.__dict__['_quick_access_changeid_wc']
1953 1953
1954 1954 @property
1955 1955 def _quick_access_changeid(self):
1956 1956 """an helper dictionnary for __getitem__ calls
1957 1957
1958 1958 This contains a list of symbol we can recognise right away without
1959 1959 further processing.
1960 1960 """
1961 1961 if self.filtername in repoview.filter_has_wc:
1962 1962 return self._quick_access_changeid_wc
1963 1963 return self._quick_access_changeid_null
1964 1964
1965 1965 def __getitem__(self, changeid):
1966 1966 # dealing with special cases
1967 1967 if changeid is None:
1968 1968 return context.workingctx(self)
1969 1969 if isinstance(changeid, context.basectx):
1970 1970 return changeid
1971 1971
1972 1972 # dealing with multiple revisions
1973 1973 if isinstance(changeid, slice):
1974 1974 # wdirrev isn't contiguous so the slice shouldn't include it
1975 1975 return [
1976 1976 self[i]
1977 1977 for i in range(*changeid.indices(len(self)))
1978 1978 if i not in self.changelog.filteredrevs
1979 1979 ]
1980 1980
1981 1981 # dealing with some special values
1982 1982 quick_access = self._quick_access_changeid.get(changeid)
1983 1983 if quick_access is not None:
1984 1984 rev, node = quick_access
1985 1985 return context.changectx(self, rev, node, maybe_filtered=False)
1986 1986 if changeid == b'tip':
1987 1987 node = self.changelog.tip()
1988 1988 rev = self.changelog.rev(node)
1989 1989 return context.changectx(self, rev, node)
1990 1990
1991 1991 # dealing with arbitrary values
1992 1992 try:
1993 1993 if isinstance(changeid, int):
1994 1994 node = self.changelog.node(changeid)
1995 1995 rev = changeid
1996 1996 elif changeid == b'.':
1997 1997 # this is a hack to delay/avoid loading obsmarkers
1998 1998 # when we know that '.' won't be hidden
1999 1999 node = self.dirstate.p1()
2000 2000 rev = self.unfiltered().changelog.rev(node)
2001 2001 elif len(changeid) == self.nodeconstants.nodelen:
2002 2002 try:
2003 2003 node = changeid
2004 2004 rev = self.changelog.rev(changeid)
2005 2005 except error.FilteredLookupError:
2006 2006 changeid = hex(changeid) # for the error message
2007 2007 raise
2008 2008 except LookupError:
2009 2009 # check if it might have come from damaged dirstate
2010 2010 #
2011 2011 # XXX we could avoid the unfiltered if we had a recognizable
2012 2012 # exception for filtered changeset access
2013 2013 if (
2014 2014 self.local()
2015 2015 and changeid in self.unfiltered().dirstate.parents()
2016 2016 ):
2017 2017 msg = _(b"working directory has unknown parent '%s'!")
2018 2018 raise error.Abort(msg % short(changeid))
2019 2019 changeid = hex(changeid) # for the error message
2020 2020 raise
2021 2021
2022 2022 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2023 2023 node = bin(changeid)
2024 2024 rev = self.changelog.rev(node)
2025 2025 else:
2026 2026 raise error.ProgrammingError(
2027 2027 b"unsupported changeid '%s' of type %s"
2028 2028 % (changeid, pycompat.bytestr(type(changeid)))
2029 2029 )
2030 2030
2031 2031 return context.changectx(self, rev, node)
2032 2032
2033 2033 except (error.FilteredIndexError, error.FilteredLookupError):
2034 2034 raise error.FilteredRepoLookupError(
2035 2035 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2036 2036 )
2037 2037 except (IndexError, LookupError):
2038 2038 raise error.RepoLookupError(
2039 2039 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2040 2040 )
2041 2041 except error.WdirUnsupported:
2042 2042 return context.workingctx(self)
2043 2043
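# Sketch of the lookup forms handled above (node values are
# illustrative):
#
#     repo[None]             # workingctx for the working directory
#     repo[0]                # changectx from an integer revision
#     repo[b'.']             # first parent of the working directory
#     repo[b'tip']           # current changelog tip
#     repo[b'deadbeef' * 5]  # 40-char hex node (20-byte binary nodes
#                            # are accepted as well)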
2044 2044 def __contains__(self, changeid):
2045 2045 """True if the given changeid exists"""
2046 2046 try:
2047 2047 self[changeid]
2048 2048 return True
2049 2049 except error.RepoLookupError:
2050 2050 return False
2051 2051
2052 2052 def __nonzero__(self):
2053 2053 return True
2054 2054
2055 2055 __bool__ = __nonzero__
2056 2056
2057 2057 def __len__(self):
2058 2058 # no need to pay the cost of repoview.changelog
2059 2059 unfi = self.unfiltered()
2060 2060 return len(unfi.changelog)
2061 2061
2062 2062 def __iter__(self):
2063 2063 return iter(self.changelog)
2064 2064
2065 2065 def revs(self, expr: bytes, *args):
2066 2066 """Find revisions matching a revset.
2067 2067
2068 2068 The revset is specified as a string ``expr`` that may contain
2069 2069 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2070 2070
2071 2071 Revset aliases from the configuration are not expanded. To expand
2072 2072 user aliases, consider calling ``scmutil.revrange()`` or
2073 2073 ``repo.anyrevs([expr], user=True)``.
2074 2074
2075 2075 Returns a smartset.abstractsmartset, which is a list-like interface
2076 2076 that contains integer revisions.
2077 2077 """
2078 2078 tree = revsetlang.spectree(expr, *args)
2079 2079 return revset.makematcher(tree)(self)
2080 2080
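# A minimal sketch of the %-formatting mentioned above (see
# `revsetlang.formatspec` for the full escape list); `%d` takes an
# integer revision and `%s` a bytes string:
#
#     for r in repo.revs(b'ancestors(%d) and branch(%s)', 42, b'default'):
#         ...  # r is an integer revision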
2081 2081 def set(self, expr: bytes, *args):
2082 2082 """Find revisions matching a revset and emit changectx instances.
2083 2083
2084 2084 This is a convenience wrapper around ``revs()`` that iterates the
2085 2085 result and is a generator of changectx instances.
2086 2086
2087 2087 Revset aliases from the configuration are not expanded. To expand
2088 2088 user aliases, consider calling ``scmutil.revrange()``.
2089 2089 """
2090 2090 for r in self.revs(expr, *args):
2091 2091 yield self[r]
2092 2092
2093 2093 def anyrevs(self, specs: bytes, user=False, localalias=None):
2094 2094 """Find revisions matching one of the given revsets.
2095 2095
2096 2096 Revset aliases from the configuration are not expanded by default. To
2097 2097 expand user aliases, specify ``user=True``. To provide some local
2098 2098 definitions overriding user aliases, set ``localalias`` to
2099 2099 ``{name: definitionstring}``.
2100 2100 """
2101 2101 if specs == [b'null']:
2102 2102 return revset.baseset([nullrev])
2103 2103 if specs == [b'.']:
2104 2104 quick_data = self._quick_access_changeid.get(b'.')
2105 2105 if quick_data is not None:
2106 2106 return revset.baseset([quick_data[0]])
2107 2107 if user:
2108 2108 m = revset.matchany(
2109 2109 self.ui,
2110 2110 specs,
2111 2111 lookup=revset.lookupfn(self),
2112 2112 localalias=localalias,
2113 2113 )
2114 2114 else:
2115 2115 m = revset.matchany(None, specs, localalias=localalias)
2116 2116 return m(self)
2117 2117
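# Hedged sketch of a local alias override (the alias name and its
# definition are hypothetical):
#
#     revs = repo.anyrevs(
#         [b'recent'],
#         user=True,
#         localalias={b'recent': b'last(all(), 5)'},
#     )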
2118 2118 def url(self) -> bytes:
2119 2119 return b'file:' + self.root
2120 2120
2121 2121 def hook(self, name, throw=False, **args):
2122 2122 """Call a hook, passing this repo instance.
2123 2123
2124 2124 This is a convenience method to aid invoking hooks. Extensions likely
2125 2125 won't call this unless they have registered a custom hook or are
2126 2126 replacing code that is expected to call a hook.
2127 2127 """
2128 2128 return hook.hook(self.ui, self, name, throw, **args)
2129 2129
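# Illustrative call from an extension (hook name and argument are
# hypothetical); keyword arguments are exposed to external hooks as
# HG_* environment variables:
#
#     repo.hook(b'myext-prepush', throw=True, source=b'myext')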
2130 2130 @filteredpropertycache
2131 2131 def _tagscache(self):
2132 2132 """Returns a tagscache object that contains various tags related
2133 2133 caches."""
2134 2134
2135 2135 # This simplifies its cache management by having one decorated
2136 2136 # function (this one) and the rest simply fetch things from it.
2137 2137 class tagscache:
2138 2138 def __init__(self):
2139 2139 # These two define the set of tags for this repository. tags
2140 2140 # maps tag name to node; tagtypes maps tag name to 'global' or
2141 2141 # 'local'. (Global tags are defined by .hgtags across all
2142 2142 # heads, and local tags are defined in .hg/localtags.)
2143 2143 # They constitute the in-memory cache of tags.
2144 2144 self.tags = self.tagtypes = None
2145 2145
2146 2146 self.nodetagscache = self.tagslist = None
2147 2147
2148 2148 cache = tagscache()
2149 2149 cache.tags, cache.tagtypes = self._findtags()
2150 2150
2151 2151 return cache
2152 2152
2153 2153 def tags(self):
2154 2154 '''return a mapping of tag to node'''
2155 2155 t = {}
2156 2156 if self.changelog.filteredrevs:
2157 2157 tags, tt = self._findtags()
2158 2158 else:
2159 2159 tags = self._tagscache.tags
2160 2160 rev = self.changelog.rev
2161 2161 for k, v in tags.items():
2162 2162 try:
2163 2163 # ignore tags to unknown nodes
2164 2164 rev(v)
2165 2165 t[k] = v
2166 2166 except (error.LookupError, ValueError):
2167 2167 pass
2168 2168 return t
2169 2169
2170 2170 def _findtags(self):
2171 2171 """Do the hard work of finding tags. Return a pair of dicts
2172 2172 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2173 2173 maps tag name to a string like \'global\' or \'local\'.
2174 2174 Subclasses or extensions are free to add their own tags, but
2175 2175 should be aware that the returned dicts will be retained for the
2176 2176 duration of the localrepo object."""
2177 2177
2178 2178 # XXX what tagtype should subclasses/extensions use? Currently
2179 2179 # mq and bookmarks add tags, but do not set the tagtype at all.
2180 2180 # Should each extension invent its own tag type? Should there
2181 2181 # be one tagtype for all such "virtual" tags? Or is the status
2182 2182 # quo fine?
2183 2183
2184 2184 # map tag name to (node, hist)
2185 2185 alltags = tagsmod.findglobaltags(self.ui, self)
2186 2186 # map tag name to tag type
2187 2187 tagtypes = {tag: b'global' for tag in alltags}
2188 2188
2189 2189 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2190 2190
2191 2191 # Build the return dicts. Have to re-encode tag names because
2192 2192 # the tags module always uses UTF-8 (in order not to lose info
2193 2193 # writing to the cache), but the rest of Mercurial wants them in
2194 2194 # local encoding.
2195 2195 tags = {}
2196 2196 for name, (node, hist) in alltags.items():
2197 2197 if node != self.nullid:
2198 2198 tags[encoding.tolocal(name)] = node
2199 2199 tags[b'tip'] = self.changelog.tip()
2200 2200 tagtypes = {
2201 2201 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2202 2202 }
2203 2203 return (tags, tagtypes)
2204 2204
2205 2205 def tagtype(self, tagname):
2206 2206 """
2207 2207 return the type of the given tag. result can be:
2208 2208
2209 2209 'local' : a local tag
2210 2210 'global' : a global tag
2211 2211 None : tag does not exist
2212 2212 """
2213 2213
2214 2214 return self._tagscache.tagtypes.get(tagname)
2215 2215
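# Sketch tying the tag accessors together (the tag name is
# hypothetical):
#
#     node = repo.tags().get(b'v1.0')   # None if the tag is unknown
#     kind = repo.tagtype(b'v1.0')      # b'local', b'global', or None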
2216 2216 def tagslist(self):
2217 2217 '''return a list of tags ordered by revision'''
2218 2218 if not self._tagscache.tagslist:
2219 2219 l = []
2220 2220 for t, n in self.tags().items():
2221 2221 l.append((self.changelog.rev(n), t, n))
2222 2222 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2223 2223
2224 2224 return self._tagscache.tagslist
2225 2225
2226 2226 def nodetags(self, node):
2227 2227 '''return the tags associated with a node'''
2228 2228 if not self._tagscache.nodetagscache:
2229 2229 nodetagscache = {}
2230 2230 for t, n in self._tagscache.tags.items():
2231 2231 nodetagscache.setdefault(n, []).append(t)
2232 2232 for tags in nodetagscache.values():
2233 2233 tags.sort()
2234 2234 self._tagscache.nodetagscache = nodetagscache
2235 2235 return self._tagscache.nodetagscache.get(node, [])
2236 2236
2237 2237 def nodebookmarks(self, node):
2238 2238 """return the list of bookmarks pointing to the specified node"""
2239 2239 return self._bookmarks.names(node)
2240 2240
2241 2241 def branchmap(self):
2242 2242 """returns a dictionary {branch: [branchheads]} with branchheads
2243 2243 ordered by increasing revision number"""
2244 2244 return self._branchcaches[self]
2245 2245
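# A minimal read-only sketch of the mapping described above, assuming
# the branchmap object's dict-like interface:
#
#     for branch, heads in repo.branchmap().items():
#         ...  # heads is a list of head nodes, ordered by revision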
2246 2246 @unfilteredmethod
2247 2247 def revbranchcache(self):
2248 2248 if not self._revbranchcache:
2249 2249 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2250 2250 return self._revbranchcache
2251 2251
2252 2252 def register_changeset(self, rev, changelogrevision):
2253 2253 self.revbranchcache().setdata(rev, changelogrevision)
2254 2254
2255 2255 def branchtip(self, branch, ignoremissing=False):
2256 2256 """return the tip node for a given branch
2257 2257
2258 2258 If ignoremissing is True, then this method will not raise an error.
2259 2259 This is helpful for callers that only expect None for a missing branch
2260 2260 (e.g. namespace).
2261 2261
2262 2262 """
2263 2263 try:
2264 2264 return self.branchmap().branchtip(branch)
2265 2265 except KeyError:
2266 2266 if not ignoremissing:
2267 2267 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2268 2268 else:
2269 2269 pass
2270 2270
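# Sketch of the `ignoremissing` contract described in the docstring:
#
#     tip = repo.branchtip(b'no-such-branch', ignoremissing=True)
#     if tip is None:
#         ...  # branch is absent; no RepoLookupError was raised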
2271 2271 def lookup(self, key):
2272 2272 node = scmutil.revsymbol(self, key).node()
2273 2273 if node is None:
2274 2274 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2275 2275 return node
2276 2276
2277 2277 def lookupbranch(self, key):
2278 2278 if self.branchmap().hasbranch(key):
2279 2279 return key
2280 2280
2281 2281 return scmutil.revsymbol(self, key).branch()
2282 2282
2283 2283 def known(self, nodes):
2284 2284 cl = self.changelog
2285 2285 get_rev = cl.index.get_rev
2286 2286 filtered = cl.filteredrevs
2287 2287 result = []
2288 2288 for n in nodes:
2289 2289 r = get_rev(n)
2290 2290 resp = not (r is None or r in filtered)
2291 2291 result.append(resp)
2292 2292 return result
2293 2293
2294 2294 def local(self):
2295 2295 return self
2296 2296
2297 2297 def publishing(self):
2298 2298 # it's safe (and desirable) to trust the publish flag unconditionally
2299 2299 # so that we don't finalize changes shared between users via ssh or nfs
2300 2300 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2301 2301
2302 2302 def cancopy(self):
2303 2303 # so statichttprepo's override of local() works
2304 2304 if not self.local():
2305 2305 return False
2306 2306 if not self.publishing():
2307 2307 return True
2308 2308 # if publishing we can't copy if there is filtered content
2309 2309 return not self.filtered(b'visible').changelog.filteredrevs
2310 2310
2311 2311 def shared(self):
2312 2312 '''the type of shared repository (None if not shared)'''
2313 2313 if self.sharedpath != self.path:
2314 2314 return b'store'
2315 2315 return None
2316 2316
2317 2317 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2318 2318 return self.vfs.reljoin(self.root, f, *insidef)
2319 2319
2320 2320 def setparents(self, p1, p2=None):
2321 2321 if p2 is None:
2322 2322 p2 = self.nullid
2323 2323 self[None].setparents(p1, p2)
2324 2324 self._quick_access_changeid_invalidate()
2325 2325
2326 2326 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2327 2327 """changeid must be a changeset revision, if specified.
2328 2328 fileid can be a file revision or node."""
2329 2329 return context.filectx(
2330 2330 self, path, changeid, fileid, changectx=changectx
2331 2331 )
2332 2332
2333 2333 def getcwd(self) -> bytes:
2334 2334 return self.dirstate.getcwd()
2335 2335
2336 2336 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2337 2337 return self.dirstate.pathto(f, cwd)
2338 2338
2339 2339 def _loadfilter(self, filter):
2340 2340 if filter not in self._filterpats:
2341 2341 l = []
2342 2342 for pat, cmd in self.ui.configitems(filter):
2343 2343 if cmd == b'!':
2344 2344 continue
2345 2345 mf = matchmod.match(self.root, b'', [pat])
2346 2346 fn = None
2347 2347 params = cmd
2348 2348 for name, filterfn in self._datafilters.items():
2349 2349 if cmd.startswith(name):
2350 2350 fn = filterfn
2351 2351 params = cmd[len(name) :].lstrip()
2352 2352 break
2353 2353 if not fn:
2354 2354 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2355 2355 fn.__name__ = 'commandfilter'
2356 2356 # Wrap old filters not supporting keyword arguments
2357 2357 if not pycompat.getargspec(fn)[2]:
2358 2358 oldfn = fn
2359 2359 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2360 2360 fn.__name__ = 'compat-' + oldfn.__name__
2361 2361 l.append((mf, fn, params))
2362 2362 self._filterpats[filter] = l
2363 2363 return self._filterpats[filter]
2364 2364
2365 2365 def _filter(self, filterpats, filename, data):
2366 2366 for mf, fn, cmd in filterpats:
2367 2367 if mf(filename):
2368 2368 self.ui.debug(
2369 2369 b"filtering %s through %s\n"
2370 2370 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2371 2371 )
2372 2372 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2373 2373 break
2374 2374
2375 2375 return data
2376 2376
2377 2377 @unfilteredpropertycache
2378 2378 def _encodefilterpats(self):
2379 2379 return self._loadfilter(b'encode')
2380 2380
2381 2381 @unfilteredpropertycache
2382 2382 def _decodefilterpats(self):
2383 2383 return self._loadfilter(b'decode')
2384 2384
2385 2385 def adddatafilter(self, name, filter):
2386 2386 self._datafilters[name] = filter
2387 2387
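# Hedged sketch of a data filter wired into `_loadfilter` above (the
# filter name and function are hypothetical). The registered name is
# matched as a prefix of the configured command, and the remainder is
# passed as the second argument:
#
#     def upperfilter(s, params, **kwargs):
#         return s.upper()
#
#     repo.adddatafilter(b'upper:', upperfilter)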
2388 2388 def wread(self, filename: bytes) -> bytes:
2389 2389 if self.wvfs.islink(filename):
2390 2390 data = self.wvfs.readlink(filename)
2391 2391 else:
2392 2392 data = self.wvfs.read(filename)
2393 2393 return self._filter(self._encodefilterpats, filename, data)
2394 2394
2395 2395 def wwrite(
2396 2396 self,
2397 2397 filename: bytes,
2398 2398 data: bytes,
2399 2399 flags: bytes,
2400 2400 backgroundclose=False,
2401 2401 **kwargs,
2402 2402 ) -> int:
2403 2403 """write ``data`` into ``filename`` in the working directory
2404 2404
2405 2405 This returns the length of the written (maybe decoded) data.
2406 2406 """
2407 2407 data = self._filter(self._decodefilterpats, filename, data)
2408 2408 if b'l' in flags:
2409 2409 self.wvfs.symlink(data, filename)
2410 2410 else:
2411 2411 self.wvfs.write(
2412 2412 filename, data, backgroundclose=backgroundclose, **kwargs
2413 2413 )
2414 2414 if b'x' in flags:
2415 2415 self.wvfs.setflags(filename, False, True)
2416 2416 else:
2417 2417 self.wvfs.setflags(filename, False, False)
2418 2418 return len(data)
2419 2419
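# Sketch of the flag handling above: b'l' writes a symlink, b'x' sets
# the executable bit, and empty flags produce a regular file.
#
#     repo.wwrite(b'hello.txt', b'hello\n', b'')
#     repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')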
2420 2420 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2421 2421 return self._filter(self._decodefilterpats, filename, data)
2422 2422
2423 2423 def currenttransaction(self):
2424 2424 """return the current transaction or None if non exists"""
2425 2425 if self._transref:
2426 2426 tr = self._transref()
2427 2427 else:
2428 2428 tr = None
2429 2429
2430 2430 if tr and tr.running():
2431 2431 return tr
2432 2432 return None
2433 2433
2434 2434 def transaction(self, desc, report=None):
2435 2435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2436 2436 b'devel', b'check-locks'
2437 2437 ):
2438 2438 if self._currentlock(self._lockref) is None:
2439 2439 raise error.ProgrammingError(b'transaction requires locking')
2440 2440 tr = self.currenttransaction()
2441 2441 if tr is not None:
2442 2442 return tr.nest(name=desc)
2443 2443
2444 2444 # abort here if the journal already exists
2445 2445 if self.svfs.exists(b"journal"):
2446 2446 raise error.RepoError(
2447 2447 _(b"abandoned transaction found"),
2448 2448 hint=_(b"run 'hg recover' to clean up transaction"),
2449 2449 )
2450 2450
2451 2451 # At that point your dirstate should be clean:
2452 2452 #
2453 2453 # - If you don't have the wlock, why would you still have a dirty
2454 2454 # dirstate?
2455 2455 #
2456 2456 # - If you hold the wlock, you should not be opening a transaction in
2457 2457 # the middle of a `dirstate.changing_*` block. The transaction needs to
2458 2458 # be open before that and wrap the change-context.
2459 2459 #
2460 2460 # - If you are not within a `dirstate.changing_*` context, why is our
2461 2461 # dirstate dirty?
2462 2462 if self.dirstate._dirty:
2463 2463 m = "cannot open a transaction with a dirty dirstate"
2464 2464 raise error.ProgrammingError(m)
2465 2465
2466 2466 idbase = b"%.40f#%f" % (random.random(), time.time())
2467 2467 ha = hex(hashutil.sha1(idbase).digest())
2468 2468 txnid = b'TXN:' + ha
2469 2469 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2470 2470
2471 2471 self._writejournal(desc)
2472 2472 if report:
2473 2473 rp = report
2474 2474 else:
2475 2475 rp = self.ui.warn
2476 2476 vfsmap = self.vfs_map
2477 2477 # we must avoid a cyclic reference between repo and transaction.
2478 2478 reporef = weakref.ref(self)
2479 2479 # Code to track tag movement
2480 2480 #
2481 2481 # Since tags are all handled as file content, it is actually quite hard
2482 2482 # to track these movements from a code perspective. So we fall back to
2483 2483 # tracking at the repository level. One could envision tracking changes
2484 2484 # to the '.hgtags' file through changegroup apply, but that fails to
2485 2485 # cope with cases where a transaction exposes new heads without a
2486 2486 # changegroup being involved (eg: phase movement).
2487 2487 #
2488 2488 # For now, we gate the feature behind a flag since this likely comes
2489 2489 # with performance impacts. The current code runs more often than needed
2490 2490 # and does not use caches as much as it could. The current focus is on
2491 2491 # the behavior of the feature so we disable it by default. The flag
2492 2492 # will be removed when we are happy with the performance impact.
2493 2493 #
2494 2494 # Once this feature is no longer experimental move the following
2495 2495 # documentation to the appropriate help section:
2496 2496 #
2497 2497 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2498 2498 # tags (new or changed or deleted tags). In addition the details of
2499 2499 # these changes are made available in a file at:
2500 2500 # ``REPOROOT/.hg/changes/tags.changes``.
2501 2501 # Make sure you check for HG_TAG_MOVED before reading that file as it
2502 2502 # might exist from a previous transaction even if no tags were touched
2503 2503 # in this one. Changes are recorded in a line-based format::
2504 2504 #
2505 2505 # <action> <hex-node> <tag-name>\n
2506 2506 #
2507 2507 # Actions are defined as follows:
2508 2508 # "-R": tag is removed,
2509 2509 # "+A": tag is added,
2510 2510 # "-M": tag is moved (old value),
2511 2511 # "+M": tag is moved (new value),
2512 2512 tracktags = lambda x: None
2513 2513 # experimental config: experimental.hook-track-tags
2514 2514 shouldtracktags = self.ui.configbool(
2515 2515 b'experimental', b'hook-track-tags'
2516 2516 )
2517 2517 if desc != b'strip' and shouldtracktags:
2518 2518 oldheads = self.changelog.headrevs()
2519 2519
2520 2520 def tracktags(tr2):
2521 2521 repo = reporef()
2522 2522 assert repo is not None # help pytype
2523 2523 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2524 2524 newheads = repo.changelog.headrevs()
2525 2525 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2526 2526 # note: we compare lists here.
2527 2527 # As we do it only once, building a set would not be cheaper.
2528 2528 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2529 2529 if changes:
2530 2530 tr2.hookargs[b'tag_moved'] = b'1'
2531 2531 with repo.vfs(
2532 2532 b'changes/tags.changes', b'w', atomictemp=True
2533 2533 ) as changesfile:
2534 2534 # note: we do not register the file with the transaction
2535 2535 # because we need it to still exist when the transaction
2536 2536 # is closed (for txnclose hooks)
2537 2537 tagsmod.writediff(changesfile, changes)
2538 2538
2539 2539 def validate(tr2):
2540 2540 """will run pre-closing hooks"""
2541 2541 # XXX the transaction API is a bit lacking here so we take a hacky
2542 2542 # path for now
2543 2543 #
2544 2544 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2545 2545 # dict is copied before these run. In addition we needs the data
2546 2546 # available to in memory hooks too.
2547 2547 #
2548 2548 # Moreover, we also need to make sure this runs before txnclose
2549 2549 # hooks and there is no "pending" mechanism that would execute
2550 2550 # logic only if hooks are about to run.
2551 2551 #
2552 2552 # Fixing this limitation of the transaction is also needed to track
2553 2553 # other families of changes (bookmarks, phases, obsolescence).
2554 2554 #
2555 2555 # This will have to be fixed before we remove the experimental
2556 2556 # gating.
2557 2557 tracktags(tr2)
2558 2558 repo = reporef()
2559 2559 assert repo is not None # help pytype
2560 2560
2561 2561 singleheadopt = (b'experimental', b'single-head-per-branch')
2562 2562 singlehead = repo.ui.configbool(*singleheadopt)
2563 2563 if singlehead:
2564 2564 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2565 2565 accountclosed = singleheadsub.get(
2566 2566 b"account-closed-heads", False
2567 2567 )
2568 2568 if singleheadsub.get(b"public-changes-only", False):
2569 2569 filtername = b"immutable"
2570 2570 else:
2571 2571 filtername = b"visible"
2572 2572 scmutil.enforcesinglehead(
2573 2573 repo, tr2, desc, accountclosed, filtername
2574 2574 )
2575 2575 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2576 2576 for name, (old, new) in sorted(
2577 2577 tr.changes[b'bookmarks'].items()
2578 2578 ):
2579 2579 args = tr.hookargs.copy()
2580 2580 args.update(bookmarks.preparehookargs(name, old, new))
2581 2581 repo.hook(
2582 2582 b'pretxnclose-bookmark',
2583 2583 throw=True,
2584 2584 **pycompat.strkwargs(args),
2585 2585 )
2586 2586 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2587 2587 cl = repo.unfiltered().changelog
2588 2588 for revs, (old, new) in tr.changes[b'phases']:
2589 2589 for rev in revs:
2590 2590 args = tr.hookargs.copy()
2591 2591 node = hex(cl.node(rev))
2592 2592 args.update(phases.preparehookargs(node, old, new))
2593 2593 repo.hook(
2594 2594 b'pretxnclose-phase',
2595 2595 throw=True,
2596 2596 **pycompat.strkwargs(args),
2597 2597 )
2598 2598
2599 2599 repo.hook(
2600 2600 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2601 2601 )
2602 2602
2603 2603 def releasefn(tr, success):
2604 2604 repo = reporef()
2605 2605 if repo is None:
2606 2606 # If the repo has been GC'd (and this release function is being
2607 2607 # called from transaction.__del__), there's not much we can do,
2608 2608 # so just leave the unfinished transaction there and let the
2609 2609 # user run `hg recover`.
2610 2610 return
2611 2611 if success:
2612 2612 # this should be explicitly invoked here, because
2613 2613 # in-memory changes aren't written out when closing
2614 2614 # the transaction if tr.addfilegenerator (via
2615 2615 # dirstate.write or so) isn't invoked while the
2616 2616 # transaction is running
2617 2617 repo.dirstate.write(None)
2618 2618 else:
2619 2619 # discard all changes (including ones already written
2620 2620 # out) in this transaction
2621 2621 repo.invalidate(clearfilecache=True)
2622 2622
2623 2623 tr = transaction.transaction(
2624 2624 rp,
2625 2625 self.svfs,
2626 2626 vfsmap,
2627 2627 b"journal",
2628 2628 b"undo",
2629 2629 lambda: None,
2630 2630 self.store.createmode,
2631 2631 validator=validate,
2632 2632 releasefn=releasefn,
2633 2633 checkambigfiles=_cachedfiles,
2634 2634 name=desc,
2635 2635 )
2636 2636 for vfs_id, path in self._journalfiles():
2637 2637 tr.add_journal(vfs_id, path)
2638 2638 tr.changes[b'origrepolen'] = len(self)
2639 2639 tr.changes[b'obsmarkers'] = set()
2640 2640 tr.changes[b'phases'] = []
2641 2641 tr.changes[b'bookmarks'] = {}
2642 2642
2643 2643 tr.hookargs[b'txnid'] = txnid
2644 2644 tr.hookargs[b'txnname'] = desc
2645 2645 tr.hookargs[b'changes'] = tr.changes
2646 2646 # note: writing the fncache only during finalize means that the file is
2647 2647 # outdated when running hooks. As fncache is used for streaming clones,
2648 2648 # this is not expected to break anything that happens during the hooks.
2649 2649 tr.addfinalize(b'flush-fncache', self.store.write)
2650 2650
2651 2651 def txnclosehook(tr2):
2652 2652 """To be run if transaction is successful, will schedule a hook run"""
2653 2653 # Don't reference tr2 in hook() so we don't hold a reference.
2654 2654 # This reduces memory consumption when there are multiple
2655 2655 # transactions per lock. This can likely go away if issue5045
2656 2656 # fixes the function accumulation.
2657 2657 hookargs = tr2.hookargs
2658 2658
2659 2659 def hookfunc(unused_success):
2660 2660 repo = reporef()
2661 2661 assert repo is not None # help pytype
2662 2662
2663 2663 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2664 2664 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2665 2665 for name, (old, new) in bmchanges:
2666 2666 args = tr.hookargs.copy()
2667 2667 args.update(bookmarks.preparehookargs(name, old, new))
2668 2668 repo.hook(
2669 2669 b'txnclose-bookmark',
2670 2670 throw=False,
2671 2671 **pycompat.strkwargs(args),
2672 2672 )
2673 2673
2674 2674 if hook.hashook(repo.ui, b'txnclose-phase'):
2675 2675 cl = repo.unfiltered().changelog
2676 2676 phasemv = sorted(
2677 2677 tr.changes[b'phases'], key=lambda r: r[0][0]
2678 2678 )
2679 2679 for revs, (old, new) in phasemv:
2680 2680 for rev in revs:
2681 2681 args = tr.hookargs.copy()
2682 2682 node = hex(cl.node(rev))
2683 2683 args.update(phases.preparehookargs(node, old, new))
2684 2684 repo.hook(
2685 2685 b'txnclose-phase',
2686 2686 throw=False,
2687 2687 **pycompat.strkwargs(args),
2688 2688 )
2689 2689
2690 2690 repo.hook(
2691 2691 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2692 2692 )
2693 2693
2694 2694 repo = reporef()
2695 2695 assert repo is not None # help pytype
2696 2696 repo._afterlock(hookfunc)
2697 2697
2698 2698 tr.addfinalize(b'txnclose-hook', txnclosehook)
2699 2699 # Include a leading "-" to make it happen before the transaction summary
2700 2700 # reports registered via scmutil.registersummarycallback() whose names
2701 2701 # are 00-txnreport etc. That way, the caches will be warm when the
2702 2702 # callbacks run.
2703 2703 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2704 2704
2705 2705 def txnaborthook(tr2):
2706 2706 """To be run if transaction is aborted"""
2707 2707 repo = reporef()
2708 2708 assert repo is not None # help pytype
2709 2709 repo.hook(
2710 2710 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2711 2711 )
2712 2712
2713 2713 tr.addabort(b'txnabort-hook', txnaborthook)
2714 2714 # avoid eager cache invalidation. in-memory data should be identical
2715 2715 # to stored data if the transaction has no error.
2716 2716 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2717 2717 self._transref = weakref.ref(tr)
2718 2718 scmutil.registersummarycallback(self, tr, desc)
2719 2719 # This only exists to deal with rollback's need to have viable
2720 2720 # parents at the end of the operation. So back up viable parents at the
2721 2721 # time of this operation.
2722 2722 #
2723 2723 # We only do it when the `wlock` is taken, otherwise others might be
2724 2724 # altering the dirstate under us.
2725 2725 #
2726 2726 # This is really not a great way to do this (first, because we cannot
2727 2727 # always do it). More viable alternatives exist:
2728 2728 #
2729 2729 # - backing up only the working copy parents in a dedicated file and
2730 2730 # doing a clean "keep-update" to them on `hg rollback`.
2731 2731 #
2732 2732 # - slightly changing the behavior and applying logic similar to "hg
2733 2733 # strip" to pick a working copy destination on `hg rollback`
2734 2734 if self.currentwlock() is not None:
2735 2735 ds = self.dirstate
2736 2736 if not self.vfs.exists(b'branch'):
2737 2737 # force a file to be written if none exists
2738 2738 ds.setbranch(b'default', None)
2739 2739
2740 2740 def backup_dirstate(tr):
2741 2741 for f in ds.all_file_names():
2742 2742 # hardlink backup is okay because `dirstate` is always
2743 2743 # atomically written and possible data files are append-only
2744 2744 # and resistant to trailing data.
2745 2745 tr.addbackup(f, hardlink=True, location=b'plain')
2746 2746
2747 2747 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2748 2748 return tr
2749 2749
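# Hedged usage sketch: a transaction must be opened under the store
# lock (see the ProgrammingError above), and is commonly driven as a
# context manager that closes on success:
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction(b'my-operation') as tr:
#             ...  # mutate the store; `tr` is closed on normal exit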
2750 2750 def _journalfiles(self):
2751 2751 return (
2752 2752 (self.svfs, b'journal'),
2753 2753 (self.vfs, b'journal.desc'),
2754 2754 )
2755 2755
2756 2756 def undofiles(self):
2757 2757 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2758 2758
2759 2759 @unfilteredmethod
2760 2760 def _writejournal(self, desc):
2761 2761 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2762 2762
2763 2763 def recover(self):
2764 2764 with self.lock():
2765 2765 if self.svfs.exists(b"journal"):
2766 2766 self.ui.status(_(b"rolling back interrupted transaction\n"))
2767 2767 vfsmap = self.vfs_map
2768 2768 transaction.rollback(
2769 2769 self.svfs,
2770 2770 vfsmap,
2771 2771 b"journal",
2772 2772 self.ui.warn,
2773 2773 checkambigfiles=_cachedfiles,
2774 2774 )
2775 2775 self.invalidate()
2776 2776 return True
2777 2777 else:
2778 2778 self.ui.warn(_(b"no interrupted transaction available\n"))
2779 2779 return False
2780 2780
2781 2781 def rollback(self, dryrun=False, force=False):
2782 2782 wlock = lock = None
2783 2783 try:
2784 2784 wlock = self.wlock()
2785 2785 lock = self.lock()
2786 2786 if self.svfs.exists(b"undo"):
2787 2787 return self._rollback(dryrun, force)
2788 2788 else:
2789 2789 self.ui.warn(_(b"no rollback information available\n"))
2790 2790 return 1
2791 2791 finally:
2792 2792 release(lock, wlock)
2793 2793
2794 2794 @unfilteredmethod # Until we get smarter cache management
2795 2795 def _rollback(self, dryrun, force):
2796 2796 ui = self.ui
2797 2797
2798 2798 parents = self.dirstate.parents()
2799 2799 try:
2800 2800 args = self.vfs.read(b'undo.desc').splitlines()
2801 2801 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2802 2802 if len(args) >= 3:
2803 2803 detail = args[2]
2804 2804 oldtip = oldlen - 1
2805 2805
2806 2806 if detail and ui.verbose:
2807 2807 msg = _(
2808 2808 b'repository tip rolled back to revision %d'
2809 2809 b' (undo %s: %s)\n'
2810 2810 ) % (oldtip, desc, detail)
2811 2811 else:
2812 2812 msg = _(
2813 2813 b'repository tip rolled back to revision %d (undo %s)\n'
2814 2814 ) % (oldtip, desc)
2815 2815 parentgone = any(self[p].rev() > oldtip for p in parents)
2816 2816 except IOError:
2817 2817 msg = _(b'rolling back unknown transaction\n')
2818 2818 desc = None
2819 2819 parentgone = True
2820 2820
2821 2821 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2822 2822 raise error.Abort(
2823 2823 _(
2824 2824 b'rollback of last commit while not checked out '
2825 2825 b'may lose data'
2826 2826 ),
2827 2827 hint=_(b'use -f to force'),
2828 2828 )
2829 2829
2830 2830 ui.status(msg)
2831 2831 if dryrun:
2832 2832 return 0
2833 2833
2834 2834 self.destroying()
2835 2835 vfsmap = self.vfs_map
2836 2836 skip_journal_pattern = None
2837 2837 if not parentgone:
2838 2838 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2839 2839 transaction.rollback(
2840 2840 self.svfs,
2841 2841 vfsmap,
2842 2842 b'undo',
2843 2843 ui.warn,
2844 2844 checkambigfiles=_cachedfiles,
2845 2845 skip_journal_pattern=skip_journal_pattern,
2846 2846 )
2847 2847 self.invalidate()
2848 2848 self.dirstate.invalidate()
2849 2849
2850 2850 if parentgone:
2851 2851 # replace this with some explicit parent update in the future.
2852 2852 has_node = self.changelog.index.has_node
2853 2853 if not all(has_node(p) for p in self.dirstate._pl):
2854 2854 # There was no dirstate to back up initially; we need to drop
2855 2855 # the existing one.
2856 2856 with self.dirstate.changing_parents(self):
2857 2857 self.dirstate.setparents(self.nullid)
2858 2858 self.dirstate.clear()
2859 2859
2860 2860 parents = tuple([p.rev() for p in self[None].parents()])
2861 2861 if len(parents) > 1:
2862 2862 ui.status(
2863 2863 _(
2864 2864 b'working directory now based on '
2865 2865 b'revisions %d and %d\n'
2866 2866 )
2867 2867 % parents
2868 2868 )
2869 2869 else:
2870 2870 ui.status(
2871 2871 _(b'working directory now based on revision %d\n') % parents
2872 2872 )
2873 2873 mergestatemod.mergestate.clean(self)
2874 2874
2875 2875 # TODO: if we know which new heads may result from this rollback, pass
2876 2876 # them to destroy(), which will prevent the branchhead cache from being
2877 2877 # invalidated.
2878 2878 self.destroyed()
2879 2879 return 0
2880 2880
2881 2881 def _buildcacheupdater(self, newtransaction):
2882 2882 """called during transaction to build the callback updating cache
2883 2883
2884 2884 Lives on the repository to help extensions that might want to augment
2885 2885 this logic. For this purpose, the created transaction is passed to the
2886 2886 method.
2887 2887 """
2888 2888 # we must avoid a cyclic reference between repo and transaction.
2889 2889 reporef = weakref.ref(self)
2890 2890
2891 2891 def updater(tr):
2892 2892 repo = reporef()
2893 2893 assert repo is not None # help pytype
2894 2894 repo.updatecaches(tr)
2895 2895
2896 2896 return updater
2897 2897
2898 2898 @unfilteredmethod
2899 2899 def updatecaches(self, tr=None, full=False, caches=None):
2900 2900 """warm appropriate caches
2901 2901
2902 2902 If this function is called after a transaction closes, the transaction
2903 2903 will be available in the 'tr' argument. This can be used to selectively
2904 2904 update caches relevant to the changes in that transaction.
2905 2905
2906 2906 If 'full' is set, make sure all caches the function knows about have
2907 2907 up-to-date data. Even the ones usually loaded more lazily.
2908 2908
2909 2909 The `full` argument can take a special "post-clone" value. In this case
2910 2910 the cache warming is done after a clone, and some of the slower caches
2911 2911 might be skipped, namely the `.fnodetags` one. This argument is 5.8
2912 2912 specific as we plan for a cleaner way to deal with this in 5.9.
2913 2913 """
2914 2914 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2915 2915 # During strip, many caches are invalid, but a
2916 2916 # later call to `destroyed` will refresh them.
2917 2917 return
2918 2918
2919 2919 unfi = self.unfiltered()
2920 2920
2921 2921 if caches is None:
2922 2922 caches = repository.CACHES_DEFAULT
2923 2923
2924 2924 if repository.CACHE_BRANCHMAP_SERVED in caches:
2925 2925 if tr is None or tr.changes[b'origrepolen'] < len(self):
2926 2926 # accessing the 'served' branchmap should refresh all the others,
2927 2927 self.ui.debug(b'updating the branch cache\n')
2928 2928 self.filtered(b'served').branchmap()
2929 2929 self.filtered(b'served.hidden').branchmap()
2930 # flush all possibly delayed writes.
2931 self._branchcaches.write_delayed(self)
2932 2930
2933 2931 if repository.CACHE_CHANGELOG_CACHE in caches:
2934 2932 self.changelog.update_caches(transaction=tr)
2935 2933
2936 2934 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2937 2935 self.manifestlog.update_caches(transaction=tr)
2938 2936 for entry in self.store.walk():
2939 2937 if not entry.is_revlog:
2940 2938 continue
2941 2939 if not entry.is_manifestlog:
2942 2940 continue
2943 2941 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2944 2942 if manifestrevlog is not None:
2945 2943 manifestrevlog.update_caches(transaction=tr)
2946 2944
2947 2945 if repository.CACHE_REV_BRANCH in caches:
2948 2946 rbc = unfi.revbranchcache()
2949 2947 for r in unfi.changelog:
2950 2948 rbc.branchinfo(r)
2951 2949 rbc.write()
2952 2950
2953 2951 if repository.CACHE_FULL_MANIFEST in caches:
2954 2952 # ensure the working copy parents are in the manifestfulltextcache
2955 2953 for ctx in self[b'.'].parents():
2956 2954 ctx.manifest() # accessing the manifest is enough
2957 2955
2958 2956 if repository.CACHE_FILE_NODE_TAGS in caches:
2959 2957 # accessing fnode cache warms the cache
2960 2958 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2961 2959
2962 2960 if repository.CACHE_TAGS_DEFAULT in caches:
2963 2961 # accessing tags warm the cache
2964 2962 self.tags()
2965 2963 if repository.CACHE_TAGS_SERVED in caches:
2966 2964 self.filtered(b'served').tags()
2967 2965
2968 2966 if repository.CACHE_BRANCHMAP_ALL in caches:
2969 2967 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2970 2968 # so we're forcing a write to cause these caches to be warmed up
2971 2969 # even if they haven't explicitly been requested yet (if they've
2972 2970 # never been used by hg, they won't ever have been written, even if
2973 2971 # they're a subset of another kind of cache that *has* been used).
2974 2972 for filt in repoview.filtertable.keys():
2975 2973 filtered = self.filtered(filt)
2976 2974 self._branchcaches.update_disk(filtered)
2977 2975
2976 # flush all possibly delayed writes.
2977 self._branchcaches.write_delayed(self)
2978
2978 2979 def invalidatecaches(self):
2979 2980 if '_tagscache' in vars(self):
2980 2981 # can't use delattr on proxy
2981 2982 del self.__dict__['_tagscache']
2982 2983
2983 2984 self._branchcaches.clear()
2984 2985 self.invalidatevolatilesets()
2985 2986 self._sparsesignaturecache.clear()
2986 2987
2987 2988 def invalidatevolatilesets(self):
2988 2989 self.filteredrevcache.clear()
2989 2990 obsolete.clearobscaches(self)
2990 2991 self._quick_access_changeid_invalidate()
2991 2992
2992 2993 def invalidatedirstate(self):
2993 2994 """Invalidates the dirstate, causing the next call to dirstate
2994 2995 to check if it was modified since the last time it was read,
2995 2996 rereading it if it has.
2996 2997
2997 2998 This differs from dirstate.invalidate() in that it doesn't always
2998 2999 reread the dirstate. Use dirstate.invalidate() if you want to
2999 3000 explicitly read the dirstate again (i.e. restoring it to a previous
3000 3001 known good state)."""
3001 3002 unfi = self.unfiltered()
3002 3003 if 'dirstate' in unfi.__dict__:
3003 3004 assert not self.dirstate.is_changing_any
3004 3005 del unfi.__dict__['dirstate']
3005 3006
3006 3007 def invalidate(self, clearfilecache=False):
3007 3008 """Invalidates both store and non-store parts other than dirstate
3008 3009
3009 3010 If a transaction is running, invalidation of store is omitted,
3010 3011 because discarding in-memory changes might cause inconsistency
3011 3012 (e.g. incomplete fncache causes unintentional failure, but
3012 3013 redundant one doesn't).
3013 3014 """
3014 3015 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3015 3016 for k in list(self._filecache.keys()):
3016 3017 if (
3017 3018 k == b'changelog'
3018 3019 and self.currenttransaction()
3019 3020 and self.changelog.is_delaying
3020 3021 ):
3021 3022 # The changelog object may store unwritten revisions. We don't
3022 3023 # want to lose them.
3023 3024 # TODO: Solve the problem instead of working around it.
3024 3025 continue
3025 3026
3026 3027 if clearfilecache:
3027 3028 del self._filecache[k]
3028 3029 try:
3029 3030 # XXX ideally, the key would be a unicode string to match the
3030 3031 # fact it refers to an attribute name. However changing this was
3031 3032 # a bit of scope creep compared to the series cleaning up
3032 3033 # del/set/getattr so we kept things simple here.
3033 3034 delattr(unfiltered, pycompat.sysstr(k))
3034 3035 except AttributeError:
3035 3036 pass
3036 3037 self.invalidatecaches()
3037 3038 if not self.currenttransaction():
3038 3039 # TODO: Changing contents of store outside transaction
3039 3040 # causes inconsistency. We should make in-memory store
3040 3041 # changes detectable, and abort if changed.
3041 3042 self.store.invalidatecaches()
3042 3043
3043 3044 def invalidateall(self):
3044 3045 """Fully invalidates both store and non-store parts, causing the
3045 3046 subsequent operation to reread any outside changes."""
3046 3047 # extension should hook this to invalidate its caches
3047 3048 self.invalidate()
3048 3049 self.invalidatedirstate()
3049 3050
3050 3051 @unfilteredmethod
3051 3052 def _refreshfilecachestats(self, tr):
3052 3053 """Reload stats of cached files so that they are flagged as valid"""
3053 3054 for k, ce in self._filecache.items():
3054 3055 k = pycompat.sysstr(k)
3055 3056 if k == 'dirstate' or k not in self.__dict__:
3056 3057 continue
3057 3058 ce.refresh()
3058 3059
3059 3060 def _lock(
3060 3061 self,
3061 3062 vfs,
3062 3063 lockname,
3063 3064 wait,
3064 3065 releasefn,
3065 3066 acquirefn,
3066 3067 desc,
3067 3068 ):
3068 3069 timeout = 0
3069 3070 warntimeout = 0
3070 3071 if wait:
3071 3072 timeout = self.ui.configint(b"ui", b"timeout")
3072 3073 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3073 3074 # internal config: ui.signal-safe-lock
3074 3075 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3075 3076
3076 3077 l = lockmod.trylock(
3077 3078 self.ui,
3078 3079 vfs,
3079 3080 lockname,
3080 3081 timeout,
3081 3082 warntimeout,
3082 3083 releasefn=releasefn,
3083 3084 acquirefn=acquirefn,
3084 3085 desc=desc,
3085 3086 signalsafe=signalsafe,
3086 3087 )
3087 3088 return l
3088 3089
3089 3090 def _afterlock(self, callback):
3090 3091 """add a callback to be run when the repository is fully unlocked
3091 3092
3092 3093 The callback will be executed when the outermost lock is released
3093 3094 (with wlock being higher level than 'lock')."""
3094 3095 for ref in (self._wlockref, self._lockref):
3095 3096 l = ref and ref()
3096 3097 if l and l.held:
3097 3098 l.postrelease.append(callback)
3098 3099 break
3099 3100 else: # no lock has been found.
3100 3101 callback(True)
3101 3102
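# Usage sketch for _afterlock() (hypothetical extension code): the callback
# receives a success flag and fires once the outermost lock is released, or
# immediately with True when no lock is held at all.
#
#     def _on_fully_unlocked(success):
#         repo.ui.debug(b'repository fully unlocked\n')
#
#     repo._afterlock(_on_fully_unlocked)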
3102 3103 def lock(self, wait=True):
3103 3104 """Lock the repository store (.hg/store) and return a weak reference
3104 3105 to the lock. Use this before modifying the store (e.g. committing or
3105 3106 stripping). If you are opening a transaction, get a lock as well.
3106 3107
3107 3108 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3108 3109 'wlock' first to avoid a dead-lock hazard."""
3109 3110 l = self._currentlock(self._lockref)
3110 3111 if l is not None:
3111 3112 l.lock()
3112 3113 return l
3113 3114
3114 3115 l = self._lock(
3115 3116 vfs=self.svfs,
3116 3117 lockname=b"lock",
3117 3118 wait=wait,
3118 3119 releasefn=None,
3119 3120 acquirefn=self.invalidate,
3120 3121 desc=_(b'repository %s') % self.origroot,
3121 3122 )
3122 3123 self._lockref = weakref.ref(l)
3123 3124 return l
3124 3125
3125 3126 def wlock(self, wait=True):
3126 3127 """Lock the non-store parts of the repository (everything under
3127 3128 .hg except .hg/store) and return a weak reference to the lock.
3128 3129
3129 3130 Use this before modifying files in .hg.
3130 3131
3131 3132 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3132 3133 'wlock' first to avoid a dead-lock hazard."""
3133 3134 l = self._wlockref() if self._wlockref else None
3134 3135 if l is not None and l.held:
3135 3136 l.lock()
3136 3137 return l
3137 3138
3138 3139 # We do not need to check for non-waiting lock acquisition. Such
3139 3140 # acquisition would not cause dead-lock as it would just fail.
3140 3141 if wait and (
3141 3142 self.ui.configbool(b'devel', b'all-warnings')
3142 3143 or self.ui.configbool(b'devel', b'check-locks')
3143 3144 ):
3144 3145 if self._currentlock(self._lockref) is not None:
3145 3146 self.ui.develwarn(b'"wlock" acquired after "lock"')
3146 3147
3147 3148 def unlock():
3148 3149 if self.dirstate.is_changing_any:
3149 3150 msg = b"wlock release in the middle of a changing parents"
3150 3151 self.ui.develwarn(msg)
3151 3152 self.dirstate.invalidate()
3152 3153 else:
3153 3154 if self.dirstate._dirty:
3154 3155 msg = b"dirty dirstate on wlock release"
3155 3156 self.ui.develwarn(msg)
3156 3157 self.dirstate.write(None)
3157 3158
3158 3159 unfi = self.unfiltered()
3159 3160 if 'dirstate' in unfi.__dict__:
3160 3161 del unfi.__dict__['dirstate']
3161 3162
3162 3163 l = self._lock(
3163 3164 self.vfs,
3164 3165 b"wlock",
3165 3166 wait,
3166 3167 unlock,
3167 3168 self.invalidatedirstate,
3168 3169 _(b'working directory of %s') % self.origroot,
3169 3170 )
3170 3171 self._wlockref = weakref.ref(l)
3171 3172 return l
3172 3173
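# Lock-ordering sketch: per the docstrings above, acquire 'wlock' before
# 'lock' to avoid the dead-lock hazard, e.g.:
#
#     with repo.wlock(), repo.lock():
#         ...  # safe to mutate both the working copy and the store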
3173 3174 def _currentlock(self, lockref):
3174 3175 """Returns the lock if it's held, or None if it's not."""
3175 3176 if lockref is None:
3176 3177 return None
3177 3178 l = lockref()
3178 3179 if l is None or not l.held:
3179 3180 return None
3180 3181 return l
3181 3182
3182 3183 def currentwlock(self):
3183 3184 """Returns the wlock if it's held, or None if it's not."""
3184 3185 return self._currentlock(self._wlockref)
3185 3186
3186 3187 def currentlock(self):
3187 3188 """Returns the lock if it's held, or None if it's not."""
3188 3189 return self._currentlock(self._lockref)
3189 3190
3190 3191 def checkcommitpatterns(self, wctx, match, status, fail):
3191 3192 """check for commit arguments that aren't committable"""
3192 3193 if match.isexact() or match.prefix():
3193 3194 matched = set(status.modified + status.added + status.removed)
3194 3195
3195 3196 for f in match.files():
3196 3197 f = self.dirstate.normalize(f)
3197 3198 if f == b'.' or f in matched or f in wctx.substate:
3198 3199 continue
3199 3200 if f in status.deleted:
3200 3201 fail(f, _(b'file not found!'))
3201 3202 # Is it a directory that exists or used to exist?
3202 3203 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3203 3204 d = f + b'/'
3204 3205 for mf in matched:
3205 3206 if mf.startswith(d):
3206 3207 break
3207 3208 else:
3208 3209 fail(f, _(b"no match under directory!"))
3209 3210 elif f not in self.dirstate:
3210 3211 fail(f, _(b"file not tracked!"))
3211 3212
3212 3213 @unfilteredmethod
3213 3214 def commit(
3214 3215 self,
3215 3216 text=b"",
3216 3217 user=None,
3217 3218 date=None,
3218 3219 match=None,
3219 3220 force=False,
3220 3221 editor=None,
3221 3222 extra=None,
3222 3223 ):
3223 3224 """Add a new revision to current repository.
3224 3225
3225 3226 Revision information is gathered from the working directory,
3226 3227 match can be used to filter the committed files. If editor is
3227 3228 supplied, it is called to get a commit message.
3228 3229 """
3229 3230 if extra is None:
3230 3231 extra = {}
3231 3232
3232 3233 def fail(f, msg):
3233 3234 raise error.InputError(b'%s: %s' % (f, msg))
3234 3235
3235 3236 if not match:
3236 3237 match = matchmod.always()
3237 3238
3238 3239 if not force:
3239 3240 match.bad = fail
3240 3241
3241 3242 # lock() for recent changelog (see issue4368)
3242 3243 with self.wlock(), self.lock():
3243 3244 wctx = self[None]
3244 3245 merge = len(wctx.parents()) > 1
3245 3246
3246 3247 if not force and merge and not match.always():
3247 3248 raise error.Abort(
3248 3249 _(
3249 3250 b'cannot partially commit a merge '
3250 3251 b'(do not specify files or patterns)'
3251 3252 )
3252 3253 )
3253 3254
3254 3255 status = self.status(match=match, clean=force)
3255 3256 if force:
3256 3257 status.modified.extend(
3257 3258 status.clean
3258 3259 ) # mq may commit clean files
3259 3260
3260 3261 # check subrepos
3261 3262 subs, commitsubs, newstate = subrepoutil.precommit(
3262 3263 self.ui, wctx, status, match, force=force
3263 3264 )
3264 3265
3265 3266 # make sure all explicit patterns are matched
3266 3267 if not force:
3267 3268 self.checkcommitpatterns(wctx, match, status, fail)
3268 3269
3269 3270 cctx = context.workingcommitctx(
3270 3271 self, status, text, user, date, extra
3271 3272 )
3272 3273
3273 3274 ms = mergestatemod.mergestate.read(self)
3274 3275 mergeutil.checkunresolved(ms)
3275 3276
3276 3277 # internal config: ui.allowemptycommit
3277 3278 if cctx.isempty() and not self.ui.configbool(
3278 3279 b'ui', b'allowemptycommit'
3279 3280 ):
3280 3281 self.ui.debug(b'nothing to commit, clearing merge state\n')
3281 3282 ms.reset()
3282 3283 return None
3283 3284
3284 3285 if merge and cctx.deleted():
3285 3286 raise error.Abort(_(b"cannot commit merge with missing files"))
3286 3287
3287 3288 if editor:
3288 3289 cctx._text = editor(self, cctx, subs)
3289 3290 edited = text != cctx._text
3290 3291
3291 3292 # Save commit message in case this transaction gets rolled back
3292 3293 # (e.g. by a pretxncommit hook). Leave the content alone on
3293 3294 # the assumption that the user will use the same editor again.
3294 3295 msg_path = self.savecommitmessage(cctx._text)
3295 3296
3296 3297 # commit subs and write new state
3297 3298 if subs:
3298 3299 uipathfn = scmutil.getuipathfn(self)
3299 3300 for s in sorted(commitsubs):
3300 3301 sub = wctx.sub(s)
3301 3302 self.ui.status(
3302 3303 _(b'committing subrepository %s\n')
3303 3304 % uipathfn(subrepoutil.subrelpath(sub))
3304 3305 )
3305 3306 sr = sub.commit(cctx._text, user, date)
3306 3307 newstate[s] = (newstate[s][0], sr)
3307 3308 subrepoutil.writestate(self, newstate)
3308 3309
3309 3310 p1, p2 = self.dirstate.parents()
3310 3311 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3311 3312 try:
3312 3313 self.hook(
3313 3314 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3314 3315 )
3315 3316 with self.transaction(b'commit'):
3316 3317 ret = self.commitctx(cctx, True)
3317 3318 # update bookmarks, dirstate and mergestate
3318 3319 bookmarks.update(self, [p1, p2], ret)
3319 3320 cctx.markcommitted(ret)
3320 3321 ms.reset()
3321 3322 except: # re-raises
3322 3323 if edited:
3323 3324 self.ui.write(
3324 3325 _(b'note: commit message saved in %s\n') % msg_path
3325 3326 )
3326 3327 self.ui.write(
3327 3328 _(
3328 3329 b"note: use 'hg commit --logfile "
3329 3330 b"%s --edit' to reuse it\n"
3330 3331 )
3331 3332 % msg_path
3332 3333 )
3333 3334 raise
3334 3335
3335 3336 def commithook(unused_success):
3336 3337 # hack for commands that use a temporary commit (e.g. histedit)
3337 3338 # temporary commit got stripped before hook release
3338 3339 if self.changelog.hasnode(ret):
3339 3340 self.hook(
3340 3341 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3341 3342 )
3342 3343
3343 3344 self._afterlock(commithook)
3344 3345 return ret
3345 3346
3346 3347 @unfilteredmethod
3347 3348 def commitctx(self, ctx, error=False, origctx=None):
3348 3349 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3349 3350
3350 3351 @unfilteredmethod
3351 3352 def destroying(self):
3352 3353 """Inform the repository that nodes are about to be destroyed.
3353 3354 Intended for use by strip and rollback, so there's a common
3354 3355 place for anything that has to be done before destroying history.
3355 3356
3356 3357 This is mostly useful for saving state that is in memory and waiting
3357 3358 to be flushed when the current lock is released. Because a call to
3358 3359 destroyed is imminent, the repo will be invalidated causing those
3359 3360 changes to stay in memory (waiting for the next unlock), or vanish
3360 3361 completely.
3361 3362 """
3362 3363 # When using the same lock to commit and strip, the phasecache is left
3363 3364 # dirty after committing. Then when we strip, the repo is invalidated,
3364 3365 # causing those changes to disappear.
3365 3366 if '_phasecache' in vars(self):
3366 3367 self._phasecache.write(self)
3367 3368
3368 3369 @unfilteredmethod
3369 3370 def destroyed(self):
3370 3371 """Inform the repository that nodes have been destroyed.
3371 3372 Intended for use by strip and rollback, so there's a common
3372 3373 place for anything that has to be done after destroying history.
3373 3374 """
3374 3375 # refresh all repository caches
3375 3376 self.updatecaches()
3376 3377
3377 3378 # Ensure the persistent tag cache is updated. Doing it now
3378 3379 # means that the tag cache only has to worry about destroyed
3379 3380 # heads immediately after a strip/rollback. That in turn
3380 3381 # guarantees that "cachetip == currenttip" (comparing both rev
3381 3382 # and node) always means no nodes have been added or destroyed.
3382 3383
3383 3384 # XXX this is suboptimal when qrefresh'ing: we strip the current
3384 3385 # head, refresh the tag cache, then immediately add a new head.
3385 3386 # But I think doing it this way is necessary for the "instant
3386 3387 # tag cache retrieval" case to work.
3387 3388 self.invalidate()
3388 3389
3389 3390 def status(
3390 3391 self,
3391 3392 node1=b'.',
3392 3393 node2=None,
3393 3394 match=None,
3394 3395 ignored=False,
3395 3396 clean=False,
3396 3397 unknown=False,
3397 3398 listsubrepos=False,
3398 3399 ):
3399 3400 '''a convenience method that calls node1.status(node2)'''
3400 3401 return self[node1].status(
3401 3402 node2, match, ignored, clean, unknown, listsubrepos
3402 3403 )
3403 3404
3404 3405 def addpostdsstatus(self, ps):
3405 3406 """Add a callback to run within the wlock, at the point at which status
3406 3407 fixups happen.
3407 3408
3408 3409 On status completion, callback(wctx, status) will be called with the
3409 3410 wlock held, unless the dirstate has changed from underneath or the wlock
3410 3411 couldn't be grabbed.
3411 3412
3412 3413 Callbacks should not capture and use a cached copy of the dirstate --
3413 3414 it might change in the meantime. Instead, they should access the
3414 3415 dirstate via wctx.repo().dirstate.
3415 3416
3416 3417 This list is emptied out after each status run -- extensions should
3417 3418 make sure they add to this list each time dirstate.status is called.
3418 3419 Extensions should also make sure they don't call this for statuses
3419 3420 that don't involve the dirstate.
3420 3421 """
3421 3422
3422 3423 # The list is located here for uniqueness reasons -- it is actually
3423 3424 # managed by the workingctx, but that isn't unique per-repo.
3424 3425 self._postdsstatus.append(ps)
3425 3426
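# Usage sketch (hypothetical extension code): the registered callback runs
# under wlock once status fixups happen, and must not cache the dirstate
# object itself.
#
#     def _poststatus(wctx, status):
#         wctx.repo().ui.debug(b'%d file(s) modified\n'
#                              % len(status.modified))
#
#     repo.addpostdsstatus(_poststatus)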
3426 3427 def postdsstatus(self):
3427 3428 """Used by workingctx to get the list of post-dirstate-status hooks."""
3428 3429 return self._postdsstatus
3429 3430
3430 3431 def clearpostdsstatus(self):
3431 3432 """Used by workingctx to clear post-dirstate-status hooks."""
3432 3433 del self._postdsstatus[:]
3433 3434
3434 3435 def heads(self, start=None):
3435 3436 if start is None:
3436 3437 cl = self.changelog
3437 3438 headrevs = reversed(cl.headrevs())
3438 3439 return [cl.node(rev) for rev in headrevs]
3439 3440
3440 3441 heads = self.changelog.heads(start)
3441 3442 # sort the output in rev descending order
3442 3443 return sorted(heads, key=self.changelog.rev, reverse=True)
3443 3444
3444 3445 def branchheads(self, branch=None, start=None, closed=False):
3445 3446 """return a (possibly filtered) list of heads for the given branch
3446 3447
3447 3448 Heads are returned in topological order, from newest to oldest.
3448 3449 If branch is None, use the dirstate branch.
3449 3450 If start is not None, return only heads reachable from start.
3450 3451 If closed is True, return heads that are marked as closed as well.
3451 3452 """
3452 3453 if branch is None:
3453 3454 branch = self[None].branch()
3454 3455 branches = self.branchmap()
3455 3456 if not branches.hasbranch(branch):
3456 3457 return []
3457 3458 # the cache returns heads ordered lowest to highest
3458 3459 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3459 3460 if start is not None:
3460 3461 # filter out the heads that cannot be reached from startrev
3461 3462 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3462 3463 bheads = [h for h in bheads if h in fbheads]
3463 3464 return bheads
3464 3465
3465 3466 def branches(self, nodes):
3466 3467 if not nodes:
3467 3468 nodes = [self.changelog.tip()]
3468 3469 b = []
3469 3470 for n in nodes:
3470 3471 t = n
3471 3472 while True:
3472 3473 p = self.changelog.parents(n)
3473 3474 if p[1] != self.nullid or p[0] == self.nullid:
3474 3475 b.append((t, n, p[0], p[1]))
3475 3476 break
3476 3477 n = p[0]
3477 3478 return b
3478 3479
3479 3480 def between(self, pairs):
3480 3481 r = []
3481 3482
3482 3483 for top, bottom in pairs:
3483 3484 n, l, i = top, [], 0
3484 3485 f = 1
3485 3486
3486 3487 while n != bottom and n != self.nullid:
3487 3488 p = self.changelog.parents(n)[0]
3488 3489 if i == f:
3489 3490 l.append(n)
3490 3491 f = f * 2
3491 3492 n = p
3492 3493 i += 1
3493 3494
3494 3495 r.append(l)
3495 3496
3496 3497 return r
3497 3498
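# Worked example for between(): for each (top, bottom) pair the loop follows
# first parents and records the nodes whose distance from 'top' is a power
# of two (the nodes seen when i == f, after which f doubles). On a linear
# history n10 -> n0 with top=n10 and bottom=n0, the returned list is
# [n9, n8, n6, n2], i.e. distances 1, 2, 4 and 8 from the top.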
3498 3499 def checkpush(self, pushop):
3499 3500 """Extensions can override this function if additional checks have
3500 3501 to be performed before pushing, or call it if they override push
3501 3502 command.
3502 3503 """
3503 3504
3504 3505 @unfilteredpropertycache
3505 3506 def prepushoutgoinghooks(self):
3506 3507 """Return util.hooks consists of a pushop with repo, remote, outgoing
3507 3508 methods, which are called before pushing changesets.
3508 3509 """
3509 3510 return util.hooks()
3510 3511
3511 3512 def pushkey(self, namespace, key, old, new):
3512 3513 try:
3513 3514 tr = self.currenttransaction()
3514 3515 hookargs = {}
3515 3516 if tr is not None:
3516 3517 hookargs.update(tr.hookargs)
3517 3518 hookargs = pycompat.strkwargs(hookargs)
3518 3519 hookargs['namespace'] = namespace
3519 3520 hookargs['key'] = key
3520 3521 hookargs['old'] = old
3521 3522 hookargs['new'] = new
3522 3523 self.hook(b'prepushkey', throw=True, **hookargs)
3523 3524 except error.HookAbort as exc:
3524 3525 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3525 3526 if exc.hint:
3526 3527 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3527 3528 return False
3528 3529 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3529 3530 ret = pushkey.push(self, namespace, key, old, new)
3530 3531
3531 3532 def runhook(unused_success):
3532 3533 self.hook(
3533 3534 b'pushkey',
3534 3535 namespace=namespace,
3535 3536 key=key,
3536 3537 old=old,
3537 3538 new=new,
3538 3539 ret=ret,
3539 3540 )
3540 3541
3541 3542 self._afterlock(runhook)
3542 3543 return ret
3543 3544
3544 3545 def listkeys(self, namespace):
3545 3546 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3546 3547 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3547 3548 values = pushkey.list(self, namespace)
3548 3549 self.hook(b'listkeys', namespace=namespace, values=values)
3549 3550 return values
3550 3551
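# Usage sketch: pushkey namespaces include b'bookmarks' and b'phases'; with
# hypothetical values, pushing and listing a bookmark looks like:
#
#     repo.pushkey(b'bookmarks', b'feature', b'', hex(newnode))
#     repo.listkeys(b'bookmarks')  # -> {b'feature': hex(newnode), ...}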
3551 3552 def debugwireargs(self, one, two, three=None, four=None, five=None):
3552 3553 '''used to test argument passing over the wire'''
3553 3554 return b"%s %s %s %s %s" % (
3554 3555 one,
3555 3556 two,
3556 3557 pycompat.bytestr(three),
3557 3558 pycompat.bytestr(four),
3558 3559 pycompat.bytestr(five),
3559 3560 )
3560 3561
3561 3562 def savecommitmessage(self, text):
3562 3563 fp = self.vfs(b'last-message.txt', b'wb')
3563 3564 try:
3564 3565 fp.write(text)
3565 3566 finally:
3566 3567 fp.close()
3567 3568 return self.pathto(fp.name[len(self.root) + 1 :])
3568 3569
3569 3570 def register_wanted_sidedata(self, category):
3570 3571 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3571 3572 # Only revlogv2 repos can want sidedata.
3572 3573 return
3573 3574 self._wanted_sidedata.add(pycompat.bytestr(category))
3574 3575
3575 3576 def register_sidedata_computer(
3576 3577 self, kind, category, keys, computer, flags, replace=False
3577 3578 ):
3578 3579 if kind not in revlogconst.ALL_KINDS:
3579 3580 msg = _(b"unexpected revlog kind '%s'.")
3580 3581 raise error.ProgrammingError(msg % kind)
3581 3582 category = pycompat.bytestr(category)
3582 3583 already_registered = category in self._sidedata_computers.get(kind, [])
3583 3584 if already_registered and not replace:
3584 3585 msg = _(
3585 3586 b"cannot register a sidedata computer twice for category '%s'."
3586 3587 )
3587 3588 raise error.ProgrammingError(msg % category)
3588 3589 if replace and not already_registered:
3589 3590 msg = _(
3590 3591 b"cannot replace a sidedata computer that isn't registered "
3591 3592 b"for category '%s'."
3592 3593 )
3593 3594 raise error.ProgrammingError(msg % category)
3594 3595 self._sidedata_computers.setdefault(kind, {})
3595 3596 self._sidedata_computers[kind][category] = (keys, computer, flags)
3596 3597
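# Registration sketch (hypothetical extension code, assuming the usual
# computer signature of (repo, revlog, rev, sidedata) returning a
# (sidedata-dict, (flags-to-add, flags-to-remove)) pair):
#
#     def _computer(repo, revlog, rev, sidedata):
#         return {}, (0, 0)
#
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG, b'exp-example', (b'exp-example',),
#         _computer, 0,
#     )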
3597 3598
3598 3599 def undoname(fn: bytes) -> bytes:
3599 3600 base, name = os.path.split(fn)
3600 3601 assert name.startswith(b'journal')
3601 3602 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3602 3603
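# Example: undoname() swaps the 'journal' prefix for 'undo' while keeping
# the directory part, e.g. b'store/journal.phaseroots' -> b'store/undo.phaseroots'.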
3603 3604
3604 3605 def instance(ui, path: bytes, create, intents=None, createopts=None):
3605 3606 # prevent cyclic import localrepo -> upgrade -> localrepo
3606 3607 from . import upgrade
3607 3608
3608 3609 localpath = urlutil.urllocalpath(path)
3609 3610 if create:
3610 3611 createrepository(ui, localpath, createopts=createopts)
3611 3612
3612 3613 def repo_maker():
3613 3614 return makelocalrepository(ui, localpath, intents=intents)
3614 3615
3615 3616 repo = repo_maker()
3616 3617 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3617 3618 return repo
3618 3619
3619 3620
3620 3621 def islocal(path: bytes) -> bool:
3621 3622 return True
3622 3623
3623 3624
3624 3625 def defaultcreateopts(ui, createopts=None):
3625 3626 """Populate the default creation options for a repository.
3626 3627
3627 3628 A dictionary of explicitly requested creation options can be passed
3628 3629 in. Missing keys will be populated.
3629 3630 """
3630 3631 createopts = dict(createopts or {})
3631 3632
3632 3633 if b'backend' not in createopts:
3633 3634 # experimental config: storage.new-repo-backend
3634 3635 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3635 3636
3636 3637 return createopts
3637 3638
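# Example (assuming stock configuration): defaultcreateopts(ui) yields
# {b'backend': b'revlogv1'}, and explicitly requested options are kept:
#
#     defaultcreateopts(ui, {b'lfs': True})
#     # -> {b'lfs': True, b'backend': b'revlogv1'}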
3638 3639
3639 3640 def clone_requirements(ui, createopts, srcrepo):
3640 3641 """clone the requirements of a local repo for a local clone
3641 3642
3642 3643 The store requirements are unchanged while the working copy requirements
3643 3644 depend on the configuration.
3644 3645 """
3645 3646 target_requirements = set()
3646 3647 if not srcrepo.requirements:
3647 3648 # this is a legacy revlog "v0" repository; we cannot do anything fancy
3648 3649 # with it.
3649 3650 return target_requirements
3650 3651 createopts = defaultcreateopts(ui, createopts=createopts)
3651 3652 for r in newreporequirements(ui, createopts):
3652 3653 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3653 3654 target_requirements.add(r)
3654 3655
3655 3656 for r in srcrepo.requirements:
3656 3657 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3657 3658 target_requirements.add(r)
3658 3659 return target_requirements
3659 3660
3660 3661
3661 3662 def newreporequirements(ui, createopts):
3662 3663 """Determine the set of requirements for a new local repository.
3663 3664
3664 3665 Extensions can wrap this function to specify custom requirements for
3665 3666 new repositories.
3666 3667 """
3667 3668
3668 3669 if b'backend' not in createopts:
3669 3670 raise error.ProgrammingError(
3670 3671 b'backend key not present in createopts; '
3671 3672 b'was defaultcreateopts() called?'
3672 3673 )
3673 3674
3674 3675 if createopts[b'backend'] != b'revlogv1':
3675 3676 raise error.Abort(
3676 3677 _(
3677 3678 b'unable to determine repository requirements for '
3678 3679 b'storage backend: %s'
3679 3680 )
3680 3681 % createopts[b'backend']
3681 3682 )
3682 3683
3683 3684 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3684 3685 if ui.configbool(b'format', b'usestore'):
3685 3686 requirements.add(requirementsmod.STORE_REQUIREMENT)
3686 3687 if ui.configbool(b'format', b'usefncache'):
3687 3688 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3688 3689 if ui.configbool(b'format', b'dotencode'):
3689 3690 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3690 3691
3691 3692 compengines = ui.configlist(b'format', b'revlog-compression')
3692 3693 for compengine in compengines:
3693 3694 if compengine in util.compengines:
3694 3695 engine = util.compengines[compengine]
3695 3696 if engine.available() and engine.revlogheader():
3696 3697 break
3697 3698 else:
3698 3699 raise error.Abort(
3699 3700 _(
3700 3701 b'compression engines %s defined by '
3701 3702 b'format.revlog-compression not available'
3702 3703 )
3703 3704 % b', '.join(b'"%s"' % e for e in compengines),
3704 3705 hint=_(
3705 3706 b'run "hg debuginstall" to list available '
3706 3707 b'compression engines'
3707 3708 ),
3708 3709 )
3709 3710
3710 3711 # zlib is the historical default and doesn't need an explicit requirement.
3711 3712 if compengine == b'zstd':
3712 3713 requirements.add(b'revlog-compression-zstd')
3713 3714 elif compengine != b'zlib':
3714 3715 requirements.add(b'exp-compression-%s' % compengine)
3715 3716
3716 3717 if scmutil.gdinitconfig(ui):
3717 3718 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3718 3719 if ui.configbool(b'format', b'sparse-revlog'):
3719 3720 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3720 3721
3721 3722 # experimental config: format.use-dirstate-v2
3722 3723 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3723 3724 if ui.configbool(b'format', b'use-dirstate-v2'):
3724 3725 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3725 3726
3726 3727 # experimental config: format.exp-use-copies-side-data-changeset
3727 3728 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3728 3729 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3729 3730 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3730 3731 if ui.configbool(b'experimental', b'treemanifest'):
3731 3732 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3732 3733
3733 3734 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3734 3735 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3735 3736 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3736 3737
3737 3738 revlogv2 = ui.config(b'experimental', b'revlogv2')
3738 3739 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3739 3740 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3740 3741 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3741 3742 # experimental config: format.internal-phase
3742 3743 if ui.configbool(b'format', b'use-internal-phase'):
3743 3744 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3744 3745
3745 3746 # experimental config: format.exp-archived-phase
3746 3747 if ui.configbool(b'format', b'exp-archived-phase'):
3747 3748 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3748 3749
3749 3750 if createopts.get(b'narrowfiles'):
3750 3751 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3751 3752
3752 3753 if createopts.get(b'lfs'):
3753 3754 requirements.add(b'lfs')
3754 3755
3755 3756 if ui.configbool(b'format', b'bookmarks-in-store'):
3756 3757 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3757 3758
3758 3759 # The feature is disabled unless a fast implementation is available.
3759 3760 persistent_nodemap_default = policy.importrust('revlog') is not None
3760 3761 if ui.configbool(
3761 3762 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3762 3763 ):
3763 3764 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3764 3765
3765 3766 # if share-safe is enabled, let's create the new repository with the new
3766 3767 # requirement
3767 3768 if ui.configbool(b'format', b'use-share-safe'):
3768 3769 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3769 3770
3770 3771 # if we are creating a share-repo¹ we have to handle requirements
3771 3772 # differently.
3772 3773 #
3773 3774 # [1] (i.e. reusing the store from another repository, just having a
3774 3775 # working copy)
3775 3776 if b'sharedrepo' in createopts:
3776 3777 source_requirements = set(createopts[b'sharedrepo'].requirements)
3777 3778
3778 3779 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3779 3780 # share to an old school repository, we have to copy the
3780 3781 # requirements and hope for the best.
3781 3782 requirements = source_requirements
3782 3783 else:
3783 3784 # We have control on the working copy only, so "copy" the non
3784 3785 # working copy part over, ignoring previous logic.
3785 3786 to_drop = set()
3786 3787 for req in requirements:
3787 3788 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3788 3789 continue
3789 3790 if req in source_requirements:
3790 3791 continue
3791 3792 to_drop.add(req)
3792 3793 requirements -= to_drop
3793 3794 requirements |= source_requirements
3794 3795
3795 3796 if createopts.get(b'sharedrelative'):
3796 3797 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3797 3798 else:
3798 3799 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3799 3800
3800 3801 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3801 3802 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3802 3803 msg = _(b"ignoring unknown tracked key version: %d\n")
3803 3804 hint = _(
3804 3805 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3805 3806 )
3806 3807 if version != 1:
3807 3808 ui.warn(msg % version, hint=hint)
3808 3809 else:
3809 3810 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3810 3811
3811 3812 return requirements
3812 3813
3813 3814
3814 3815 def checkrequirementscompat(ui, requirements):
3815 3816 """Checks compatibility of repository requirements enabled and disabled.
3816 3817
3817 3818 Returns a set of requirements which need to be dropped because dependent
3818 3819 requirements are not enabled. Also warns users about it.
3819 3820
3820 3821 dropped = set()
3821 3822
3822 3823 if requirementsmod.STORE_REQUIREMENT not in requirements:
3823 3824 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3824 3825 ui.warn(
3825 3826 _(
3826 3827 b'ignoring enabled \'format.bookmarks-in-store\' config '
3827 3828 b'because it is incompatible with disabled '
3828 3829 b'\'format.usestore\' config\n'
3829 3830 )
3830 3831 )
3831 3832 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3832 3833
3833 3834 if (
3834 3835 requirementsmod.SHARED_REQUIREMENT in requirements
3835 3836 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3836 3837 ):
3837 3838 raise error.Abort(
3838 3839 _(
3839 3840 b"cannot create shared repository as source was created"
3840 3841 b" with 'format.usestore' config disabled"
3841 3842 )
3842 3843 )
3843 3844
3844 3845 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3845 3846 if ui.hasconfig(b'format', b'use-share-safe'):
3846 3847 msg = _(
3847 3848 b"ignoring enabled 'format.use-share-safe' config because "
3848 3849 b"it is incompatible with disabled 'format.usestore'"
3849 3850 b" config\n"
3850 3851 )
3851 3852 ui.warn(msg)
3852 3853 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3853 3854
3854 3855 return dropped
3855 3856
3856 3857
3857 3858 def filterknowncreateopts(ui, createopts):
3858 3859 """Filters a dict of repo creation options against options that are known.
3859 3860
3860 3861 Receives a dict of repo creation options and returns a dict of those
3861 3862 options that we don't know how to handle.
3862 3863
3863 3864 This function is called as part of repository creation. If the
3864 3865 returned dict contains any items, repository creation will not
3865 3866 be allowed, as it means there was a request to create a repository
3866 3867 with options not recognized by loaded code.
3867 3868
3868 3869 Extensions can wrap this function to filter out creation options
3869 3870 they know how to handle.
3870 3871 """
3871 3872 known = {
3872 3873 b'backend',
3873 3874 b'lfs',
3874 3875 b'narrowfiles',
3875 3876 b'sharedrepo',
3876 3877 b'sharedrelative',
3877 3878 b'shareditems',
3878 3879 b'shallowfilestore',
3879 3880 }
3880 3881
3881 3882 return {k: v for k, v in createopts.items() if k not in known}
3882 3883
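# Extension sketch (hypothetical option name): an extension that knows how
# to handle an extra creation option can wrap this function and remove that
# option from the returned dict:
#
#     def _filtercreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myext-option', None)
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                             _filtercreateopts)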
3883 3884
3884 3885 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3885 3886 """Create a new repository in a vfs.
3886 3887
3887 3888 ``path`` path to the new repo's working directory.
3888 3889 ``createopts`` options for the new repository.
3889 3890 ``requirements`` predefined set of requirements.
3890 3891 (incompatible with ``createopts``)
3891 3892
3892 3893 The following keys for ``createopts`` are recognized:
3893 3894
3894 3895 backend
3895 3896 The storage backend to use.
3896 3897 lfs
3897 3898 Repository will be created with ``lfs`` requirement. The lfs extension
3898 3899 will automatically be loaded when the repository is accessed.
3899 3900 narrowfiles
3900 3901 Set up repository to support narrow file storage.
3901 3902 sharedrepo
3902 3903 Repository object from which storage should be shared.
3903 3904 sharedrelative
3904 3905 Boolean indicating if the path to the shared repo should be
3905 3906 stored as relative. By default, the pointer to the "parent" repo
3906 3907 is stored as an absolute path.
3907 3908 shareditems
3908 3909 Set of items to share to the new repository (in addition to storage).
3909 3910 shallowfilestore
3910 3911 Indicates that storage for files should be shallow (not all ancestor
3911 3912 revisions are known).
3912 3913 """
3913 3914
3914 3915 if requirements is not None:
3915 3916 if createopts is not None:
3916 3917 msg = b'cannot specify both createopts and requirements'
3917 3918 raise error.ProgrammingError(msg)
3918 3919 createopts = {}
3919 3920 else:
3920 3921 createopts = defaultcreateopts(ui, createopts=createopts)
3921 3922
3922 3923 unknownopts = filterknowncreateopts(ui, createopts)
3923 3924
3924 3925 if not isinstance(unknownopts, dict):
3925 3926 raise error.ProgrammingError(
3926 3927 b'filterknowncreateopts() did not return a dict'
3927 3928 )
3928 3929
3929 3930 if unknownopts:
3930 3931 raise error.Abort(
3931 3932 _(
3932 3933 b'unable to create repository because of unknown '
3933 3934 b'creation option: %s'
3934 3935 )
3935 3936 % b', '.join(sorted(unknownopts)),
3936 3937 hint=_(b'is a required extension not loaded?'),
3937 3938 )
3938 3939
3939 3940 requirements = newreporequirements(ui, createopts=createopts)
3940 3941 requirements -= checkrequirementscompat(ui, requirements)
3941 3942
3942 3943 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3943 3944
3944 3945 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3945 3946 if hgvfs.exists():
3946 3947 raise error.RepoError(_(b'repository %s already exists') % path)
3947 3948
3948 3949 if b'sharedrepo' in createopts:
3949 3950 sharedpath = createopts[b'sharedrepo'].sharedpath
3950 3951
3951 3952 if createopts.get(b'sharedrelative'):
3952 3953 try:
3953 3954 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3954 3955 sharedpath = util.pconvert(sharedpath)
3955 3956 except (IOError, ValueError) as e:
3956 3957 # ValueError is raised on Windows if the drive letters differ
3957 3958 # on each path.
3958 3959 raise error.Abort(
3959 3960 _(b'cannot calculate relative path'),
3960 3961 hint=stringutil.forcebytestr(e),
3961 3962 )
3962 3963
3963 3964 if not wdirvfs.exists():
3964 3965 wdirvfs.makedirs()
3965 3966
3966 3967 hgvfs.makedir(notindexed=True)
3967 3968 if b'sharedrepo' not in createopts:
3968 3969 hgvfs.mkdir(b'cache')
3969 3970 hgvfs.mkdir(b'wcache')
3970 3971
3971 3972 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3972 3973 if has_store and b'sharedrepo' not in createopts:
3973 3974 hgvfs.mkdir(b'store')
3974 3975
3975 3976 # We create an invalid changelog outside the store so very old
3976 3977 # Mercurial versions (which didn't know about the requirements
3977 3978 # file) encounter an error on reading the changelog. This
3978 3979 # effectively locks out old clients and prevents them from
3979 3980 # mucking with a repo in an unknown format.
3980 3981 #
3981 3982 # The revlog header has version 65535, which won't be recognized by
3982 3983 # such old clients.
3983 3984 hgvfs.append(
3984 3985 b'00changelog.i',
3985 3986 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3986 3987 b'layout',
3987 3988 )
3988 3989
3989 3990 # Filter the requirements into working copy and store ones
3990 3991 wcreq, storereq = scmutil.filterrequirements(requirements)
3991 3992 # write working copy ones
3992 3993 scmutil.writerequires(hgvfs, wcreq)
3993 3994 # If there are store requirements and the current repository
3994 3995 # is not a shared one, write the store requirements.
3995 3996 # For a new shared repository, we don't need to write the store
3996 3997 # requirements as they are already present in the store's requires file.
3997 3998 if storereq and b'sharedrepo' not in createopts:
3998 3999 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3999 4000 scmutil.writerequires(storevfs, storereq)
4000 4001
4001 4002 # Write out file telling readers where to find the shared store.
4002 4003 if b'sharedrepo' in createopts:
4003 4004 hgvfs.write(b'sharedpath', sharedpath)
4004 4005
4005 4006 if createopts.get(b'shareditems'):
4006 4007 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4007 4008 hgvfs.write(b'shared', shared)
4008 4009
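# Usage sketch (hypothetical path): create an lfs-enabled repository and
# open it afterwards with instance() defined above.
#
#     createrepository(ui, b'/tmp/example-repo', createopts={b'lfs': True})
#     repo = instance(ui, b'/tmp/example-repo', create=False)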
4009 4010
4010 4011 def poisonrepository(repo):
4011 4012 """Poison a repository instance so it can no longer be used."""
4012 4013 # Perform any cleanup on the instance.
4013 4014 repo.close()
4014 4015
4015 4016 # Our strategy is to replace the type of the object with one that
4016 4017 # has all attribute lookups result in error.
4017 4018 #
4018 4019 # But we have to allow the close() method because some constructors
4019 4020 # of repos call close() on repo references.
4020 4021 class poisonedrepository:
4021 4022 def __getattribute__(self, item):
4022 4023 if item == 'close':
4023 4024 return object.__getattribute__(self, item)
4024 4025
4025 4026 raise error.ProgrammingError(
4026 4027 b'repo instances should not be used after unshare'
4027 4028 )
4028 4029
4029 4030 def close(self):
4030 4031 pass
4031 4032
4032 4033 # We may have a repoview, which intercepts __setattr__. So be sure
4033 4034 # we operate at the lowest level possible.
4034 4035 object.__setattr__(repo, '__class__', poisonedrepository)
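# Usage sketch: once poisoned, only close() keeps working.
#
#     poisonrepository(repo)
#     repo.close()       # still allowed
#     repo.changelog     # raises error.ProgrammingError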