requires: re-use vfs.tryread for simplicity...
Jason R. Coombs
r50388:4367c46a default
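The patch drops the explicit try/except FileNotFoundError around vfs.read(b'requires') and instead picks the reader up front: vfs.tryread when a missing file is allowed, vfs.read otherwise. The simplification relies on vfs.tryread returning an empty byte string for a missing file, so b''.splitlines() yields an empty requirement set, matching the old fallback. Below is a minimal sketch of that equivalence; FakeVfs and readrequires are hypothetical stand-ins for illustration, not Mercurial's actual vfs class or _readrequires.

# A stand-in vfs whose tryread() mirrors the behaviour the change relies on:
# return the file contents, or b'' when the file does not exist.
class FakeVfs:
    def __init__(self, files):
        self._files = files  # dict mapping filename (bytes) -> contents (bytes)

    def read(self, name):
        if name not in self._files:
            raise FileNotFoundError(name)
        return self._files[name]

    def tryread(self, name):
        try:
            return self.read(name)
        except FileNotFoundError:
            return b''


def readrequires(vfs, allowmissing):
    # Same shape as the rewritten _readrequires: choose the reader once
    # instead of wrapping read() in try/except FileNotFoundError.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())


# A missing file with allowmissing=True gives an empty set, because
# b''.splitlines() == [] -- the same result the removed fallback produced.
assert readrequires(FakeVfs({}), allowmissing=True) == set()
assert readrequires(
    FakeVfs({b'requires': b'store\nfncache\n'}), allowmissing=False
) == {b'store', b'fncache'}

When allowmissing is False the behaviour is also unchanged: vfs.read still raises FileNotFoundError for a missing file, exactly as the removed re-raise path did.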
@@ -1,3958 +1,3954 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from concurrent import futures
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 wireprototypes,
75 75 )
76 76
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81
82 82 from .utils import (
83 83 hashutil,
84 84 procutil,
85 85 stringutil,
86 86 urlutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 concurrency_checker as revlogchecker,
91 91 constants as revlogconst,
92 92 sidedata as sidedatamod,
93 93 )
94 94
95 95 release = lockmod.release
96 96 urlerr = util.urlerr
97 97 urlreq = util.urlreq
98 98
99 99 # set of (path, vfs-location) tuples. vfs-location is:
100 100 # - 'plain' for vfs relative paths
101 101 # - '' for svfs relative paths
102 102 _cachedfiles = set()
103 103
104 104
105 105 class _basefilecache(scmutil.filecache):
106 106 """All filecache usage on repo are done for logic that should be unfiltered"""
107 107
108 108 def __get__(self, repo, type=None):
109 109 if repo is None:
110 110 return self
111 111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 112 unfi = repo.unfiltered()
113 113 try:
114 114 return unfi.__dict__[self.sname]
115 115 except KeyError:
116 116 pass
117 117 return super(_basefilecache, self).__get__(unfi, type)
118 118
119 119 def set(self, repo, value):
120 120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 121
122 122
123 123 class repofilecache(_basefilecache):
124 124 """filecache for files in .hg but outside of .hg/store"""
125 125
126 126 def __init__(self, *paths):
127 127 super(repofilecache, self).__init__(*paths)
128 128 for path in paths:
129 129 _cachedfiles.add((path, b'plain'))
130 130
131 131 def join(self, obj, fname):
132 132 return obj.vfs.join(fname)
133 133
134 134
135 135 class storecache(_basefilecache):
136 136 """filecache for files in the store"""
137 137
138 138 def __init__(self, *paths):
139 139 super(storecache, self).__init__(*paths)
140 140 for path in paths:
141 141 _cachedfiles.add((path, b''))
142 142
143 143 def join(self, obj, fname):
144 144 return obj.sjoin(fname)
145 145
146 146
147 147 class changelogcache(storecache):
148 148 """filecache for the changelog"""
149 149
150 150 def __init__(self):
151 151 super(changelogcache, self).__init__()
152 152 _cachedfiles.add((b'00changelog.i', b''))
153 153 _cachedfiles.add((b'00changelog.n', b''))
154 154
155 155 def tracked_paths(self, obj):
156 156 paths = [self.join(obj, b'00changelog.i')]
157 157 if obj.store.opener.options.get(b'persistent-nodemap', False):
158 158 paths.append(self.join(obj, b'00changelog.n'))
159 159 return paths
160 160
161 161
162 162 class manifestlogcache(storecache):
163 163 """filecache for the manifestlog"""
164 164
165 165 def __init__(self):
166 166 super(manifestlogcache, self).__init__()
167 167 _cachedfiles.add((b'00manifest.i', b''))
168 168 _cachedfiles.add((b'00manifest.n', b''))
169 169
170 170 def tracked_paths(self, obj):
171 171 paths = [self.join(obj, b'00manifest.i')]
172 172 if obj.store.opener.options.get(b'persistent-nodemap', False):
173 173 paths.append(self.join(obj, b'00manifest.n'))
174 174 return paths
175 175
176 176
177 177 class mixedrepostorecache(_basefilecache):
178 178 """filecache for a mix files in .hg/store and outside"""
179 179
180 180 def __init__(self, *pathsandlocations):
181 181 # scmutil.filecache only uses the path for passing back into our
182 182 # join(), so we can safely pass a list of paths and locations
183 183 super(mixedrepostorecache, self).__init__(*pathsandlocations)
184 184 _cachedfiles.update(pathsandlocations)
185 185
186 186 def join(self, obj, fnameandlocation):
187 187 fname, location = fnameandlocation
188 188 if location == b'plain':
189 189 return obj.vfs.join(fname)
190 190 else:
191 191 if location != b'':
192 192 raise error.ProgrammingError(
193 193 b'unexpected location: %s' % location
194 194 )
195 195 return obj.sjoin(fname)
196 196
197 197
198 198 def isfilecached(repo, name):
199 199 """check if a repo has already cached "name" filecache-ed property
200 200
201 201 This returns (cachedobj-or-None, iscached) tuple.
202 202 """
203 203 cacheentry = repo.unfiltered()._filecache.get(name, None)
204 204 if not cacheentry:
205 205 return None, False
206 206 return cacheentry.obj, True
207 207
208 208
209 209 class unfilteredpropertycache(util.propertycache):
210 210 """propertycache that apply to unfiltered repo only"""
211 211
212 212 def __get__(self, repo, type=None):
213 213 unfi = repo.unfiltered()
214 214 if unfi is repo:
215 215 return super(unfilteredpropertycache, self).__get__(unfi)
216 216 return getattr(unfi, self.name)
217 217
218 218
219 219 class filteredpropertycache(util.propertycache):
220 220 """propertycache that must take filtering in account"""
221 221
222 222 def cachevalue(self, obj, value):
223 223 object.__setattr__(obj, self.name, value)
224 224
225 225
226 226 def hasunfilteredcache(repo, name):
227 227 """check if a repo has an unfilteredpropertycache value for <name>"""
228 228 return name in vars(repo.unfiltered())
229 229
230 230
231 231 def unfilteredmethod(orig):
232 232 """decorate method that always need to be run on unfiltered version"""
233 233
234 234 @functools.wraps(orig)
235 235 def wrapper(repo, *args, **kwargs):
236 236 return orig(repo.unfiltered(), *args, **kwargs)
237 237
238 238 return wrapper
239 239
240 240
241 241 moderncaps = {
242 242 b'lookup',
243 243 b'branchmap',
244 244 b'pushkey',
245 245 b'known',
246 246 b'getbundle',
247 247 b'unbundle',
248 248 }
249 249 legacycaps = moderncaps.union({b'changegroupsubset'})
250 250
251 251
252 252 @interfaceutil.implementer(repository.ipeercommandexecutor)
253 253 class localcommandexecutor:
254 254 def __init__(self, peer):
255 255 self._peer = peer
256 256 self._sent = False
257 257 self._closed = False
258 258
259 259 def __enter__(self):
260 260 return self
261 261
262 262 def __exit__(self, exctype, excvalue, exctb):
263 263 self.close()
264 264
265 265 def callcommand(self, command, args):
266 266 if self._sent:
267 267 raise error.ProgrammingError(
268 268 b'callcommand() cannot be used after sendcommands()'
269 269 )
270 270
271 271 if self._closed:
272 272 raise error.ProgrammingError(
273 273 b'callcommand() cannot be used after close()'
274 274 )
275 275
276 276 # We don't need to support anything fancy. Just call the named
277 277 # method on the peer and return a resolved future.
278 278 fn = getattr(self._peer, pycompat.sysstr(command))
279 279
280 280 f = futures.Future()
281 281
282 282 try:
283 283 result = fn(**pycompat.strkwargs(args))
284 284 except Exception:
285 285 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
286 286 else:
287 287 f.set_result(result)
288 288
289 289 return f
290 290
291 291 def sendcommands(self):
292 292 self._sent = True
293 293
294 294 def close(self):
295 295 self._closed = True
296 296
297 297
298 298 @interfaceutil.implementer(repository.ipeercommands)
299 299 class localpeer(repository.peer):
300 300 '''peer for a local repo; reflects only the most recent API'''
301 301
302 302 def __init__(self, repo, caps=None):
303 303 super(localpeer, self).__init__()
304 304
305 305 if caps is None:
306 306 caps = moderncaps.copy()
307 307 self._repo = repo.filtered(b'served')
308 308 self.ui = repo.ui
309 309
310 310 if repo._wanted_sidedata:
311 311 formatted = bundle2.format_remote_wanted_sidedata(repo)
312 312 caps.add(b'exp-wanted-sidedata=' + formatted)
313 313
314 314 self._caps = repo._restrictcapabilities(caps)
315 315
316 316 # Begin of _basepeer interface.
317 317
318 318 def url(self):
319 319 return self._repo.url()
320 320
321 321 def local(self):
322 322 return self._repo
323 323
324 324 def peer(self):
325 325 return self
326 326
327 327 def canpush(self):
328 328 return True
329 329
330 330 def close(self):
331 331 self._repo.close()
332 332
333 333 # End of _basepeer interface.
334 334
335 335 # Begin of _basewirecommands interface.
336 336
337 337 def branchmap(self):
338 338 return self._repo.branchmap()
339 339
340 340 def capabilities(self):
341 341 return self._caps
342 342
343 343 def clonebundles(self):
344 344 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
345 345
346 346 def debugwireargs(self, one, two, three=None, four=None, five=None):
347 347 """Used to test argument passing over the wire"""
348 348 return b"%s %s %s %s %s" % (
349 349 one,
350 350 two,
351 351 pycompat.bytestr(three),
352 352 pycompat.bytestr(four),
353 353 pycompat.bytestr(five),
354 354 )
355 355
356 356 def getbundle(
357 357 self,
358 358 source,
359 359 heads=None,
360 360 common=None,
361 361 bundlecaps=None,
362 362 remote_sidedata=None,
363 363 **kwargs
364 364 ):
365 365 chunks = exchange.getbundlechunks(
366 366 self._repo,
367 367 source,
368 368 heads=heads,
369 369 common=common,
370 370 bundlecaps=bundlecaps,
371 371 remote_sidedata=remote_sidedata,
372 372 **kwargs
373 373 )[1]
374 374 cb = util.chunkbuffer(chunks)
375 375
376 376 if exchange.bundle2requested(bundlecaps):
377 377 # When requesting a bundle2, getbundle returns a stream to make the
378 378 # wire level function happier. We need to build a proper object
379 379 # from it in local peer.
380 380 return bundle2.getunbundler(self.ui, cb)
381 381 else:
382 382 return changegroup.getunbundler(b'01', cb, None)
383 383
384 384 def heads(self):
385 385 return self._repo.heads()
386 386
387 387 def known(self, nodes):
388 388 return self._repo.known(nodes)
389 389
390 390 def listkeys(self, namespace):
391 391 return self._repo.listkeys(namespace)
392 392
393 393 def lookup(self, key):
394 394 return self._repo.lookup(key)
395 395
396 396 def pushkey(self, namespace, key, old, new):
397 397 return self._repo.pushkey(namespace, key, old, new)
398 398
399 399 def stream_out(self):
400 400 raise error.Abort(_(b'cannot perform stream clone against local peer'))
401 401
402 402 def unbundle(self, bundle, heads, url):
403 403 """apply a bundle on a repo
404 404
405 405 This function handles the repo locking itself."""
406 406 try:
407 407 try:
408 408 bundle = exchange.readbundle(self.ui, bundle, None)
409 409 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
410 410 if util.safehasattr(ret, b'getchunks'):
411 411 # This is a bundle20 object, turn it into an unbundler.
412 412 # This little dance should be dropped eventually when the
413 413 # API is finally improved.
414 414 stream = util.chunkbuffer(ret.getchunks())
415 415 ret = bundle2.getunbundler(self.ui, stream)
416 416 return ret
417 417 except Exception as exc:
418 418 # If the exception contains output salvaged from a bundle2
419 419 # reply, we need to make sure it is printed before continuing
420 420 # to fail. So we build a bundle2 with such output and consume
421 421 # it directly.
422 422 #
423 423 # This is not very elegant but allows a "simple" solution for
424 424 # issue4594
425 425 output = getattr(exc, '_bundle2salvagedoutput', ())
426 426 if output:
427 427 bundler = bundle2.bundle20(self._repo.ui)
428 428 for out in output:
429 429 bundler.addpart(out)
430 430 stream = util.chunkbuffer(bundler.getchunks())
431 431 b = bundle2.getunbundler(self.ui, stream)
432 432 bundle2.processbundle(self._repo, b)
433 433 raise
434 434 except error.PushRaced as exc:
435 435 raise error.ResponseError(
436 436 _(b'push failed:'), stringutil.forcebytestr(exc)
437 437 )
438 438
439 439 # End of _basewirecommands interface.
440 440
441 441 # Begin of peer interface.
442 442
443 443 def commandexecutor(self):
444 444 return localcommandexecutor(self)
445 445
446 446 # End of peer interface.
447 447
448 448
449 449 @interfaceutil.implementer(repository.ipeerlegacycommands)
450 450 class locallegacypeer(localpeer):
451 451 """peer extension which implements legacy methods too; used for tests with
452 452 restricted capabilities"""
453 453
454 454 def __init__(self, repo):
455 455 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
456 456
457 457 # Begin of baselegacywirecommands interface.
458 458
459 459 def between(self, pairs):
460 460 return self._repo.between(pairs)
461 461
462 462 def branches(self, nodes):
463 463 return self._repo.branches(nodes)
464 464
465 465 def changegroup(self, nodes, source):
466 466 outgoing = discovery.outgoing(
467 467 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
468 468 )
469 469 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
470 470
471 471 def changegroupsubset(self, bases, heads, source):
472 472 outgoing = discovery.outgoing(
473 473 self._repo, missingroots=bases, ancestorsof=heads
474 474 )
475 475 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
476 476
477 477 # End of baselegacywirecommands interface.
478 478
479 479
480 480 # Functions receiving (ui, features) that extensions can register to impact
481 481 # the ability to load repositories with custom requirements. Only
482 482 # functions defined in loaded extensions are called.
483 483 #
484 484 # The function receives a set of requirement strings that the repository
485 485 # is capable of opening. Functions will typically add elements to the
486 486 # set to reflect that the extension knows how to handle those requirements.
487 487 featuresetupfuncs = set()
488 488
489 489
490 490 def _getsharedvfs(hgvfs, requirements):
491 491 """returns the vfs object pointing to root of shared source
492 492 repo for a shared repository
493 493
494 494 hgvfs is vfs pointing at .hg/ of current repo (shared one)
495 495 requirements is a set of requirements of current repo (shared one)
496 496 """
497 497 # The ``shared`` or ``relshared`` requirements indicate the
498 498 # store lives in the path contained in the ``.hg/sharedpath`` file.
499 499 # This is an absolute path for ``shared`` and relative to
500 500 # ``.hg/`` for ``relshared``.
501 501 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
502 502 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
503 503 sharedpath = util.normpath(hgvfs.join(sharedpath))
504 504
505 505 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
506 506
507 507 if not sharedvfs.exists():
508 508 raise error.RepoError(
509 509 _(b'.hg/sharedpath points to nonexistent directory %s')
510 510 % sharedvfs.base
511 511 )
512 512 return sharedvfs
513 513
514 514
515 515 def _readrequires(vfs, allowmissing):
516 516 """reads the require file present at root of this vfs
517 517 and return a set of requirements
518 518
519 519 If allowmissing is True, we suppress FileNotFoundError if raised"""
520 520 # requires file contains a newline-delimited list of
521 521 # features/capabilities the opener (us) must have in order to use
522 522 # the repository. This file was introduced in Mercurial 0.9.2,
523 523 # which means very old repositories may not have one. We assume
524 524 # a missing file translates to no requirements.
525 try:
526 return set(vfs.read(b'requires').splitlines())
527 except FileNotFoundError:
528 if not allowmissing:
529 raise
530 return set()
525 read = vfs.tryread if allowmissing else vfs.read
526 return set(read(b'requires').splitlines())
531 527
532 528
533 529 def makelocalrepository(baseui, path, intents=None):
534 530 """Create a local repository object.
535 531
536 532 Given arguments needed to construct a local repository, this function
537 533 performs various early repository loading functionality (such as
538 534 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 535 the repository can be opened, derives a type suitable for representing
540 536 that repository, and returns an instance of it.
541 537
542 538 The returned object conforms to the ``repository.completelocalrepository``
543 539 interface.
544 540
545 541 The repository type is derived by calling a series of factory functions
546 542 for each aspect/interface of the final repository. These are defined by
547 543 ``REPO_INTERFACES``.
548 544
549 545 Each factory function is called to produce a type implementing a specific
550 546 interface. The cumulative list of returned types will be combined into a
551 547 new type and that type will be instantiated to represent the local
552 548 repository.
553 549
554 550 The factory functions each receive various state that may be consulted
555 551 as part of deriving a type.
556 552
557 553 Extensions should wrap these factory functions to customize repository type
558 554 creation. Note that an extension's wrapped function may be called even if
559 555 that extension is not loaded for the repo being constructed. Extensions
560 556 should check if their ``__name__`` appears in the
561 557 ``extensionmodulenames`` set passed to the factory function and no-op if
562 558 not.
563 559 """
564 560 ui = baseui.copy()
565 561 # Prevent copying repo configuration.
566 562 ui.copy = baseui.copy
567 563
568 564 # Working directory VFS rooted at repository root.
569 565 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570 566
571 567 # Main VFS for .hg/ directory.
572 568 hgpath = wdirvfs.join(b'.hg')
573 569 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 570 # Whether this repository is a shared one or not
575 571 shared = False
576 572 # If this repository is shared, vfs pointing to shared repo
577 573 sharedvfs = None
578 574
579 575 # The .hg/ path should exist and should be a directory. All other
580 576 # cases are errors.
581 577 if not hgvfs.isdir():
582 578 try:
583 579 hgvfs.stat()
584 580 except FileNotFoundError:
585 581 pass
586 582 except ValueError as e:
587 583 # Can be raised on Python 3.8 when path is invalid.
588 584 raise error.Abort(
589 585 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 586 )
591 587
592 588 raise error.RepoError(_(b'repository %s not found') % path)
593 589
594 590 requirements = _readrequires(hgvfs, True)
595 591 shared = (
596 592 requirementsmod.SHARED_REQUIREMENT in requirements
597 593 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 594 )
599 595 storevfs = None
600 596 if shared:
601 597 # This is a shared repo
602 598 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 599 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 600 else:
605 601 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606 602
607 603 # if .hg/requires contains the sharesafe requirement, it means
608 604 # there exists a `.hg/store/requires` too and we should read it
610 606 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
611 607 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
612 608 # is not present; refer to checkrequirementscompat() for that
612 608 #
613 609 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 610 # repository was shared the old way. We check the share source .hg/requires
615 611 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 612 # to be reshared
617 613 hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 614 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 615
620 616 if (
621 617 shared
622 618 and requirementsmod.SHARESAFE_REQUIREMENT
623 619 not in _readrequires(sharedvfs, True)
624 620 ):
625 621 mismatch_warn = ui.configbool(
626 622 b'share', b'safe-mismatch.source-not-safe.warn'
627 623 )
628 624 mismatch_config = ui.config(
629 625 b'share', b'safe-mismatch.source-not-safe'
630 626 )
631 627 mismatch_verbose_upgrade = ui.configbool(
632 628 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
633 629 )
634 630 if mismatch_config in (
635 631 b'downgrade-allow',
636 632 b'allow',
637 633 b'downgrade-abort',
638 634 ):
639 635 # prevent cyclic import localrepo -> upgrade -> localrepo
640 636 from . import upgrade
641 637
642 638 upgrade.downgrade_share_to_non_safe(
643 639 ui,
644 640 hgvfs,
645 641 sharedvfs,
646 642 requirements,
647 643 mismatch_config,
648 644 mismatch_warn,
649 645 mismatch_verbose_upgrade,
650 646 )
651 647 elif mismatch_config == b'abort':
652 648 raise error.Abort(
653 649 _(b"share source does not support share-safe requirement"),
654 650 hint=hint,
655 651 )
656 652 else:
657 653 raise error.Abort(
658 654 _(
659 655 b"share-safe mismatch with source.\nUnrecognized"
660 656 b" value '%s' of `share.safe-mismatch.source-not-safe`"
661 657 b" set."
662 658 )
663 659 % mismatch_config,
664 660 hint=hint,
665 661 )
666 662 else:
667 663 requirements |= _readrequires(storevfs, False)
668 664 elif shared:
669 665 sourcerequires = _readrequires(sharedvfs, False)
670 666 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
671 667 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
672 668 mismatch_warn = ui.configbool(
673 669 b'share', b'safe-mismatch.source-safe.warn'
674 670 )
675 671 mismatch_verbose_upgrade = ui.configbool(
676 672 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
677 673 )
678 674 if mismatch_config in (
679 675 b'upgrade-allow',
680 676 b'allow',
681 677 b'upgrade-abort',
682 678 ):
683 679 # prevent cyclic import localrepo -> upgrade -> localrepo
684 680 from . import upgrade
685 681
686 682 upgrade.upgrade_share_to_safe(
687 683 ui,
688 684 hgvfs,
689 685 storevfs,
690 686 requirements,
691 687 mismatch_config,
692 688 mismatch_warn,
693 689 mismatch_verbose_upgrade,
694 690 )
695 691 elif mismatch_config == b'abort':
696 692 raise error.Abort(
697 693 _(
698 694 b'version mismatch: source uses share-safe'
699 695 b' functionality while the current share does not'
700 696 ),
701 697 hint=hint,
702 698 )
703 699 else:
704 700 raise error.Abort(
705 701 _(
706 702 b"share-safe mismatch with source.\nUnrecognized"
707 703 b" value '%s' of `share.safe-mismatch.source-safe` set."
708 704 )
709 705 % mismatch_config,
710 706 hint=hint,
711 707 )
712 708
713 709 # The .hg/hgrc file may load extensions or contain config options
714 710 # that influence repository construction. Attempt to load it and
715 711 # process any new extensions that it may have pulled in.
716 712 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
717 713 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
718 714 extensions.loadall(ui)
719 715 extensions.populateui(ui)
720 716
721 717 # Set of module names of extensions loaded for this repository.
722 718 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
723 719
724 720 supportedrequirements = gathersupportedrequirements(ui)
725 721
726 722 # We first validate the requirements are known.
727 723 ensurerequirementsrecognized(requirements, supportedrequirements)
728 724
729 725 # Then we validate that the known set is reasonable to use together.
730 726 ensurerequirementscompatible(ui, requirements)
731 727
732 728 # TODO there are unhandled edge cases related to opening repositories with
733 729 # shared storage. If storage is shared, we should also test for requirements
734 730 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
735 731 # that repo, as that repo may load extensions needed to open it. This is a
736 732 # bit complicated because we don't want the other hgrc to overwrite settings
737 733 # in this hgrc.
738 734 #
739 735 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
740 736 # file when sharing repos. But if a requirement is added after the share is
741 737 # performed, thereby introducing a new requirement for the opener, we may
742 738 # not see that and could encounter a run-time error interacting with
743 739 # that shared store since it has an unknown-to-us requirement.
744 740
745 741 # At this point, we know we should be capable of opening the repository.
746 742 # Now get on with doing that.
747 743
748 744 features = set()
749 745
750 746 # The "store" part of the repository holds versioned data. How it is
751 747 # accessed is determined by various requirements. If `shared` or
752 748 # `relshared` requirements are present, this indicates current repository
753 749 # is a share and store exists in path mentioned in `.hg/sharedpath`
754 750 if shared:
755 751 storebasepath = sharedvfs.base
756 752 cachepath = sharedvfs.join(b'cache')
757 753 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
758 754 else:
759 755 storebasepath = hgvfs.base
760 756 cachepath = hgvfs.join(b'cache')
761 757 wcachepath = hgvfs.join(b'wcache')
762 758
763 759 # The store has changed over time and the exact layout is dictated by
764 760 # requirements. The store interface abstracts differences across all
765 761 # of them.
766 762 store = makestore(
767 763 requirements,
768 764 storebasepath,
769 765 lambda base: vfsmod.vfs(base, cacheaudited=True),
770 766 )
771 767 hgvfs.createmode = store.createmode
772 768
773 769 storevfs = store.vfs
774 770 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
775 771
776 772 if (
777 773 requirementsmod.REVLOGV2_REQUIREMENT in requirements
778 774 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
779 775 ):
780 776 features.add(repository.REPO_FEATURE_SIDE_DATA)
782 778 # the revlogv2 docket introduced a race condition that we need to fix
782 778 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
783 779
784 780 # The cache vfs is used to manage cache files.
785 781 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
786 782 cachevfs.createmode = store.createmode
787 783 # The cache vfs is used to manage cache files related to the working copy
788 784 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
789 785 wcachevfs.createmode = store.createmode
790 786
791 787 # Now resolve the type for the repository object. We do this by repeatedly
792 788 # calling a factory function to produce types for specific aspects of the
793 789 # repo's operation. The aggregate returned types are used as base classes
794 790 # for a dynamically-derived type, which will represent our new repository.
795 791
796 792 bases = []
797 793 extrastate = {}
798 794
799 795 for iface, fn in REPO_INTERFACES:
800 796 # We pass all potentially useful state to give extensions tons of
801 797 # flexibility.
802 798 typ = fn()(
803 799 ui=ui,
804 800 intents=intents,
805 801 requirements=requirements,
806 802 features=features,
807 803 wdirvfs=wdirvfs,
808 804 hgvfs=hgvfs,
809 805 store=store,
810 806 storevfs=storevfs,
811 807 storeoptions=storevfs.options,
812 808 cachevfs=cachevfs,
813 809 wcachevfs=wcachevfs,
814 810 extensionmodulenames=extensionmodulenames,
815 811 extrastate=extrastate,
816 812 baseclasses=bases,
817 813 )
818 814
819 815 if not isinstance(typ, type):
820 816 raise error.ProgrammingError(
821 817 b'unable to construct type for %s' % iface
822 818 )
823 819
824 820 bases.append(typ)
825 821
826 822 # type() allows you to use characters in type names that wouldn't be
827 823 # recognized as Python symbols in source code. We abuse that to add
828 824 # rich information about our constructed repo.
829 825 name = pycompat.sysstr(
830 826 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
831 827 )
832 828
833 829 cls = type(name, tuple(bases), {})
834 830
835 831 return cls(
836 832 baseui=baseui,
837 833 ui=ui,
838 834 origroot=path,
839 835 wdirvfs=wdirvfs,
840 836 hgvfs=hgvfs,
841 837 requirements=requirements,
842 838 supportedrequirements=supportedrequirements,
843 839 sharedpath=storebasepath,
844 840 store=store,
845 841 cachevfs=cachevfs,
846 842 wcachevfs=wcachevfs,
847 843 features=features,
848 844 intents=intents,
849 845 )
850 846
851 847
852 848 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
853 849 """Load hgrc files/content into a ui instance.
854 850
855 851 This is called during repository opening to load any additional
856 852 config files or settings relevant to the current repository.
857 853
858 854 Returns a bool indicating whether any additional configs were loaded.
859 855
860 856 Extensions should monkeypatch this function to modify how per-repo
861 857 configs are loaded. For example, an extension may wish to pull in
862 858 configs from alternate files or sources.
863 859
864 860 sharedvfs is a vfs object pointing to the source repo if the current one is a
865 861 shared one
866 862 """
867 863 if not rcutil.use_repo_hgrc():
868 864 return False
869 865
870 866 ret = False
871 867 # first load config from shared source if we have to
872 868 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
873 869 try:
874 870 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
875 871 ret = True
876 872 except IOError:
877 873 pass
878 874
879 875 try:
880 876 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
881 877 ret = True
882 878 except IOError:
883 879 pass
884 880
885 881 try:
886 882 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
887 883 ret = True
888 884 except IOError:
889 885 pass
890 886
891 887 return ret
892 888
893 889
894 890 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
895 891 """Perform additional actions after .hg/hgrc is loaded.
896 892
897 893 This function is called during repository loading immediately after
898 894 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
899 895
900 896 The function can be used to validate configs, automatically add
901 897 options (including extensions) based on requirements, etc.
902 898 """
903 899
904 900 # Map of requirements to list of extensions to load automatically when
905 901 # requirement is present.
906 902 autoextensions = {
907 903 b'git': [b'git'],
908 904 b'largefiles': [b'largefiles'],
909 905 b'lfs': [b'lfs'],
910 906 }
911 907
912 908 for requirement, names in sorted(autoextensions.items()):
913 909 if requirement not in requirements:
914 910 continue
915 911
916 912 for name in names:
917 913 if not ui.hasconfig(b'extensions', name):
918 914 ui.setconfig(b'extensions', name, b'', source=b'autoload')
919 915
920 916
921 917 def gathersupportedrequirements(ui):
922 918 """Determine the complete set of recognized requirements."""
923 919 # Start with all requirements supported by this file.
924 920 supported = set(localrepository._basesupported)
925 921
926 922 # Execute ``featuresetupfuncs`` entries if they belong to an extension
927 923 # relevant to this ui instance.
928 924 modules = {m.__name__ for n, m in extensions.extensions(ui)}
929 925
930 926 for fn in featuresetupfuncs:
931 927 if fn.__module__ in modules:
932 928 fn(ui, supported)
933 929
934 930 # Add derived requirements from registered compression engines.
935 931 for name in util.compengines:
936 932 engine = util.compengines[name]
937 933 if engine.available() and engine.revlogheader():
938 934 supported.add(b'exp-compression-%s' % name)
939 935 if engine.name() == b'zstd':
940 936 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
941 937
942 938 return supported
943 939
944 940
945 941 def ensurerequirementsrecognized(requirements, supported):
946 942 """Validate that a set of local requirements is recognized.
947 943
948 944 Receives a set of requirements. Raises an ``error.RepoError`` if there
949 945 exists any requirement in that set that currently loaded code doesn't
950 946 recognize.
951 947
952 948 Returns a set of supported requirements.
953 949 """
954 950 missing = set()
955 951
956 952 for requirement in requirements:
957 953 if requirement in supported:
958 954 continue
959 955
960 956 if not requirement or not requirement[0:1].isalnum():
961 957 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
962 958
963 959 missing.add(requirement)
964 960
965 961 if missing:
966 962 raise error.RequirementError(
967 963 _(b'repository requires features unknown to this Mercurial: %s')
968 964 % b' '.join(sorted(missing)),
969 965 hint=_(
970 966 b'see https://mercurial-scm.org/wiki/MissingRequirement '
971 967 b'for more information'
972 968 ),
973 969 )
974 970
975 971
976 972 def ensurerequirementscompatible(ui, requirements):
977 973 """Validates that a set of recognized requirements is mutually compatible.
978 974
979 975 Some requirements may not be compatible with others or require
980 976 config options that aren't enabled. This function is called during
981 977 repository opening to ensure that the set of requirements needed
982 978 to open a repository is sane and compatible with config options.
983 979
984 980 Extensions can monkeypatch this function to perform additional
985 981 checking.
986 982
987 983 ``error.RepoError`` should be raised on failure.
988 984 """
989 985 if (
990 986 requirementsmod.SPARSE_REQUIREMENT in requirements
991 987 and not sparse.enabled
992 988 ):
993 989 raise error.RepoError(
994 990 _(
995 991 b'repository is using sparse feature but '
996 992 b'sparse is not enabled; enable the '
997 993 b'"sparse" extensions to access'
998 994 )
999 995 )
1000 996
1001 997
1002 998 def makestore(requirements, path, vfstype):
1003 999 """Construct a storage object for a repository."""
1004 1000 if requirementsmod.STORE_REQUIREMENT in requirements:
1005 1001 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1006 1002 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1007 1003 return storemod.fncachestore(path, vfstype, dotencode)
1008 1004
1009 1005 return storemod.encodedstore(path, vfstype)
1010 1006
1011 1007 return storemod.basicstore(path, vfstype)
1012 1008
1013 1009
1014 1010 def resolvestorevfsoptions(ui, requirements, features):
1015 1011 """Resolve the options to pass to the store vfs opener.
1016 1012
1017 1013 The returned dict is used to influence behavior of the storage layer.
1018 1014 """
1019 1015 options = {}
1020 1016
1021 1017 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1022 1018 options[b'treemanifest'] = True
1023 1019
1024 1020 # experimental config: format.manifestcachesize
1025 1021 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1026 1022 if manifestcachesize is not None:
1027 1023 options[b'manifestcachesize'] = manifestcachesize
1028 1024
1029 1025 # In the absence of another requirement superseding a revlog-related
1030 1026 # requirement, we have to assume the repo is using revlog version 0.
1031 1027 # This revlog format is super old and we don't bother trying to parse
1032 1028 # opener options for it because those options wouldn't do anything
1033 1029 # meaningful on such old repos.
1034 1030 if (
1035 1031 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1036 1032 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1037 1033 ):
1038 1034 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1039 1035 else: # explicitly mark repo as using revlogv0
1040 1036 options[b'revlogv0'] = True
1041 1037
1042 1038 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1043 1039 options[b'copies-storage'] = b'changeset-sidedata'
1044 1040 else:
1045 1041 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1046 1042 copiesextramode = (b'changeset-only', b'compatibility')
1047 1043 if writecopiesto in copiesextramode:
1048 1044 options[b'copies-storage'] = b'extra'
1049 1045
1050 1046 return options
1051 1047
1052 1048
1053 1049 def resolverevlogstorevfsoptions(ui, requirements, features):
1054 1050 """Resolve opener options specific to revlogs."""
1055 1051
1056 1052 options = {}
1057 1053 options[b'flagprocessors'] = {}
1058 1054
1059 1055 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1060 1056 options[b'revlogv1'] = True
1061 1057 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1062 1058 options[b'revlogv2'] = True
1063 1059 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1064 1060 options[b'changelogv2'] = True
1065 1061
1066 1062 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1067 1063 options[b'generaldelta'] = True
1068 1064
1069 1065 # experimental config: format.chunkcachesize
1070 1066 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1071 1067 if chunkcachesize is not None:
1072 1068 options[b'chunkcachesize'] = chunkcachesize
1073 1069
1074 1070 deltabothparents = ui.configbool(
1075 1071 b'storage', b'revlog.optimize-delta-parent-choice'
1076 1072 )
1077 1073 options[b'deltabothparents'] = deltabothparents
1078 1074 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1079 1075
1080 1076 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1081 1077 options[b'issue6528.fix-incoming'] = issue6528
1082 1078
1083 1079 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1084 1080 lazydeltabase = False
1085 1081 if lazydelta:
1086 1082 lazydeltabase = ui.configbool(
1087 1083 b'storage', b'revlog.reuse-external-delta-parent'
1088 1084 )
1089 1085 if lazydeltabase is None:
1090 1086 lazydeltabase = not scmutil.gddeltaconfig(ui)
1091 1087 options[b'lazydelta'] = lazydelta
1092 1088 options[b'lazydeltabase'] = lazydeltabase
1093 1089
1094 1090 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1095 1091 if 0 <= chainspan:
1096 1092 options[b'maxdeltachainspan'] = chainspan
1097 1093
1098 1094 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1099 1095 if mmapindexthreshold is not None:
1100 1096 options[b'mmapindexthreshold'] = mmapindexthreshold
1101 1097
1102 1098 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1103 1099 srdensitythres = float(
1104 1100 ui.config(b'experimental', b'sparse-read.density-threshold')
1105 1101 )
1106 1102 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1107 1103 options[b'with-sparse-read'] = withsparseread
1108 1104 options[b'sparse-read-density-threshold'] = srdensitythres
1109 1105 options[b'sparse-read-min-gap-size'] = srmingapsize
1110 1106
1111 1107 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1112 1108 options[b'sparse-revlog'] = sparserevlog
1113 1109 if sparserevlog:
1114 1110 options[b'generaldelta'] = True
1115 1111
1116 1112 maxchainlen = None
1117 1113 if sparserevlog:
1118 1114 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1119 1115 # experimental config: format.maxchainlen
1120 1116 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1121 1117 if maxchainlen is not None:
1122 1118 options[b'maxchainlen'] = maxchainlen
1123 1119
1124 1120 for r in requirements:
1125 1121 # we allow multiple compression engine requirements to co-exist because
1126 1122 # strictly speaking, revlog seems to support mixed compression styles.
1127 1123 #
1128 1124 # The compression used for new entries will be "the last one"
1129 1125 prefix = r.startswith
1130 1126 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1131 1127 options[b'compengine'] = r.split(b'-', 2)[2]
1132 1128
1133 1129 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1134 1130 if options[b'zlib.level'] is not None:
1135 1131 if not (0 <= options[b'zlib.level'] <= 9):
1136 1132 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1137 1133 raise error.Abort(msg % options[b'zlib.level'])
1138 1134 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1139 1135 if options[b'zstd.level'] is not None:
1140 1136 if not (0 <= options[b'zstd.level'] <= 22):
1141 1137 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1142 1138 raise error.Abort(msg % options[b'zstd.level'])
1143 1139
1144 1140 if requirementsmod.NARROW_REQUIREMENT in requirements:
1145 1141 options[b'enableellipsis'] = True
1146 1142
1147 1143 if ui.configbool(b'experimental', b'rust.index'):
1148 1144 options[b'rust.index'] = True
1149 1145 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1150 1146 slow_path = ui.config(
1151 1147 b'storage', b'revlog.persistent-nodemap.slow-path'
1152 1148 )
1153 1149 if slow_path not in (b'allow', b'warn', b'abort'):
1154 1150 default = ui.config_default(
1155 1151 b'storage', b'revlog.persistent-nodemap.slow-path'
1156 1152 )
1157 1153 msg = _(
1158 1154 b'unknown value for config '
1159 1155 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1160 1156 )
1161 1157 ui.warn(msg % slow_path)
1162 1158 if not ui.quiet:
1163 1159 ui.warn(_(b'falling back to default value: %s\n') % default)
1164 1160 slow_path = default
1165 1161
1166 1162 msg = _(
1167 1163 b"accessing `persistent-nodemap` repository without associated "
1168 1164 b"fast implementation."
1169 1165 )
1170 1166 hint = _(
1171 1167 b"check `hg help config.format.use-persistent-nodemap` "
1172 1168 b"for details"
1173 1169 )
1174 1170 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1175 1171 if slow_path == b'warn':
1176 1172 msg = b"warning: " + msg + b'\n'
1177 1173 ui.warn(msg)
1178 1174 if not ui.quiet:
1179 1175 hint = b'(' + hint + b')\n'
1180 1176 ui.warn(hint)
1181 1177 if slow_path == b'abort':
1182 1178 raise error.Abort(msg, hint=hint)
1183 1179 options[b'persistent-nodemap'] = True
1184 1180 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1185 1181 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1186 1182 if slow_path not in (b'allow', b'warn', b'abort'):
1187 1183 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1188 1184 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1189 1185 ui.warn(msg % slow_path)
1190 1186 if not ui.quiet:
1191 1187 ui.warn(_(b'falling back to default value: %s\n') % default)
1192 1188 slow_path = default
1193 1189
1194 1190 msg = _(
1195 1191 b"accessing `dirstate-v2` repository without associated "
1196 1192 b"fast implementation."
1197 1193 )
1198 1194 hint = _(
1199 1195 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1200 1196 )
1201 1197 if not dirstate.HAS_FAST_DIRSTATE_V2:
1202 1198 if slow_path == b'warn':
1203 1199 msg = b"warning: " + msg + b'\n'
1204 1200 ui.warn(msg)
1205 1201 if not ui.quiet:
1206 1202 hint = b'(' + hint + b')\n'
1207 1203 ui.warn(hint)
1208 1204 if slow_path == b'abort':
1209 1205 raise error.Abort(msg, hint=hint)
1210 1206 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1211 1207 options[b'persistent-nodemap.mmap'] = True
1212 1208 if ui.configbool(b'devel', b'persistent-nodemap'):
1213 1209 options[b'devel-force-nodemap'] = True
1214 1210
1215 1211 return options
1216 1212
1217 1213
1218 1214 def makemain(**kwargs):
1219 1215 """Produce a type conforming to ``ilocalrepositorymain``."""
1220 1216 return localrepository
1221 1217
1222 1218
1223 1219 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1224 1220 class revlogfilestorage:
1225 1221 """File storage when using revlogs."""
1226 1222
1227 1223 def file(self, path):
1228 1224 if path.startswith(b'/'):
1229 1225 path = path[1:]
1230 1226
1231 1227 return filelog.filelog(self.svfs, path)
1232 1228
1233 1229
1234 1230 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1235 1231 class revlognarrowfilestorage:
1236 1232 """File storage when using revlogs and narrow files."""
1237 1233
1238 1234 def file(self, path):
1239 1235 if path.startswith(b'/'):
1240 1236 path = path[1:]
1241 1237
1242 1238 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1243 1239
1244 1240
1245 1241 def makefilestorage(requirements, features, **kwargs):
1246 1242 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1247 1243 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1248 1244 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1249 1245
1250 1246 if requirementsmod.NARROW_REQUIREMENT in requirements:
1251 1247 return revlognarrowfilestorage
1252 1248 else:
1253 1249 return revlogfilestorage
1254 1250
1255 1251
1256 1252 # List of repository interfaces and factory functions for them. Each
1257 1253 # will be called in order during ``makelocalrepository()`` to iteratively
1258 1254 # derive the final type for a local repository instance. We capture the
1259 1255 # function as a lambda so we don't hold a reference and the module-level
1260 1256 # functions can be wrapped.
1261 1257 REPO_INTERFACES = [
1262 1258 (repository.ilocalrepositorymain, lambda: makemain),
1263 1259 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1264 1260 ]
1265 1261
1266 1262
1267 1263 @interfaceutil.implementer(repository.ilocalrepositorymain)
1268 1264 class localrepository:
1269 1265 """Main class for representing local repositories.
1270 1266
1271 1267 All local repositories are instances of this class.
1272 1268
1273 1269 Constructed on its own, instances of this class are not usable as
1274 1270 repository objects. To obtain a usable repository object, call
1275 1271 ``hg.repository()``, ``localrepo.instance()``, or
1276 1272 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1277 1273 ``instance()`` adds support for creating new repositories.
1278 1274 ``hg.repository()`` adds more extension integration, including calling
1279 1275 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1280 1276 used.
1281 1277 """
1282 1278
1283 1279 _basesupported = {
1284 1280 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1285 1281 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1286 1282 requirementsmod.CHANGELOGV2_REQUIREMENT,
1287 1283 requirementsmod.COPIESSDC_REQUIREMENT,
1288 1284 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1289 1285 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1290 1286 requirementsmod.DOTENCODE_REQUIREMENT,
1291 1287 requirementsmod.FNCACHE_REQUIREMENT,
1292 1288 requirementsmod.GENERALDELTA_REQUIREMENT,
1293 1289 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1294 1290 requirementsmod.NODEMAP_REQUIREMENT,
1295 1291 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1296 1292 requirementsmod.REVLOGV1_REQUIREMENT,
1297 1293 requirementsmod.REVLOGV2_REQUIREMENT,
1298 1294 requirementsmod.SHARED_REQUIREMENT,
1299 1295 requirementsmod.SHARESAFE_REQUIREMENT,
1300 1296 requirementsmod.SPARSE_REQUIREMENT,
1301 1297 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1302 1298 requirementsmod.STORE_REQUIREMENT,
1303 1299 requirementsmod.TREEMANIFEST_REQUIREMENT,
1304 1300 }
1305 1301
1306 1302 # list of prefixes for files which can be written without 'wlock'
1307 1303 # Extensions should extend this list when needed
1308 1304 _wlockfreeprefix = {
1309 1305 # We might consider requiring 'wlock' for the next
1310 1306 # two, but pretty much all the existing code assumes
1311 1307 # wlock is not needed so we keep them excluded for
1312 1308 # now.
1313 1309 b'hgrc',
1314 1310 b'requires',
1315 1311 # XXX cache is a complicated business; someone
1316 1312 # should investigate this in depth at some point
1317 1313 b'cache/',
1318 1314 # XXX shouldn't dirstate be covered by the wlock?
1319 1315 b'dirstate',
1320 1316 # XXX bisect was still a bit too messy at the time
1321 1317 # this changeset was introduced. Someone should fix
1322 1318 # the remaining bit and drop this line
1323 1319 b'bisect.state',
1324 1320 }
1325 1321
1326 1322 def __init__(
1327 1323 self,
1328 1324 baseui,
1329 1325 ui,
1330 1326 origroot,
1331 1327 wdirvfs,
1332 1328 hgvfs,
1333 1329 requirements,
1334 1330 supportedrequirements,
1335 1331 sharedpath,
1336 1332 store,
1337 1333 cachevfs,
1338 1334 wcachevfs,
1339 1335 features,
1340 1336 intents=None,
1341 1337 ):
1342 1338 """Create a new local repository instance.
1343 1339
1344 1340 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1345 1341 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1346 1342 object.
1347 1343
1348 1344 Arguments:
1349 1345
1350 1346 baseui
1351 1347 ``ui.ui`` instance that ``ui`` argument was based off of.
1352 1348
1353 1349 ui
1354 1350 ``ui.ui`` instance for use by the repository.
1355 1351
1356 1352 origroot
1357 1353 ``bytes`` path to working directory root of this repository.
1358 1354
1359 1355 wdirvfs
1360 1356 ``vfs.vfs`` rooted at the working directory.
1361 1357
1362 1358 hgvfs
1363 1359 ``vfs.vfs`` rooted at .hg/
1364 1360
1365 1361 requirements
1366 1362 ``set`` of bytestrings representing repository opening requirements.
1367 1363
1368 1364 supportedrequirements
1369 1365 ``set`` of bytestrings representing repository requirements that we
1370 1366 know how to open. May be a superset of ``requirements``.
1371 1367
1372 1368 sharedpath
1373 1369 ``bytes`` Defining path to storage base directory. Points to a
1374 1370 ``.hg/`` directory somewhere.
1375 1371
1376 1372 store
1377 1373 ``store.basicstore`` (or derived) instance providing access to
1378 1374 versioned storage.
1379 1375
1380 1376 cachevfs
1381 1377 ``vfs.vfs`` used for cache files.
1382 1378
1383 1379 wcachevfs
1384 1380 ``vfs.vfs`` used for cache files related to the working copy.
1385 1381
1386 1382 features
1387 1383 ``set`` of bytestrings defining features/capabilities of this
1388 1384 instance.
1389 1385
1390 1386 intents
1391 1387 ``set`` of system strings indicating what this repo will be used
1392 1388 for.
1393 1389 """
1394 1390 self.baseui = baseui
1395 1391 self.ui = ui
1396 1392 self.origroot = origroot
1397 1393 # vfs rooted at working directory.
1398 1394 self.wvfs = wdirvfs
1399 1395 self.root = wdirvfs.base
1400 1396 # vfs rooted at .hg/. Used to access most non-store paths.
1401 1397 self.vfs = hgvfs
1402 1398 self.path = hgvfs.base
1403 1399 self.requirements = requirements
1404 1400 self.nodeconstants = sha1nodeconstants
1405 1401 self.nullid = self.nodeconstants.nullid
1406 1402 self.supported = supportedrequirements
1407 1403 self.sharedpath = sharedpath
1408 1404 self.store = store
1409 1405 self.cachevfs = cachevfs
1410 1406 self.wcachevfs = wcachevfs
1411 1407 self.features = features
1412 1408
1413 1409 self.filtername = None
1414 1410
1415 1411 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1416 1412 b'devel', b'check-locks'
1417 1413 ):
1418 1414 self.vfs.audit = self._getvfsward(self.vfs.audit)
1419 1415 # A list of callbacks to shape the phase if no data were found.
1420 1416 # Callbacks are in the form: func(repo, roots) --> processed root.
1421 1417 # This list is to be filled by extensions during repo setup
1422 1418 self._phasedefaults = []
1423 1419
1424 1420 color.setup(self.ui)
1425 1421
1426 1422 self.spath = self.store.path
1427 1423 self.svfs = self.store.vfs
1428 1424 self.sjoin = self.store.join
1429 1425 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1430 1426 b'devel', b'check-locks'
1431 1427 ):
1432 1428 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1433 1429 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1434 1430 else: # standard vfs
1435 1431 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1436 1432
1437 1433 self._dirstatevalidatewarned = False
1438 1434
1439 1435 self._branchcaches = branchmap.BranchMapCache()
1440 1436 self._revbranchcache = None
1441 1437 self._filterpats = {}
1442 1438 self._datafilters = {}
1443 1439 self._transref = self._lockref = self._wlockref = None
1444 1440
1445 1441 # A cache for various files under .hg/ that tracks file changes,
1446 1442 # (used by the filecache decorator)
1447 1443 #
1448 1444 # Maps a property name to its util.filecacheentry
1449 1445 self._filecache = {}
1450 1446
1451 1447 # hold sets of revisions to be filtered
1452 1448 # should be cleared when something might have changed the filter value:
1453 1449 # - new changesets,
1454 1450 # - phase change,
1455 1451 # - new obsolescence marker,
1456 1452 # - working directory parent change,
1457 1453 # - bookmark changes
1458 1454 self.filteredrevcache = {}
1459 1455
1460 1456 # post-dirstate-status hooks
1461 1457 self._postdsstatus = []
1462 1458
1463 1459 # generic mapping between names and nodes
1464 1460 self.names = namespaces.namespaces()
1465 1461
1466 1462 # Key to signature value.
1467 1463 self._sparsesignaturecache = {}
1468 1464 # Signature to cached matcher instance.
1469 1465 self._sparsematchercache = {}
1470 1466
1471 1467 self._extrafilterid = repoview.extrafilter(ui)
1472 1468
1473 1469 self.filecopiesmode = None
1474 1470 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1475 1471 self.filecopiesmode = b'changeset-sidedata'
1476 1472
1477 1473 self._wanted_sidedata = set()
1478 1474 self._sidedata_computers = {}
1479 1475 sidedatamod.set_sidedata_spec_for_repo(self)
1480 1476
1481 1477 def _getvfsward(self, origfunc):
1482 1478 """build a ward for self.vfs"""
1483 1479 rref = weakref.ref(self)
1484 1480
1485 1481 def checkvfs(path, mode=None):
1486 1482 ret = origfunc(path, mode=mode)
1487 1483 repo = rref()
1488 1484 if (
1489 1485 repo is None
1490 1486 or not util.safehasattr(repo, b'_wlockref')
1491 1487 or not util.safehasattr(repo, b'_lockref')
1492 1488 ):
1493 1489 return
1494 1490 if mode in (None, b'r', b'rb'):
1495 1491 return
1496 1492 if path.startswith(repo.path):
1497 1493 # truncate name relative to the repository (.hg)
1498 1494 path = path[len(repo.path) + 1 :]
1499 1495 if path.startswith(b'cache/'):
1500 1496 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1501 1497 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1502 1498 # path prefixes covered by 'lock'
1503 1499 vfs_path_prefixes = (
1504 1500 b'journal.',
1505 1501 b'undo.',
1506 1502 b'strip-backup/',
1507 1503 b'cache/',
1508 1504 )
1509 1505 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1510 1506 if repo._currentlock(repo._lockref) is None:
1511 1507 repo.ui.develwarn(
1512 1508 b'write with no lock: "%s"' % path,
1513 1509 stacklevel=3,
1514 1510 config=b'check-locks',
1515 1511 )
1516 1512 elif repo._currentlock(repo._wlockref) is None:
1517 1513 # rest of vfs files are covered by 'wlock'
1518 1514 #
1519 1515 # exclude special files
1520 1516 for prefix in self._wlockfreeprefix:
1521 1517 if path.startswith(prefix):
1522 1518 return
1523 1519 repo.ui.develwarn(
1524 1520 b'write with no wlock: "%s"' % path,
1525 1521 stacklevel=3,
1526 1522 config=b'check-locks',
1527 1523 )
1528 1524 return ret
1529 1525
1530 1526 return checkvfs
1531 1527
1532 1528 def _getsvfsward(self, origfunc):
1533 1529 """build a ward for self.svfs"""
1534 1530 rref = weakref.ref(self)
1535 1531
1536 1532 def checksvfs(path, mode=None):
1537 1533 ret = origfunc(path, mode=mode)
1538 1534 repo = rref()
1539 1535 if repo is None or not util.safehasattr(repo, b'_lockref'):
1540 1536 return
1541 1537 if mode in (None, b'r', b'rb'):
1542 1538 return
1543 1539 if path.startswith(repo.sharedpath):
1544 1540 # truncate name relative to the repository (.hg)
1545 1541 path = path[len(repo.sharedpath) + 1 :]
1546 1542 if repo._currentlock(repo._lockref) is None:
1547 1543 repo.ui.develwarn(
1548 1544 b'write with no lock: "%s"' % path, stacklevel=4
1549 1545 )
1550 1546 return ret
1551 1547
1552 1548 return checksvfs
1553 1549
1554 1550 def close(self):
1555 1551 self._writecaches()
1556 1552
1557 1553 def _writecaches(self):
1558 1554 if self._revbranchcache:
1559 1555 self._revbranchcache.write()
1560 1556
1561 1557 def _restrictcapabilities(self, caps):
1562 1558 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1563 1559 caps = set(caps)
1564 1560 capsblob = bundle2.encodecaps(
1565 1561 bundle2.getrepocaps(self, role=b'client')
1566 1562 )
1567 1563 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1568 1564 if self.ui.configbool(b'experimental', b'narrow'):
1569 1565 caps.add(wireprototypes.NARROWCAP)
1570 1566 return caps
1571 1567
1572 1568 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1573 1569 # self -> auditor -> self._checknested -> self
1574 1570
1575 1571 @property
1576 1572 def auditor(self):
1577 1573 # This is only used by context.workingctx.match in order to
1578 1574 # detect files in subrepos.
1579 1575 return pathutil.pathauditor(self.root, callback=self._checknested)
1580 1576
1581 1577 @property
1582 1578 def nofsauditor(self):
1583 1579 # This is only used by context.basectx.match in order to detect
1584 1580 # files in subrepos.
1585 1581 return pathutil.pathauditor(
1586 1582 self.root, callback=self._checknested, realfs=False, cached=True
1587 1583 )
1588 1584
1589 1585 def _checknested(self, path):
1590 1586 """Determine if path is a legal nested repository."""
1591 1587 if not path.startswith(self.root):
1592 1588 return False
1593 1589 subpath = path[len(self.root) + 1 :]
1594 1590 normsubpath = util.pconvert(subpath)
1595 1591
1596 1592 # XXX: Checking against the current working copy is wrong in
1597 1593 # the sense that it can reject things like
1598 1594 #
1599 1595 # $ hg cat -r 10 sub/x.txt
1600 1596 #
1601 1597 # if sub/ is no longer a subrepository in the working copy
1602 1598 # parent revision.
1603 1599 #
1604 1600 # However, it can of course also allow things that would have
1605 1601 # been rejected before, such as the above cat command if sub/
1606 1602 # is a subrepository now, but was a normal directory before.
1607 1603 # The old path auditor would have rejected by mistake since it
1608 1604 # panics when it sees sub/.hg/.
1609 1605 #
1610 1606 # All in all, checking against the working copy seems sensible
1611 1607 # since we want to prevent access to nested repositories on
1612 1608 # the filesystem *now*.
1613 1609 ctx = self[None]
1614 1610 parts = util.splitpath(subpath)
1615 1611 while parts:
1616 1612 prefix = b'/'.join(parts)
1617 1613 if prefix in ctx.substate:
1618 1614 if prefix == normsubpath:
1619 1615 return True
1620 1616 else:
1621 1617 sub = ctx.sub(prefix)
1622 1618 return sub.checknested(subpath[len(prefix) + 1 :])
1623 1619 else:
1624 1620 parts.pop()
1625 1621 return False
1626 1622
1627 1623 def peer(self):
1628 1624 return localpeer(self) # not cached to avoid reference cycle
1629 1625
1630 1626 def unfiltered(self):
1631 1627 """Return unfiltered version of the repository
1632 1628
1633 1629 Intended to be overwritten by filtered repo."""
1634 1630 return self
1635 1631
1636 1632 def filtered(self, name, visibilityexceptions=None):
1637 1633 """Return a filtered version of a repository
1638 1634
1639 1635 The `name` parameter is the identifier of the requested view. This
1640 1636 will return a repoview object set "exactly" to the specified view.
1641 1637
1642 1638 This function does not apply recursive filtering to a repository. For
1643 1639 example calling `repo.filtered("served")` will return a repoview using
1644 1640 the "served" view, regardless of the initial view used by `repo`.
1645 1641
1646 1642 In other words, there is always only one level of `repoview` "filtering".
1647 1643 """
1648 1644 if self._extrafilterid is not None and b'%' not in name:
1649 1645 name = name + b'%' + self._extrafilterid
1650 1646
1651 1647 cls = repoview.newtype(self.unfiltered().__class__)
1652 1648 return cls(self, name, visibilityexceptions)
1653 1649
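# Editor's illustrative sketch (not part of the original source): a caller
# that wants the repository as seen by remote clients can request the
# "served" view, regardless of which view it currently holds:
#
#     served = repo.filtered(b'served')
#     heads = served.heads()
#
# `repo` stands for any localrepository instance; b'served' is one of the
# standard filter names registered in repoview.filtertable.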
1654 1650 @mixedrepostorecache(
1655 1651 (b'bookmarks', b'plain'),
1656 1652 (b'bookmarks.current', b'plain'),
1657 1653 (b'bookmarks', b''),
1658 1654 (b'00changelog.i', b''),
1659 1655 )
1660 1656 def _bookmarks(self):
1661 1657 # Since the multiple files involved in the transaction cannot be
1662 1658 # written atomically (with current repository format), there is a race
1663 1659 # condition here.
1664 1660 #
1665 1661 # 1) changelog content A is read
1666 1662 # 2) outside transaction update changelog to content B
1667 1663 # 3) outside transaction update bookmark file referring to content B
1668 1664 # 4) bookmarks file content is read and filtered against changelog-A
1669 1665 #
1670 1666 # When this happens, bookmarks against nodes missing from A are dropped.
1671 1667 #
1672 1668 # Having this happen during a read is not great, but it becomes worse
1673 1669 # when it happens during a write, because the bookmarks to the "unknown"
1674 1670 # nodes will be dropped for good. However, writes happen within locks.
1675 1671 # This locking makes it possible to have a race-free, consistent read.
1676 1672 # For this purpose, data read from disk before locking is
1677 1673 # "invalidated" right after the locks are taken. These invalidations are
1678 1674 # "light": the `filecache` mechanism keeps the data in memory and will
1679 1675 # reuse it if the underlying files did not change. Not parsing the
1680 1676 # same data multiple times helps performance.
1681 1677 #
1682 1678 # Unfortunately, in the case described above, the files tracked by the
1683 1679 # bookmarks file cache might not have changed, but the in-memory
1684 1680 # content is still "wrong" because we used an older changelog content
1685 1681 # to process the on-disk data. So after locking, the changelog would be
1686 1682 # refreshed but `_bookmarks` would be preserved.
1687 1683 # Adding `00changelog.i` to the list of tracked files is not
1688 1684 # enough, because at the time we build the content for `_bookmarks` in
1689 1685 # (4), the changelog file has already diverged from the content used
1690 1686 # for loading `changelog` in (1)
1691 1687 #
1692 1688 # To prevent the issue, we force the changelog to be explicitly
1693 1689 # reloaded while computing `_bookmarks`. The data race can still happen
1694 1690 # without the lock (with a narrower window), but it would no longer go
1695 1691 # undetected during the lock time refresh.
1696 1692 #
1697 1693 # The new schedule is as follows:
1698 1694 #
1699 1695 # 1) filecache logic detect that `_bookmarks` needs to be computed
1700 1696 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1701 1697 # 3) We force `changelog` filecache to be tested
1702 1698 # 4) cachestat for `changelog` are captured (for changelog)
1703 1699 # 5) `_bookmarks` is computed and cached
1704 1700 #
1705 1701 # The step in (3) ensures we have a changelog at least as recent as the
1706 1702 # cache stat computed in (1). As a result, at locking time:
1707 1703 # * if the changelog did not change since (1) -> we can reuse the data
1708 1704 # * otherwise -> the bookmarks get refreshed.
1709 1705 self._refreshchangelog()
1710 1706 return bookmarks.bmstore(self)
1711 1707
1712 1708 def _refreshchangelog(self):
1713 1709 """make sure the in memory changelog match the on-disk one"""
1714 1710 if 'changelog' in vars(self) and self.currenttransaction() is None:
1715 1711 del self.changelog
1716 1712
1717 1713 @property
1718 1714 def _activebookmark(self):
1719 1715 return self._bookmarks.active
1720 1716
1721 1717 # _phasesets depend on changelog. what we need is to call
1722 1718 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1723 1719 # can't be easily expressed in filecache mechanism.
1724 1720 @storecache(b'phaseroots', b'00changelog.i')
1725 1721 def _phasecache(self):
1726 1722 return phases.phasecache(self, self._phasedefaults)
1727 1723
1728 1724 @storecache(b'obsstore')
1729 1725 def obsstore(self):
1730 1726 return obsolete.makestore(self.ui, self)
1731 1727
1732 1728 @changelogcache()
1733 1729 def changelog(repo):
1734 1730 # load dirstate before changelog to avoid race see issue6303
1735 1731 repo.dirstate.prefetch_parents()
1736 1732 return repo.store.changelog(
1737 1733 txnutil.mayhavepending(repo.root),
1738 1734 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1739 1735 )
1740 1736
1741 1737 @manifestlogcache()
1742 1738 def manifestlog(self):
1743 1739 return self.store.manifestlog(self, self._storenarrowmatch)
1744 1740
1745 1741 @repofilecache(b'dirstate')
1746 1742 def dirstate(self):
1747 1743 return self._makedirstate()
1748 1744
1749 1745 def _makedirstate(self):
1750 1746 """Extension point for wrapping the dirstate per-repo."""
1751 1747 sparsematchfn = None
1752 1748 if sparse.use_sparse(self):
1753 1749 sparsematchfn = lambda: sparse.matcher(self)
1754 1750 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1755 1751 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1756 1752 use_dirstate_v2 = v2_req in self.requirements
1757 1753 use_tracked_hint = th in self.requirements
1758 1754
1759 1755 return dirstate.dirstate(
1760 1756 self.vfs,
1761 1757 self.ui,
1762 1758 self.root,
1763 1759 self._dirstatevalidate,
1764 1760 sparsematchfn,
1765 1761 self.nodeconstants,
1766 1762 use_dirstate_v2,
1767 1763 use_tracked_hint=use_tracked_hint,
1768 1764 )
1769 1765
1770 1766 def _dirstatevalidate(self, node):
1771 1767 try:
1772 1768 self.changelog.rev(node)
1773 1769 return node
1774 1770 except error.LookupError:
1775 1771 if not self._dirstatevalidatewarned:
1776 1772 self._dirstatevalidatewarned = True
1777 1773 self.ui.warn(
1778 1774 _(b"warning: ignoring unknown working parent %s!\n")
1779 1775 % short(node)
1780 1776 )
1781 1777 return self.nullid
1782 1778
1783 1779 @storecache(narrowspec.FILENAME)
1784 1780 def narrowpats(self):
1785 1781 """matcher patterns for this repository's narrowspec
1786 1782
1787 1783 A tuple of (includes, excludes).
1788 1784 """
1789 1785 return narrowspec.load(self)
1790 1786
1791 1787 @storecache(narrowspec.FILENAME)
1792 1788 def _storenarrowmatch(self):
1793 1789 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1794 1790 return matchmod.always()
1795 1791 include, exclude = self.narrowpats
1796 1792 return narrowspec.match(self.root, include=include, exclude=exclude)
1797 1793
1798 1794 @storecache(narrowspec.FILENAME)
1799 1795 def _narrowmatch(self):
1800 1796 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1801 1797 return matchmod.always()
1802 1798 narrowspec.checkworkingcopynarrowspec(self)
1803 1799 include, exclude = self.narrowpats
1804 1800 return narrowspec.match(self.root, include=include, exclude=exclude)
1805 1801
1806 1802 def narrowmatch(self, match=None, includeexact=False):
1807 1803 """matcher corresponding the the repo's narrowspec
1808 1804
1809 1805 If `match` is given, then that will be intersected with the narrow
1810 1806 matcher.
1811 1807
1812 1808 If `includeexact` is True, then any exact matches from `match` will
1813 1809 be included even if they're outside the narrowspec.
1814 1810 """
1815 1811 if match:
1816 1812 if includeexact and not self._narrowmatch.always():
1817 1813 # do not exclude explicitly-specified paths so that they can
1818 1814 # be warned later on
1819 1815 em = matchmod.exact(match.files())
1820 1816 nm = matchmod.unionmatcher([self._narrowmatch, em])
1821 1817 return matchmod.intersectmatchers(match, nm)
1822 1818 return matchmod.intersectmatchers(match, self._narrowmatch)
1823 1819 return self._narrowmatch
1824 1820
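# Editor's illustrative sketch (not part of the original source): intersecting
# an arbitrary matcher with the narrowspec, as narrowmatch() does above:
#
#     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#     nm = repo.narrowmatch(m, includeexact=True)
#     nm(b'src/main.c')  # True only if the file matches both `m` and the
#                        # narrowspec (exact files from `m` are kept even
#                        # outside the narrowspec because of includeexact)
#
# The pattern and file name are hypothetical.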
1825 1821 def setnarrowpats(self, newincludes, newexcludes):
1826 1822 narrowspec.save(self, newincludes, newexcludes)
1827 1823 self.invalidate(clearfilecache=True)
1828 1824
1829 1825 @unfilteredpropertycache
1830 1826 def _quick_access_changeid_null(self):
1831 1827 return {
1832 1828 b'null': (nullrev, self.nodeconstants.nullid),
1833 1829 nullrev: (nullrev, self.nodeconstants.nullid),
1834 1830 self.nullid: (nullrev, self.nullid),
1835 1831 }
1836 1832
1837 1833 @unfilteredpropertycache
1838 1834 def _quick_access_changeid_wc(self):
1839 1835 # also fast-path access to the working copy parents;
1840 1836 # however, only do it for filters that ensure the wc is visible.
1841 1837 quick = self._quick_access_changeid_null.copy()
1842 1838 cl = self.unfiltered().changelog
1843 1839 for node in self.dirstate.parents():
1844 1840 if node == self.nullid:
1845 1841 continue
1846 1842 rev = cl.index.get_rev(node)
1847 1843 if rev is None:
1848 1844 # unknown working copy parent case:
1849 1845 #
1850 1846 # skip the fast path and let higher code deal with it
1851 1847 continue
1852 1848 pair = (rev, node)
1853 1849 quick[rev] = pair
1854 1850 quick[node] = pair
1855 1851 # also add the parents of the parents
1856 1852 for r in cl.parentrevs(rev):
1857 1853 if r == nullrev:
1858 1854 continue
1859 1855 n = cl.node(r)
1860 1856 pair = (r, n)
1861 1857 quick[r] = pair
1862 1858 quick[n] = pair
1863 1859 p1node = self.dirstate.p1()
1864 1860 if p1node != self.nullid:
1865 1861 quick[b'.'] = quick[p1node]
1866 1862 return quick
1867 1863
1868 1864 @unfilteredmethod
1869 1865 def _quick_access_changeid_invalidate(self):
1870 1866 if '_quick_access_changeid_wc' in vars(self):
1871 1867 del self.__dict__['_quick_access_changeid_wc']
1872 1868
1873 1869 @property
1874 1870 def _quick_access_changeid(self):
1875 1871 """an helper dictionnary for __getitem__ calls
1876 1872
1877 1873 This contains a list of symbols we can recognise right away without
1878 1874 further processing.
1879 1875 """
1880 1876 if self.filtername in repoview.filter_has_wc:
1881 1877 return self._quick_access_changeid_wc
1882 1878 return self._quick_access_changeid_null
1883 1879
1884 1880 def __getitem__(self, changeid):
1885 1881 # dealing with special cases
1886 1882 if changeid is None:
1887 1883 return context.workingctx(self)
1888 1884 if isinstance(changeid, context.basectx):
1889 1885 return changeid
1890 1886
1891 1887 # dealing with multiple revisions
1892 1888 if isinstance(changeid, slice):
1893 1889 # wdirrev isn't contiguous so the slice shouldn't include it
1894 1890 return [
1895 1891 self[i]
1896 1892 for i in range(*changeid.indices(len(self)))
1897 1893 if i not in self.changelog.filteredrevs
1898 1894 ]
1899 1895
1900 1896 # dealing with some special values
1901 1897 quick_access = self._quick_access_changeid.get(changeid)
1902 1898 if quick_access is not None:
1903 1899 rev, node = quick_access
1904 1900 return context.changectx(self, rev, node, maybe_filtered=False)
1905 1901 if changeid == b'tip':
1906 1902 node = self.changelog.tip()
1907 1903 rev = self.changelog.rev(node)
1908 1904 return context.changectx(self, rev, node)
1909 1905
1910 1906 # dealing with arbitrary values
1911 1907 try:
1912 1908 if isinstance(changeid, int):
1913 1909 node = self.changelog.node(changeid)
1914 1910 rev = changeid
1915 1911 elif changeid == b'.':
1916 1912 # this is a hack to delay/avoid loading obsmarkers
1917 1913 # when we know that '.' won't be hidden
1918 1914 node = self.dirstate.p1()
1919 1915 rev = self.unfiltered().changelog.rev(node)
1920 1916 elif len(changeid) == self.nodeconstants.nodelen:
1921 1917 try:
1922 1918 node = changeid
1923 1919 rev = self.changelog.rev(changeid)
1924 1920 except error.FilteredLookupError:
1925 1921 changeid = hex(changeid) # for the error message
1926 1922 raise
1927 1923 except LookupError:
1928 1924 # check if it might have come from damaged dirstate
1929 1925 #
1930 1926 # XXX we could avoid the unfiltered if we had a recognizable
1931 1927 # exception for filtered changeset access
1932 1928 if (
1933 1929 self.local()
1934 1930 and changeid in self.unfiltered().dirstate.parents()
1935 1931 ):
1936 1932 msg = _(b"working directory has unknown parent '%s'!")
1937 1933 raise error.Abort(msg % short(changeid))
1938 1934 changeid = hex(changeid) # for the error message
1939 1935 raise
1940 1936
1941 1937 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1942 1938 node = bin(changeid)
1943 1939 rev = self.changelog.rev(node)
1944 1940 else:
1945 1941 raise error.ProgrammingError(
1946 1942 b"unsupported changeid '%s' of type %s"
1947 1943 % (changeid, pycompat.bytestr(type(changeid)))
1948 1944 )
1949 1945
1950 1946 return context.changectx(self, rev, node)
1951 1947
1952 1948 except (error.FilteredIndexError, error.FilteredLookupError):
1953 1949 raise error.FilteredRepoLookupError(
1954 1950 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1955 1951 )
1956 1952 except (IndexError, LookupError):
1957 1953 raise error.RepoLookupError(
1958 1954 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1959 1955 )
1960 1956 except error.WdirUnsupported:
1961 1957 return context.workingctx(self)
1962 1958
1963 1959 def __contains__(self, changeid):
1964 1960 """True if the given changeid exists"""
1965 1961 try:
1966 1962 self[changeid]
1967 1963 return True
1968 1964 except error.RepoLookupError:
1969 1965 return False
1970 1966
1971 1967 def __nonzero__(self):
1972 1968 return True
1973 1969
1974 1970 __bool__ = __nonzero__
1975 1971
1976 1972 def __len__(self):
1977 1973 # no need to pay the cost of repoview.changelog
1978 1974 unfi = self.unfiltered()
1979 1975 return len(unfi.changelog)
1980 1976
1981 1977 def __iter__(self):
1982 1978 return iter(self.changelog)
1983 1979
1984 1980 def revs(self, expr, *args):
1985 1981 """Find revisions matching a revset.
1986 1982
1987 1983 The revset is specified as a string ``expr`` that may contain
1988 1984 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1989 1985
1990 1986 Revset aliases from the configuration are not expanded. To expand
1991 1987 user aliases, consider calling ``scmutil.revrange()`` or
1992 1988 ``repo.anyrevs([expr], user=True)``.
1993 1989
1994 1990 Returns a smartset.abstractsmartset, which is a list-like interface
1995 1991 that contains integer revisions.
1996 1992 """
1997 1993 tree = revsetlang.spectree(expr, *args)
1998 1994 return revset.makematcher(tree)(self)
1999 1995
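# Editor's illustrative sketch (not part of the original source):
# %-formatting keeps revset arguments properly quoted/escaped, so a caller
# might write:
#
#     for rev in repo.revs(b'ancestors(%d) and user(%s)', some_rev, b'alice'):
#         ctx = repo[rev]
#
# `some_rev` and b'alice' are hypothetical values; see revsetlang.formatspec
# for the supported conversion specifiers.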
2000 1996 def set(self, expr, *args):
2001 1997 """Find revisions matching a revset and emit changectx instances.
2002 1998
2003 1999 This is a convenience wrapper around ``revs()`` that iterates the
2004 2000 result and is a generator of changectx instances.
2005 2001
2006 2002 Revset aliases from the configuration are not expanded. To expand
2007 2003 user aliases, consider calling ``scmutil.revrange()``.
2008 2004 """
2009 2005 for r in self.revs(expr, *args):
2010 2006 yield self[r]
2011 2007
2012 2008 def anyrevs(self, specs, user=False, localalias=None):
2013 2009 """Find revisions matching one of the given revsets.
2014 2010
2015 2011 Revset aliases from the configuration are not expanded by default. To
2016 2012 expand user aliases, specify ``user=True``. To provide some local
2017 2013 definitions overriding user aliases, set ``localalias`` to
2018 2014 ``{name: definitionstring}``.
2019 2015 """
2020 2016 if specs == [b'null']:
2021 2017 return revset.baseset([nullrev])
2022 2018 if specs == [b'.']:
2023 2019 quick_data = self._quick_access_changeid.get(b'.')
2024 2020 if quick_data is not None:
2025 2021 return revset.baseset([quick_data[0]])
2026 2022 if user:
2027 2023 m = revset.matchany(
2028 2024 self.ui,
2029 2025 specs,
2030 2026 lookup=revset.lookupfn(self),
2031 2027 localalias=localalias,
2032 2028 )
2033 2029 else:
2034 2030 m = revset.matchany(None, specs, localalias=localalias)
2035 2031 return m(self)
2036 2032
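# Editor's illustrative sketch (not part of the original source): resolving a
# user-supplied revset with user aliases expanded and one local override:
#
#     revs = repo.anyrevs(
#         [b'myalias()'],
#         user=True,
#         localalias={b'myalias': b'heads(default)'},
#     )
#
# b'myalias' is a hypothetical alias name used only for illustration.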
2037 2033 def url(self):
2038 2034 return b'file:' + self.root
2039 2035
2040 2036 def hook(self, name, throw=False, **args):
2041 2037 """Call a hook, passing this repo instance.
2042 2038
2043 2039 This is a convenience method to aid invoking hooks. Extensions likely
2044 2040 won't call this unless they have registered a custom hook or are
2045 2041 replacing code that is expected to call a hook.
2046 2042 """
2047 2043 return hook.hook(self.ui, self, name, throw, **args)
2048 2044
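# Editor's illustrative sketch (not part of the original source): an extension
# that registered a hypothetical hook named b'myhook' could fire it with extra
# arguments (which become HG_* environment variables for shell hooks):
#
#     repo.hook(b'myhook', throw=False, node=hex(somenode), source=b'example')
#
# `somenode` is a placeholder; `hex` is imported from .node at the top of
# this file.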
2049 2045 @filteredpropertycache
2050 2046 def _tagscache(self):
2051 2047 """Returns a tagscache object that contains various tags related
2052 2048 caches."""
2053 2049
2054 2050 # This simplifies its cache management by having one decorated
2055 2051 # function (this one) and the rest simply fetch things from it.
2056 2052 class tagscache:
2057 2053 def __init__(self):
2058 2054 # These two define the set of tags for this repository. tags
2059 2055 # maps tag name to node; tagtypes maps tag name to 'global' or
2060 2056 # 'local'. (Global tags are defined by .hgtags across all
2061 2057 # heads, and local tags are defined in .hg/localtags.)
2062 2058 # They constitute the in-memory cache of tags.
2063 2059 self.tags = self.tagtypes = None
2064 2060
2065 2061 self.nodetagscache = self.tagslist = None
2066 2062
2067 2063 cache = tagscache()
2068 2064 cache.tags, cache.tagtypes = self._findtags()
2069 2065
2070 2066 return cache
2071 2067
2072 2068 def tags(self):
2073 2069 '''return a mapping of tag to node'''
2074 2070 t = {}
2075 2071 if self.changelog.filteredrevs:
2076 2072 tags, tt = self._findtags()
2077 2073 else:
2078 2074 tags = self._tagscache.tags
2079 2075 rev = self.changelog.rev
2080 2076 for k, v in tags.items():
2081 2077 try:
2082 2078 # ignore tags to unknown nodes
2083 2079 rev(v)
2084 2080 t[k] = v
2085 2081 except (error.LookupError, ValueError):
2086 2082 pass
2087 2083 return t
2088 2084
2089 2085 def _findtags(self):
2090 2086 """Do the hard work of finding tags. Return a pair of dicts
2091 2087 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2092 2088 maps tag name to a string like \'global\' or \'local\'.
2093 2089 Subclasses or extensions are free to add their own tags, but
2094 2090 should be aware that the returned dicts will be retained for the
2095 2091 duration of the localrepo object."""
2096 2092
2097 2093 # XXX what tagtype should subclasses/extensions use? Currently
2098 2094 # mq and bookmarks add tags, but do not set the tagtype at all.
2099 2095 # Should each extension invent its own tag type? Should there
2100 2096 # be one tagtype for all such "virtual" tags? Or is the status
2101 2097 # quo fine?
2102 2098
2103 2099 # map tag name to (node, hist)
2104 2100 alltags = tagsmod.findglobaltags(self.ui, self)
2105 2101 # map tag name to tag type
2106 2102 tagtypes = {tag: b'global' for tag in alltags}
2107 2103
2108 2104 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2109 2105
2110 2106 # Build the return dicts. Have to re-encode tag names because
2111 2107 # the tags module always uses UTF-8 (in order not to lose info
2112 2108 # writing to the cache), but the rest of Mercurial wants them in
2113 2109 # local encoding.
2114 2110 tags = {}
2115 2111 for (name, (node, hist)) in alltags.items():
2116 2112 if node != self.nullid:
2117 2113 tags[encoding.tolocal(name)] = node
2118 2114 tags[b'tip'] = self.changelog.tip()
2119 2115 tagtypes = {
2120 2116 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2121 2117 }
2122 2118 return (tags, tagtypes)
2123 2119
2124 2120 def tagtype(self, tagname):
2125 2121 """
2126 2122 return the type of the given tag. result can be:
2127 2123
2128 2124 'local' : a local tag
2129 2125 'global' : a global tag
2130 2126 None : tag does not exist
2131 2127 """
2132 2128
2133 2129 return self._tagscache.tagtypes.get(tagname)
2134 2130
2135 2131 def tagslist(self):
2136 2132 '''return a list of tags ordered by revision'''
2137 2133 if not self._tagscache.tagslist:
2138 2134 l = []
2139 2135 for t, n in self.tags().items():
2140 2136 l.append((self.changelog.rev(n), t, n))
2141 2137 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2142 2138
2143 2139 return self._tagscache.tagslist
2144 2140
2145 2141 def nodetags(self, node):
2146 2142 '''return the tags associated with a node'''
2147 2143 if not self._tagscache.nodetagscache:
2148 2144 nodetagscache = {}
2149 2145 for t, n in self._tagscache.tags.items():
2150 2146 nodetagscache.setdefault(n, []).append(t)
2151 2147 for tags in nodetagscache.values():
2152 2148 tags.sort()
2153 2149 self._tagscache.nodetagscache = nodetagscache
2154 2150 return self._tagscache.nodetagscache.get(node, [])
2155 2151
2156 2152 def nodebookmarks(self, node):
2157 2153 """return the list of bookmarks pointing to the specified node"""
2158 2154 return self._bookmarks.names(node)
2159 2155
2160 2156 def branchmap(self):
2161 2157 """returns a dictionary {branch: [branchheads]} with branchheads
2162 2158 ordered by increasing revision number"""
2163 2159 return self._branchcaches[self]
2164 2160
2165 2161 @unfilteredmethod
2166 2162 def revbranchcache(self):
2167 2163 if not self._revbranchcache:
2168 2164 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2169 2165 return self._revbranchcache
2170 2166
2171 2167 def register_changeset(self, rev, changelogrevision):
2172 2168 self.revbranchcache().setdata(rev, changelogrevision)
2173 2169
2174 2170 def branchtip(self, branch, ignoremissing=False):
2175 2171 """return the tip node for a given branch
2176 2172
2177 2173 If ignoremissing is True, then this method will not raise an error.
2178 2174 This is helpful for callers that only expect None for a missing branch
2179 2175 (e.g. namespace).
2180 2176
2181 2177 """
2182 2178 try:
2183 2179 return self.branchmap().branchtip(branch)
2184 2180 except KeyError:
2185 2181 if not ignoremissing:
2186 2182 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2187 2183 else:
2188 2184 pass
2189 2185
2190 2186 def lookup(self, key):
2191 2187 node = scmutil.revsymbol(self, key).node()
2192 2188 if node is None:
2193 2189 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2194 2190 return node
2195 2191
2196 2192 def lookupbranch(self, key):
2197 2193 if self.branchmap().hasbranch(key):
2198 2194 return key
2199 2195
2200 2196 return scmutil.revsymbol(self, key).branch()
2201 2197
2202 2198 def known(self, nodes):
2203 2199 cl = self.changelog
2204 2200 get_rev = cl.index.get_rev
2205 2201 filtered = cl.filteredrevs
2206 2202 result = []
2207 2203 for n in nodes:
2208 2204 r = get_rev(n)
2209 2205 resp = not (r is None or r in filtered)
2210 2206 result.append(resp)
2211 2207 return result
2212 2208
2213 2209 def local(self):
2214 2210 return self
2215 2211
2216 2212 def publishing(self):
2217 2213 # it's safe (and desirable) to trust the publish flag unconditionally
2218 2214 # so that we don't finalize changes shared between users via ssh or nfs
2219 2215 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2220 2216
2221 2217 def cancopy(self):
2222 2218 # so statichttprepo's override of local() works
2223 2219 if not self.local():
2224 2220 return False
2225 2221 if not self.publishing():
2226 2222 return True
2227 2223 # if publishing we can't copy if there is filtered content
2228 2224 return not self.filtered(b'visible').changelog.filteredrevs
2229 2225
2230 2226 def shared(self):
2231 2227 '''the type of shared repository (None if not shared)'''
2232 2228 if self.sharedpath != self.path:
2233 2229 return b'store'
2234 2230 return None
2235 2231
2236 2232 def wjoin(self, f, *insidef):
2237 2233 return self.vfs.reljoin(self.root, f, *insidef)
2238 2234
2239 2235 def setparents(self, p1, p2=None):
2240 2236 if p2 is None:
2241 2237 p2 = self.nullid
2242 2238 self[None].setparents(p1, p2)
2243 2239 self._quick_access_changeid_invalidate()
2244 2240
2245 2241 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2246 2242 """changeid must be a changeset revision, if specified.
2247 2243 fileid can be a file revision or node."""
2248 2244 return context.filectx(
2249 2245 self, path, changeid, fileid, changectx=changectx
2250 2246 )
2251 2247
2252 2248 def getcwd(self):
2253 2249 return self.dirstate.getcwd()
2254 2250
2255 2251 def pathto(self, f, cwd=None):
2256 2252 return self.dirstate.pathto(f, cwd)
2257 2253
2258 2254 def _loadfilter(self, filter):
2259 2255 if filter not in self._filterpats:
2260 2256 l = []
2261 2257 for pat, cmd in self.ui.configitems(filter):
2262 2258 if cmd == b'!':
2263 2259 continue
2264 2260 mf = matchmod.match(self.root, b'', [pat])
2265 2261 fn = None
2266 2262 params = cmd
2267 2263 for name, filterfn in self._datafilters.items():
2268 2264 if cmd.startswith(name):
2269 2265 fn = filterfn
2270 2266 params = cmd[len(name) :].lstrip()
2271 2267 break
2272 2268 if not fn:
2273 2269 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2274 2270 fn.__name__ = 'commandfilter'
2275 2271 # Wrap old filters not supporting keyword arguments
2276 2272 if not pycompat.getargspec(fn)[2]:
2277 2273 oldfn = fn
2278 2274 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2279 2275 fn.__name__ = 'compat-' + oldfn.__name__
2280 2276 l.append((mf, fn, params))
2281 2277 self._filterpats[filter] = l
2282 2278 return self._filterpats[filter]
2283 2279
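# Editor's illustrative sketch (not part of the original source): the filter
# patterns loaded above come from the [encode] and [decode] hgrc sections,
# e.g. (hypothetical commands):
#
#     [encode]
#     **.dat = pipe: gzip -dc
#
#     [decode]
#     **.dat = pipe: gzip -c
#
# A value of "!" disables a pattern, and a command starting with a registered
# data-filter name (see adddatafilter below) uses that Python filter instead
# of spawning a shell command.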
2284 2280 def _filter(self, filterpats, filename, data):
2285 2281 for mf, fn, cmd in filterpats:
2286 2282 if mf(filename):
2287 2283 self.ui.debug(
2288 2284 b"filtering %s through %s\n"
2289 2285 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2290 2286 )
2291 2287 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2292 2288 break
2293 2289
2294 2290 return data
2295 2291
2296 2292 @unfilteredpropertycache
2297 2293 def _encodefilterpats(self):
2298 2294 return self._loadfilter(b'encode')
2299 2295
2300 2296 @unfilteredpropertycache
2301 2297 def _decodefilterpats(self):
2302 2298 return self._loadfilter(b'decode')
2303 2299
2304 2300 def adddatafilter(self, name, filter):
2305 2301 self._datafilters[name] = filter
2306 2302
2307 2303 def wread(self, filename):
2308 2304 if self.wvfs.islink(filename):
2309 2305 data = self.wvfs.readlink(filename)
2310 2306 else:
2311 2307 data = self.wvfs.read(filename)
2312 2308 return self._filter(self._encodefilterpats, filename, data)
2313 2309
2314 2310 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2315 2311 """write ``data`` into ``filename`` in the working directory
2316 2312
2317 2313 This returns length of written (maybe decoded) data.
2318 2314 """
2319 2315 data = self._filter(self._decodefilterpats, filename, data)
2320 2316 if b'l' in flags:
2321 2317 self.wvfs.symlink(data, filename)
2322 2318 else:
2323 2319 self.wvfs.write(
2324 2320 filename, data, backgroundclose=backgroundclose, **kwargs
2325 2321 )
2326 2322 if b'x' in flags:
2327 2323 self.wvfs.setflags(filename, False, True)
2328 2324 else:
2329 2325 self.wvfs.setflags(filename, False, False)
2330 2326 return len(data)
2331 2327
2332 2328 def wwritedata(self, filename, data):
2333 2329 return self._filter(self._decodefilterpats, filename, data)
2334 2330
2335 2331 def currenttransaction(self):
2336 2332 """return the current transaction or None if non exists"""
2337 2333 if self._transref:
2338 2334 tr = self._transref()
2339 2335 else:
2340 2336 tr = None
2341 2337
2342 2338 if tr and tr.running():
2343 2339 return tr
2344 2340 return None
2345 2341
2346 2342 def transaction(self, desc, report=None):
2347 2343 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2348 2344 b'devel', b'check-locks'
2349 2345 ):
2350 2346 if self._currentlock(self._lockref) is None:
2351 2347 raise error.ProgrammingError(b'transaction requires locking')
2352 2348 tr = self.currenttransaction()
2353 2349 if tr is not None:
2354 2350 return tr.nest(name=desc)
2355 2351
2356 2352 # abort here if the journal already exists
2357 2353 if self.svfs.exists(b"journal"):
2358 2354 raise error.RepoError(
2359 2355 _(b"abandoned transaction found"),
2360 2356 hint=_(b"run 'hg recover' to clean up transaction"),
2361 2357 )
2362 2358
2363 2359 idbase = b"%.40f#%f" % (random.random(), time.time())
2364 2360 ha = hex(hashutil.sha1(idbase).digest())
2365 2361 txnid = b'TXN:' + ha
2366 2362 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2367 2363
2368 2364 self._writejournal(desc)
2369 2365 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2370 2366 if report:
2371 2367 rp = report
2372 2368 else:
2373 2369 rp = self.ui.warn
2374 2370 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2375 2371 # we must avoid cyclic reference between repo and transaction.
2376 2372 reporef = weakref.ref(self)
2377 2373 # Code to track tag movement
2378 2374 #
2379 2375 # Since tags are all handled as file content, it is actually quite hard
2380 2376 # to track these movements from a code perspective. So we fall back to
2381 2377 # tracking at the repository level. One could envision tracking changes
2382 2378 # to the '.hgtags' file through changegroup application, but that fails to
2383 2379 # cope with cases where a transaction exposes new heads without a changegroup
2384 2380 # being involved (e.g. phase movement).
2385 2381 #
2386 2382 # For now, we gate the feature behind a flag since this likely comes
2387 2383 # with performance impacts. The current code runs more often than needed
2388 2384 # and does not use caches as much as it could. The current focus is on
2389 2385 # the behavior of the feature so we disable it by default. The flag
2390 2386 # will be removed when we are happy with the performance impact.
2391 2387 #
2392 2388 # Once this feature is no longer experimental move the following
2393 2389 # documentation to the appropriate help section:
2394 2390 #
2395 2391 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2396 2392 # tags (new, changed, or deleted tags). In addition, the details of
2397 2393 # these changes are made available in a file at:
2398 2394 # ``REPOROOT/.hg/changes/tags.changes``.
2399 2395 # Make sure you check for HG_TAG_MOVED before reading that file as it
2400 2396 # might exist from a previous transaction even if no tags were touched
2401 2397 # in this one. Changes are recorded in a line-based format::
2402 2398 #
2403 2399 # <action> <hex-node> <tag-name>\n
2404 2400 #
2405 2401 # Actions are defined as follows:
2406 2402 # "-R": tag is removed,
2407 2403 # "+A": tag is added,
2408 2404 # "-M": tag is moved (old value),
2409 2405 # "+M": tag is moved (new value),
2410 2406 tracktags = lambda x: None
2411 2407 # experimental config: experimental.hook-track-tags
2412 2408 shouldtracktags = self.ui.configbool(
2413 2409 b'experimental', b'hook-track-tags'
2414 2410 )
2415 2411 if desc != b'strip' and shouldtracktags:
2416 2412 oldheads = self.changelog.headrevs()
2417 2413
2418 2414 def tracktags(tr2):
2419 2415 repo = reporef()
2420 2416 assert repo is not None # help pytype
2421 2417 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2422 2418 newheads = repo.changelog.headrevs()
2423 2419 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2424 2420 # notes: we compare lists here.
2425 2421 # As we do it only once, building a set would not be cheaper.
2426 2422 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2427 2423 if changes:
2428 2424 tr2.hookargs[b'tag_moved'] = b'1'
2429 2425 with repo.vfs(
2430 2426 b'changes/tags.changes', b'w', atomictemp=True
2431 2427 ) as changesfile:
2432 2428 # note: we do not register the file with the transaction
2433 2429 # because we need it to still exist when the transaction
2434 2430 # is closed (for txnclose hooks)
2435 2431 tagsmod.writediff(changesfile, changes)
2436 2432
2437 2433 def validate(tr2):
2438 2434 """will run pre-closing hooks"""
2439 2435 # XXX the transaction API is a bit lacking here so we take a hacky
2440 2436 # path for now
2441 2437 #
2442 2438 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2443 2439 # dict is copied before these run. In addition, we need the data
2444 2440 # available to in-memory hooks too.
2445 2441 #
2446 2442 # Moreover, we also need to make sure this runs before txnclose
2447 2443 # hooks and there is no "pending" mechanism that would execute
2448 2444 # logic only if hooks are about to run.
2449 2445 #
2450 2446 # Fixing this limitation of the transaction is also needed to track
2451 2447 # other families of changes (bookmarks, phases, obsolescence).
2452 2448 #
2453 2449 # This will have to be fixed before we remove the experimental
2454 2450 # gating.
2455 2451 tracktags(tr2)
2456 2452 repo = reporef()
2457 2453 assert repo is not None # help pytype
2458 2454
2459 2455 singleheadopt = (b'experimental', b'single-head-per-branch')
2460 2456 singlehead = repo.ui.configbool(*singleheadopt)
2461 2457 if singlehead:
2462 2458 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2463 2459 accountclosed = singleheadsub.get(
2464 2460 b"account-closed-heads", False
2465 2461 )
2466 2462 if singleheadsub.get(b"public-changes-only", False):
2467 2463 filtername = b"immutable"
2468 2464 else:
2469 2465 filtername = b"visible"
2470 2466 scmutil.enforcesinglehead(
2471 2467 repo, tr2, desc, accountclosed, filtername
2472 2468 )
2473 2469 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2474 2470 for name, (old, new) in sorted(
2475 2471 tr.changes[b'bookmarks'].items()
2476 2472 ):
2477 2473 args = tr.hookargs.copy()
2478 2474 args.update(bookmarks.preparehookargs(name, old, new))
2479 2475 repo.hook(
2480 2476 b'pretxnclose-bookmark',
2481 2477 throw=True,
2482 2478 **pycompat.strkwargs(args)
2483 2479 )
2484 2480 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2485 2481 cl = repo.unfiltered().changelog
2486 2482 for revs, (old, new) in tr.changes[b'phases']:
2487 2483 for rev in revs:
2488 2484 args = tr.hookargs.copy()
2489 2485 node = hex(cl.node(rev))
2490 2486 args.update(phases.preparehookargs(node, old, new))
2491 2487 repo.hook(
2492 2488 b'pretxnclose-phase',
2493 2489 throw=True,
2494 2490 **pycompat.strkwargs(args)
2495 2491 )
2496 2492
2497 2493 repo.hook(
2498 2494 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2499 2495 )
2500 2496
2501 2497 def releasefn(tr, success):
2502 2498 repo = reporef()
2503 2499 if repo is None:
2504 2500 # If the repo has been GC'd (and this release function is being
2505 2501 # called from transaction.__del__), there's not much we can do,
2506 2502 # so just leave the unfinished transaction there and let the
2507 2503 # user run `hg recover`.
2508 2504 return
2509 2505 if success:
2510 2506 # this should be explicitly invoked here, because
2511 2507 # in-memory changes aren't written out when closing the
2512 2508 # transaction if tr.addfilegenerator (via
2513 2509 # dirstate.write or so) isn't invoked while the
2514 2510 # transaction is running
2515 2511 repo.dirstate.write(None)
2516 2512 else:
2517 2513 # discard all changes (including ones already written
2518 2514 # out) in this transaction
2519 2515 narrowspec.restorebackup(self, b'journal.narrowspec')
2520 2516 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2521 2517 repo.dirstate.restorebackup(None, b'journal.dirstate')
2522 2518
2523 2519 repo.invalidate(clearfilecache=True)
2524 2520
2525 2521 tr = transaction.transaction(
2526 2522 rp,
2527 2523 self.svfs,
2528 2524 vfsmap,
2529 2525 b"journal",
2530 2526 b"undo",
2531 2527 aftertrans(renames),
2532 2528 self.store.createmode,
2533 2529 validator=validate,
2534 2530 releasefn=releasefn,
2535 2531 checkambigfiles=_cachedfiles,
2536 2532 name=desc,
2537 2533 )
2538 2534 tr.changes[b'origrepolen'] = len(self)
2539 2535 tr.changes[b'obsmarkers'] = set()
2540 2536 tr.changes[b'phases'] = []
2541 2537 tr.changes[b'bookmarks'] = {}
2542 2538
2543 2539 tr.hookargs[b'txnid'] = txnid
2544 2540 tr.hookargs[b'txnname'] = desc
2545 2541 tr.hookargs[b'changes'] = tr.changes
2546 2542 # note: writing the fncache only during finalize means that the file is
2547 2543 # outdated when running hooks. As fncache is used for streaming clone,
2548 2544 # this is not expected to break anything that happens during the hooks.
2549 2545 tr.addfinalize(b'flush-fncache', self.store.write)
2550 2546
2551 2547 def txnclosehook(tr2):
2552 2548 """To be run if transaction is successful, will schedule a hook run"""
2553 2549 # Don't reference tr2 in hook() so we don't hold a reference.
2554 2550 # This reduces memory consumption when there are multiple
2555 2551 # transactions per lock. This can likely go away if issue5045
2556 2552 # fixes the function accumulation.
2557 2553 hookargs = tr2.hookargs
2558 2554
2559 2555 def hookfunc(unused_success):
2560 2556 repo = reporef()
2561 2557 assert repo is not None # help pytype
2562 2558
2563 2559 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2564 2560 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2565 2561 for name, (old, new) in bmchanges:
2566 2562 args = tr.hookargs.copy()
2567 2563 args.update(bookmarks.preparehookargs(name, old, new))
2568 2564 repo.hook(
2569 2565 b'txnclose-bookmark',
2570 2566 throw=False,
2571 2567 **pycompat.strkwargs(args)
2572 2568 )
2573 2569
2574 2570 if hook.hashook(repo.ui, b'txnclose-phase'):
2575 2571 cl = repo.unfiltered().changelog
2576 2572 phasemv = sorted(
2577 2573 tr.changes[b'phases'], key=lambda r: r[0][0]
2578 2574 )
2579 2575 for revs, (old, new) in phasemv:
2580 2576 for rev in revs:
2581 2577 args = tr.hookargs.copy()
2582 2578 node = hex(cl.node(rev))
2583 2579 args.update(phases.preparehookargs(node, old, new))
2584 2580 repo.hook(
2585 2581 b'txnclose-phase',
2586 2582 throw=False,
2587 2583 **pycompat.strkwargs(args)
2588 2584 )
2589 2585
2590 2586 repo.hook(
2591 2587 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2592 2588 )
2593 2589
2594 2590 repo = reporef()
2595 2591 assert repo is not None # help pytype
2596 2592 repo._afterlock(hookfunc)
2597 2593
2598 2594 tr.addfinalize(b'txnclose-hook', txnclosehook)
2599 2595 # Include a leading "-" to make it happen before the transaction summary
2600 2596 # reports registered via scmutil.registersummarycallback() whose names
2601 2597 # are 00-txnreport etc. That way, the caches will be warm when the
2602 2598 # callbacks run.
2603 2599 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2604 2600
2605 2601 def txnaborthook(tr2):
2606 2602 """To be run if transaction is aborted"""
2607 2603 repo = reporef()
2608 2604 assert repo is not None # help pytype
2609 2605 repo.hook(
2610 2606 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2611 2607 )
2612 2608
2613 2609 tr.addabort(b'txnabort-hook', txnaborthook)
2614 2610 # avoid eager cache invalidation. in-memory data should be identical
2615 2611 # to stored data if transaction has no error.
2616 2612 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2617 2613 self._transref = weakref.ref(tr)
2618 2614 scmutil.registersummarycallback(self, tr, desc)
2619 2615 return tr
2620 2616
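# Editor's illustrative sketch (not part of the original source): as the
# check-locks guard at the top of transaction() enforces, a transaction is
# normally opened while holding the store lock, e.g.:
#
#     with repo.lock():
#         with repo.transaction(b'my-change') as tr:
#             ...  # mutate the store; tr is closed on normal exit
#
# b'my-change' is a hypothetical transaction description.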
2621 2617 def _journalfiles(self):
2622 2618 first = (
2623 2619 (self.svfs, b'journal'),
2624 2620 (self.svfs, b'journal.narrowspec'),
2625 2621 (self.vfs, b'journal.narrowspec.dirstate'),
2626 2622 (self.vfs, b'journal.dirstate'),
2627 2623 )
2628 2624 middle = []
2629 2625 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2630 2626 if dirstate_data is not None:
2631 2627 middle.append((self.vfs, dirstate_data))
2632 2628 end = (
2633 2629 (self.vfs, b'journal.branch'),
2634 2630 (self.vfs, b'journal.desc'),
2635 2631 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2636 2632 (self.svfs, b'journal.phaseroots'),
2637 2633 )
2638 2634 return first + tuple(middle) + end
2639 2635
2640 2636 def undofiles(self):
2641 2637 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2642 2638
2643 2639 @unfilteredmethod
2644 2640 def _writejournal(self, desc):
2645 2641 self.dirstate.savebackup(None, b'journal.dirstate')
2646 2642 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2647 2643 narrowspec.savebackup(self, b'journal.narrowspec')
2648 2644 self.vfs.write(
2649 2645 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2650 2646 )
2651 2647 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2652 2648 bookmarksvfs = bookmarks.bookmarksvfs(self)
2653 2649 bookmarksvfs.write(
2654 2650 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2655 2651 )
2656 2652 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2657 2653
2658 2654 def recover(self):
2659 2655 with self.lock():
2660 2656 if self.svfs.exists(b"journal"):
2661 2657 self.ui.status(_(b"rolling back interrupted transaction\n"))
2662 2658 vfsmap = {
2663 2659 b'': self.svfs,
2664 2660 b'plain': self.vfs,
2665 2661 }
2666 2662 transaction.rollback(
2667 2663 self.svfs,
2668 2664 vfsmap,
2669 2665 b"journal",
2670 2666 self.ui.warn,
2671 2667 checkambigfiles=_cachedfiles,
2672 2668 )
2673 2669 self.invalidate()
2674 2670 return True
2675 2671 else:
2676 2672 self.ui.warn(_(b"no interrupted transaction available\n"))
2677 2673 return False
2678 2674
2679 2675 def rollback(self, dryrun=False, force=False):
2680 2676 wlock = lock = dsguard = None
2681 2677 try:
2682 2678 wlock = self.wlock()
2683 2679 lock = self.lock()
2684 2680 if self.svfs.exists(b"undo"):
2685 2681 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2686 2682
2687 2683 return self._rollback(dryrun, force, dsguard)
2688 2684 else:
2689 2685 self.ui.warn(_(b"no rollback information available\n"))
2690 2686 return 1
2691 2687 finally:
2692 2688 release(dsguard, lock, wlock)
2693 2689
2694 2690 @unfilteredmethod # Until we get smarter cache management
2695 2691 def _rollback(self, dryrun, force, dsguard):
2696 2692 ui = self.ui
2697 2693 try:
2698 2694 args = self.vfs.read(b'undo.desc').splitlines()
2699 2695 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2700 2696 if len(args) >= 3:
2701 2697 detail = args[2]
2702 2698 oldtip = oldlen - 1
2703 2699
2704 2700 if detail and ui.verbose:
2705 2701 msg = _(
2706 2702 b'repository tip rolled back to revision %d'
2707 2703 b' (undo %s: %s)\n'
2708 2704 ) % (oldtip, desc, detail)
2709 2705 else:
2710 2706 msg = _(
2711 2707 b'repository tip rolled back to revision %d (undo %s)\n'
2712 2708 ) % (oldtip, desc)
2713 2709 except IOError:
2714 2710 msg = _(b'rolling back unknown transaction\n')
2715 2711 desc = None
2716 2712
2717 2713 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2718 2714 raise error.Abort(
2719 2715 _(
2720 2716 b'rollback of last commit while not checked out '
2721 2717 b'may lose data'
2722 2718 ),
2723 2719 hint=_(b'use -f to force'),
2724 2720 )
2725 2721
2726 2722 ui.status(msg)
2727 2723 if dryrun:
2728 2724 return 0
2729 2725
2730 2726 parents = self.dirstate.parents()
2731 2727 self.destroying()
2732 2728 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2733 2729 transaction.rollback(
2734 2730 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2735 2731 )
2736 2732 bookmarksvfs = bookmarks.bookmarksvfs(self)
2737 2733 if bookmarksvfs.exists(b'undo.bookmarks'):
2738 2734 bookmarksvfs.rename(
2739 2735 b'undo.bookmarks', b'bookmarks', checkambig=True
2740 2736 )
2741 2737 if self.svfs.exists(b'undo.phaseroots'):
2742 2738 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2743 2739 self.invalidate()
2744 2740
2745 2741 has_node = self.changelog.index.has_node
2746 2742 parentgone = any(not has_node(p) for p in parents)
2747 2743 if parentgone:
2748 2744 # prevent dirstateguard from overwriting already restored one
2749 2745 dsguard.close()
2750 2746
2751 2747 narrowspec.restorebackup(self, b'undo.narrowspec')
2752 2748 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2753 2749 self.dirstate.restorebackup(None, b'undo.dirstate')
2754 2750 try:
2755 2751 branch = self.vfs.read(b'undo.branch')
2756 2752 self.dirstate.setbranch(encoding.tolocal(branch))
2757 2753 except IOError:
2758 2754 ui.warn(
2759 2755 _(
2760 2756 b'named branch could not be reset: '
2761 2757 b'current branch is still \'%s\'\n'
2762 2758 )
2763 2759 % self.dirstate.branch()
2764 2760 )
2765 2761
2766 2762 parents = tuple([p.rev() for p in self[None].parents()])
2767 2763 if len(parents) > 1:
2768 2764 ui.status(
2769 2765 _(
2770 2766 b'working directory now based on '
2771 2767 b'revisions %d and %d\n'
2772 2768 )
2773 2769 % parents
2774 2770 )
2775 2771 else:
2776 2772 ui.status(
2777 2773 _(b'working directory now based on revision %d\n') % parents
2778 2774 )
2779 2775 mergestatemod.mergestate.clean(self)
2780 2776
2781 2777 # TODO: if we know which new heads may result from this rollback, pass
2782 2778 # them to destroy(), which will prevent the branchhead cache from being
2783 2779 # invalidated.
2784 2780 self.destroyed()
2785 2781 return 0
2786 2782
2787 2783 def _buildcacheupdater(self, newtransaction):
2788 2784 """called during transaction to build the callback updating cache
2789 2785
2790 2786 Lives on the repository to help extensions that might want to augment
2791 2787 this logic. For this purpose, the created transaction is passed to the
2792 2788 method.
2793 2789 """
2794 2790 # we must avoid cyclic reference between repo and transaction.
2795 2791 reporef = weakref.ref(self)
2796 2792
2797 2793 def updater(tr):
2798 2794 repo = reporef()
2799 2795 assert repo is not None # help pytype
2800 2796 repo.updatecaches(tr)
2801 2797
2802 2798 return updater
2803 2799
2804 2800 @unfilteredmethod
2805 2801 def updatecaches(self, tr=None, full=False, caches=None):
2806 2802 """warm appropriate caches
2807 2803
2808 2804 If this function is called after a transaction has closed, the transaction
2809 2805 will be available in the 'tr' argument. This can be used to selectively
2810 2806 update caches relevant to the changes in that transaction.
2811 2807
2812 2808 If 'full' is set, make sure all caches the function knows about have
2813 2809 up-to-date data, even the ones usually loaded more lazily.
2814 2810
2815 2811 The `full` argument can take a special "post-clone" value. In this case
2816 2812 the cache warming is done after a clone, and some of the slower caches might
2817 2813 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific,
2818 2814 as we plan for a cleaner way to deal with this for 5.9.
2819 2815 """
2820 2816 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2821 2817 # During strip, many caches are invalid but
2822 2818 # later call to `destroyed` will refresh them.
2823 2819 return
2824 2820
2825 2821 unfi = self.unfiltered()
2826 2822
2827 2823 if full:
2828 2824 msg = (
2829 2825 "`full` argument for `repo.updatecaches` is deprecated\n"
2830 2826 "(use `caches=repository.CACHE_ALL` instead)"
2831 2827 )
2832 2828 self.ui.deprecwarn(msg, b"5.9")
2833 2829 caches = repository.CACHES_ALL
2834 2830 if full == b"post-clone":
2835 2831 caches = repository.CACHES_POST_CLONE
2836 2832 caches = repository.CACHES_ALL
2837 2833 elif caches is None:
2838 2834 caches = repository.CACHES_DEFAULT
2839 2835
2840 2836 if repository.CACHE_BRANCHMAP_SERVED in caches:
2841 2837 if tr is None or tr.changes[b'origrepolen'] < len(self):
2842 2838 # accessing the 'served' branchmap should refresh all the others,
2843 2839 self.ui.debug(b'updating the branch cache\n')
2844 2840 self.filtered(b'served').branchmap()
2845 2841 self.filtered(b'served.hidden').branchmap()
2846 2842 # flush all possibly delayed write.
2847 2843 self._branchcaches.write_delayed(self)
2848 2844
2849 2845 if repository.CACHE_CHANGELOG_CACHE in caches:
2850 2846 self.changelog.update_caches(transaction=tr)
2851 2847
2852 2848 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2853 2849 self.manifestlog.update_caches(transaction=tr)
2854 2850
2855 2851 if repository.CACHE_REV_BRANCH in caches:
2856 2852 rbc = unfi.revbranchcache()
2857 2853 for r in unfi.changelog:
2858 2854 rbc.branchinfo(r)
2859 2855 rbc.write()
2860 2856
2861 2857 if repository.CACHE_FULL_MANIFEST in caches:
2862 2858 # ensure the working copy parents are in the manifestfulltextcache
2863 2859 for ctx in self[b'.'].parents():
2864 2860 ctx.manifest() # accessing the manifest is enough
2865 2861
2866 2862 if repository.CACHE_FILE_NODE_TAGS in caches:
2867 2863 # accessing fnode cache warms the cache
2868 2864 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2869 2865
2870 2866 if repository.CACHE_TAGS_DEFAULT in caches:
2871 2867 # accessing tags warms the cache
2872 2868 self.tags()
2873 2869 if repository.CACHE_TAGS_SERVED in caches:
2874 2870 self.filtered(b'served').tags()
2875 2871
2876 2872 if repository.CACHE_BRANCHMAP_ALL in caches:
2877 2873 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2878 2874 # so we're forcing a write to cause these caches to be warmed up
2879 2875 # even if they haven't explicitly been requested yet (if they've
2880 2876 # never been used by hg, they won't ever have been written, even if
2881 2877 # they're a subset of another kind of cache that *has* been used).
2882 2878 for filt in repoview.filtertable.keys():
2883 2879 filtered = self.filtered(filt)
2884 2880 filtered.branchmap().write(filtered)
2885 2881
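# Editor's illustrative sketch (not part of the original source): the modern
# way to request the cache warming above is through the `caches` keyword
# rather than the deprecated `full` flag:
#
#     repo.updatecaches(caches=repository.CACHES_ALL)  # warm everything
#     repo.updatecaches()                              # CACHES_DEFAULT
#
# `repository` is the interfaces module imported at the top of this file.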
2886 2882 def invalidatecaches(self):
2887 2883
2888 2884 if '_tagscache' in vars(self):
2889 2885 # can't use delattr on proxy
2890 2886 del self.__dict__['_tagscache']
2891 2887
2892 2888 self._branchcaches.clear()
2893 2889 self.invalidatevolatilesets()
2894 2890 self._sparsesignaturecache.clear()
2895 2891
2896 2892 def invalidatevolatilesets(self):
2897 2893 self.filteredrevcache.clear()
2898 2894 obsolete.clearobscaches(self)
2899 2895 self._quick_access_changeid_invalidate()
2900 2896
2901 2897 def invalidatedirstate(self):
2902 2898 """Invalidates the dirstate, causing the next call to dirstate
2903 2899 to check if it was modified since the last time it was read,
2904 2900 rereading it if it has.
2905 2901
2906 2902 This is different from dirstate.invalidate() in that it doesn't always
2907 2903 reread the dirstate. Use dirstate.invalidate() if you want to
2908 2904 explicitly read the dirstate again (i.e. restoring it to a previous
2909 2905 known good state)."""
2910 2906 if hasunfilteredcache(self, 'dirstate'):
2911 2907 for k in self.dirstate._filecache:
2912 2908 try:
2913 2909 delattr(self.dirstate, k)
2914 2910 except AttributeError:
2915 2911 pass
2916 2912 delattr(self.unfiltered(), 'dirstate')
2917 2913
2918 2914 def invalidate(self, clearfilecache=False):
2919 2915 """Invalidates both store and non-store parts other than dirstate
2920 2916
2921 2917 If a transaction is running, invalidation of store is omitted,
2922 2918 because discarding in-memory changes might cause inconsistency
2923 2919 (e.g. incomplete fncache causes unintentional failure, but
2924 2920 a redundant one doesn't).
2925 2921 """
2926 2922 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2927 2923 for k in list(self._filecache.keys()):
2928 2924 # dirstate is invalidated separately in invalidatedirstate()
2929 2925 if k == b'dirstate':
2930 2926 continue
2931 2927 if (
2932 2928 k == b'changelog'
2933 2929 and self.currenttransaction()
2934 2930 and self.changelog._delayed
2935 2931 ):
2936 2932 # The changelog object may store unwritten revisions. We don't
2937 2933 # want to lose them.
2938 2934 # TODO: Solve the problem instead of working around it.
2939 2935 continue
2940 2936
2941 2937 if clearfilecache:
2942 2938 del self._filecache[k]
2943 2939 try:
2944 2940 delattr(unfiltered, k)
2945 2941 except AttributeError:
2946 2942 pass
2947 2943 self.invalidatecaches()
2948 2944 if not self.currenttransaction():
2949 2945 # TODO: Changing contents of store outside transaction
2950 2946 # causes inconsistency. We should make in-memory store
2951 2947 # changes detectable, and abort if changed.
2952 2948 self.store.invalidatecaches()
2953 2949
2954 2950 def invalidateall(self):
2955 2951 """Fully invalidates both store and non-store parts, causing the
2956 2952 subsequent operation to reread any outside changes."""
2957 2953 # extension should hook this to invalidate its caches
2958 2954 self.invalidate()
2959 2955 self.invalidatedirstate()
2960 2956
2961 2957 @unfilteredmethod
2962 2958 def _refreshfilecachestats(self, tr):
2963 2959 """Reload stats of cached files so that they are flagged as valid"""
2964 2960 for k, ce in self._filecache.items():
2965 2961 k = pycompat.sysstr(k)
2966 2962 if k == 'dirstate' or k not in self.__dict__:
2967 2963 continue
2968 2964 ce.refresh()
2969 2965
2970 2966 def _lock(
2971 2967 self,
2972 2968 vfs,
2973 2969 lockname,
2974 2970 wait,
2975 2971 releasefn,
2976 2972 acquirefn,
2977 2973 desc,
2978 2974 ):
2979 2975 timeout = 0
2980 2976 warntimeout = 0
2981 2977 if wait:
2982 2978 timeout = self.ui.configint(b"ui", b"timeout")
2983 2979 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2984 2980 # internal config: ui.signal-safe-lock
2985 2981 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2986 2982
2987 2983 l = lockmod.trylock(
2988 2984 self.ui,
2989 2985 vfs,
2990 2986 lockname,
2991 2987 timeout,
2992 2988 warntimeout,
2993 2989 releasefn=releasefn,
2994 2990 acquirefn=acquirefn,
2995 2991 desc=desc,
2996 2992 signalsafe=signalsafe,
2997 2993 )
2998 2994 return l
2999 2995
3000 2996 def _afterlock(self, callback):
3001 2997 """add a callback to be run when the repository is fully unlocked
3002 2998
3003 2999 The callback will be executed when the outermost lock is released
3004 3000 (with wlock being higher level than 'lock')."""
3005 3001 for ref in (self._wlockref, self._lockref):
3006 3002 l = ref and ref()
3007 3003 if l and l.held:
3008 3004 l.postrelease.append(callback)
3009 3005 break
3010 3006 else: # no lock has been found.
3011 3007 callback(True)
3012 3008
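# Editorial sketch (not part of localrepo.py): how an extension might use
# _afterlock() to defer work until the outermost lock is released; the
# callback name and message are made up.
#
#     def notify(success):
#         if success:
#             repo.ui.status(b'all repository locks released\n')
#     repo._afterlock(notify)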
3013 3009 def lock(self, wait=True):
3014 3010 """Lock the repository store (.hg/store) and return a weak reference
3015 3011 to the lock. Use this before modifying the store (e.g. committing or
3016 3012 stripping). If you are opening a transaction, get a lock as well.
3017 3013
3018 3014 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3019 3015 'wlock' first to avoid a dead-lock hazard."""
3020 3016 l = self._currentlock(self._lockref)
3021 3017 if l is not None:
3022 3018 l.lock()
3023 3019 return l
3024 3020
3025 3021 l = self._lock(
3026 3022 vfs=self.svfs,
3027 3023 lockname=b"lock",
3028 3024 wait=wait,
3029 3025 releasefn=None,
3030 3026 acquirefn=self.invalidate,
3031 3027 desc=_(b'repository %s') % self.origroot,
3032 3028 )
3033 3029 self._lockref = weakref.ref(l)
3034 3030 return l
3035 3031
3036 3032 def wlock(self, wait=True):
3037 3033 """Lock the non-store parts of the repository (everything under
3038 3034 .hg except .hg/store) and return a weak reference to the lock.
3039 3035
3040 3036 Use this before modifying files in .hg.
3041 3037
3042 3038 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3043 3039 'wlock' first to avoid a dead-lock hazard."""
3044 3040 l = self._wlockref() if self._wlockref else None
3045 3041 if l is not None and l.held:
3046 3042 l.lock()
3047 3043 return l
3048 3044
3049 3045 # We do not need to check for non-waiting lock acquisition. Such
3050 3046 # acquisition would not cause a dead-lock, as it would just fail.
3051 3047 if wait and (
3052 3048 self.ui.configbool(b'devel', b'all-warnings')
3053 3049 or self.ui.configbool(b'devel', b'check-locks')
3054 3050 ):
3055 3051 if self._currentlock(self._lockref) is not None:
3056 3052 self.ui.develwarn(b'"wlock" acquired after "lock"')
3057 3053
3058 3054 def unlock():
3059 3055 if self.dirstate.pendingparentchange():
3060 3056 self.dirstate.invalidate()
3061 3057 else:
3062 3058 self.dirstate.write(None)
3063 3059
3064 3060 self._filecache[b'dirstate'].refresh()
3065 3061
3066 3062 l = self._lock(
3067 3063 self.vfs,
3068 3064 b"wlock",
3069 3065 wait,
3070 3066 unlock,
3071 3067 self.invalidatedirstate,
3072 3068 _(b'working directory of %s') % self.origroot,
3073 3069 )
3074 3070 self._wlockref = weakref.ref(l)
3075 3071 return l
3076 3072
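# Editorial sketch (not part of localrepo.py): the lock order documented above
# is 'wlock' before 'lock', e.g. in extension code:
#
#     with repo.wlock(), repo.lock():
#         ...  # mutate working copy and store
#
# Taking them in the opposite order triggers the devel warning emitted in
# wlock() when devel.check-locks is enabled.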
3077 3073 def _currentlock(self, lockref):
3078 3074 """Returns the lock if it's held, or None if it's not."""
3079 3075 if lockref is None:
3080 3076 return None
3081 3077 l = lockref()
3082 3078 if l is None or not l.held:
3083 3079 return None
3084 3080 return l
3085 3081
3086 3082 def currentwlock(self):
3087 3083 """Returns the wlock if it's held, or None if it's not."""
3088 3084 return self._currentlock(self._wlockref)
3089 3085
3090 3086 def checkcommitpatterns(self, wctx, match, status, fail):
3091 3087 """check for commit arguments that aren't committable"""
3092 3088 if match.isexact() or match.prefix():
3093 3089 matched = set(status.modified + status.added + status.removed)
3094 3090
3095 3091 for f in match.files():
3096 3092 f = self.dirstate.normalize(f)
3097 3093 if f == b'.' or f in matched or f in wctx.substate:
3098 3094 continue
3099 3095 if f in status.deleted:
3100 3096 fail(f, _(b'file not found!'))
3101 3097 # Is it a directory that exists or used to exist?
3102 3098 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3103 3099 d = f + b'/'
3104 3100 for mf in matched:
3105 3101 if mf.startswith(d):
3106 3102 break
3107 3103 else:
3108 3104 fail(f, _(b"no match under directory!"))
3109 3105 elif f not in self.dirstate:
3110 3106 fail(f, _(b"file not tracked!"))
3111 3107
3112 3108 @unfilteredmethod
3113 3109 def commit(
3114 3110 self,
3115 3111 text=b"",
3116 3112 user=None,
3117 3113 date=None,
3118 3114 match=None,
3119 3115 force=False,
3120 3116 editor=None,
3121 3117 extra=None,
3122 3118 ):
3123 3119 """Add a new revision to current repository.
3124 3120
3125 3121 Revision information is gathered from the working directory,
3126 3122 match can be used to filter the committed files. If editor is
3127 3123 supplied, it is called to get a commit message.
3128 3124 """
3129 3125 if extra is None:
3130 3126 extra = {}
3131 3127
3132 3128 def fail(f, msg):
3133 3129 raise error.InputError(b'%s: %s' % (f, msg))
3134 3130
3135 3131 if not match:
3136 3132 match = matchmod.always()
3137 3133
3138 3134 if not force:
3139 3135 match.bad = fail
3140 3136
3141 3137 # lock() for recent changelog (see issue4368)
3142 3138 with self.wlock(), self.lock():
3143 3139 wctx = self[None]
3144 3140 merge = len(wctx.parents()) > 1
3145 3141
3146 3142 if not force and merge and not match.always():
3147 3143 raise error.Abort(
3148 3144 _(
3149 3145 b'cannot partially commit a merge '
3150 3146 b'(do not specify files or patterns)'
3151 3147 )
3152 3148 )
3153 3149
3154 3150 status = self.status(match=match, clean=force)
3155 3151 if force:
3156 3152 status.modified.extend(
3157 3153 status.clean
3158 3154 ) # mq may commit clean files
3159 3155
3160 3156 # check subrepos
3161 3157 subs, commitsubs, newstate = subrepoutil.precommit(
3162 3158 self.ui, wctx, status, match, force=force
3163 3159 )
3164 3160
3165 3161 # make sure all explicit patterns are matched
3166 3162 if not force:
3167 3163 self.checkcommitpatterns(wctx, match, status, fail)
3168 3164
3169 3165 cctx = context.workingcommitctx(
3170 3166 self, status, text, user, date, extra
3171 3167 )
3172 3168
3173 3169 ms = mergestatemod.mergestate.read(self)
3174 3170 mergeutil.checkunresolved(ms)
3175 3171
3176 3172 # internal config: ui.allowemptycommit
3177 3173 if cctx.isempty() and not self.ui.configbool(
3178 3174 b'ui', b'allowemptycommit'
3179 3175 ):
3180 3176 self.ui.debug(b'nothing to commit, clearing merge state\n')
3181 3177 ms.reset()
3182 3178 return None
3183 3179
3184 3180 if merge and cctx.deleted():
3185 3181 raise error.Abort(_(b"cannot commit merge with missing files"))
3186 3182
3187 3183 if editor:
3188 3184 cctx._text = editor(self, cctx, subs)
3189 3185 edited = text != cctx._text
3190 3186
3191 3187 # Save commit message in case this transaction gets rolled back
3192 3188 # (e.g. by a pretxncommit hook). Leave the content alone on
3193 3189 # the assumption that the user will use the same editor again.
3194 3190 msg_path = self.savecommitmessage(cctx._text)
3195 3191
3196 3192 # commit subs and write new state
3197 3193 if subs:
3198 3194 uipathfn = scmutil.getuipathfn(self)
3199 3195 for s in sorted(commitsubs):
3200 3196 sub = wctx.sub(s)
3201 3197 self.ui.status(
3202 3198 _(b'committing subrepository %s\n')
3203 3199 % uipathfn(subrepoutil.subrelpath(sub))
3204 3200 )
3205 3201 sr = sub.commit(cctx._text, user, date)
3206 3202 newstate[s] = (newstate[s][0], sr)
3207 3203 subrepoutil.writestate(self, newstate)
3208 3204
3209 3205 p1, p2 = self.dirstate.parents()
3210 3206 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3211 3207 try:
3212 3208 self.hook(
3213 3209 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3214 3210 )
3215 3211 with self.transaction(b'commit'):
3216 3212 ret = self.commitctx(cctx, True)
3217 3213 # update bookmarks, dirstate and mergestate
3218 3214 bookmarks.update(self, [p1, p2], ret)
3219 3215 cctx.markcommitted(ret)
3220 3216 ms.reset()
3221 3217 except: # re-raises
3222 3218 if edited:
3223 3219 self.ui.write(
3224 3220 _(b'note: commit message saved in %s\n') % msg_path
3225 3221 )
3226 3222 self.ui.write(
3227 3223 _(
3228 3224 b"note: use 'hg commit --logfile "
3229 3225 b"%s --edit' to reuse it\n"
3230 3226 )
3231 3227 % msg_path
3232 3228 )
3233 3229 raise
3234 3230
3235 3231 def commithook(unused_success):
3236 3232 # hack for commands that use a temporary commit (e.g. histedit):
3237 3233 # the temporary commit may already be stripped by the time the hook runs
3238 3234 if self.changelog.hasnode(ret):
3239 3235 self.hook(
3240 3236 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3241 3237 )
3242 3238
3243 3239 self._afterlock(commithook)
3244 3240 return ret
3245 3241
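# Editorial sketch (not part of localrepo.py): a minimal commit() call as an
# extension might issue it; the message and user values are made up.
#
#     node = repo.commit(
#         text=b'example: automated change',
#         user=b'Example <example@example.com>',
#     )
#     if node is None:
#         repo.ui.status(b'nothing to commit\n')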
3246 3242 @unfilteredmethod
3247 3243 def commitctx(self, ctx, error=False, origctx=None):
3248 3244 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3249 3245
3250 3246 @unfilteredmethod
3251 3247 def destroying(self):
3252 3248 """Inform the repository that nodes are about to be destroyed.
3253 3249 Intended for use by strip and rollback, so there's a common
3254 3250 place for anything that has to be done before destroying history.
3255 3251
3256 3252 This is mostly useful for saving state that is in memory and waiting
3257 3253 to be flushed when the current lock is released. Because a call to
3258 3254 destroyed is imminent, the repo will be invalidated causing those
3259 3255 changes to stay in memory (waiting for the next unlock), or vanish
3260 3256 completely.
3261 3257 """
3262 3258 # When using the same lock to commit and strip, the phasecache is left
3263 3259 # dirty after committing. Then when we strip, the repo is invalidated,
3264 3260 # causing those changes to disappear.
3265 3261 if '_phasecache' in vars(self):
3266 3262 self._phasecache.write()
3267 3263
3268 3264 @unfilteredmethod
3269 3265 def destroyed(self):
3270 3266 """Inform the repository that nodes have been destroyed.
3271 3267 Intended for use by strip and rollback, so there's a common
3272 3268 place for anything that has to be done after destroying history.
3273 3269 """
3274 3270 # When one tries to:
3275 3271 # 1) destroy nodes thus calling this method (e.g. strip)
3276 3272 # 2) use phasecache somewhere (e.g. commit)
3277 3273 #
3278 3274 # then 2) will fail because the phasecache contains nodes that were
3279 3275 # removed. We can either remove phasecache from the filecache,
3280 3276 # causing it to reload next time it is accessed, or simply filter
3281 3277 # the removed nodes now and write the updated cache.
3282 3278 self._phasecache.filterunknown(self)
3283 3279 self._phasecache.write()
3284 3280
3285 3281 # refresh all repository caches
3286 3282 self.updatecaches()
3287 3283
3288 3284 # Ensure the persistent tag cache is updated. Doing it now
3289 3285 # means that the tag cache only has to worry about destroyed
3290 3286 # heads immediately after a strip/rollback. That in turn
3291 3287 # guarantees that "cachetip == currenttip" (comparing both rev
3292 3288 # and node) always means no nodes have been added or destroyed.
3293 3289
3294 3290 # XXX this is suboptimal when qrefresh'ing: we strip the current
3295 3291 # head, refresh the tag cache, then immediately add a new head.
3296 3292 # But I think doing it this way is necessary for the "instant
3297 3293 # tag cache retrieval" case to work.
3298 3294 self.invalidate()
3299 3295
3300 3296 def status(
3301 3297 self,
3302 3298 node1=b'.',
3303 3299 node2=None,
3304 3300 match=None,
3305 3301 ignored=False,
3306 3302 clean=False,
3307 3303 unknown=False,
3308 3304 listsubrepos=False,
3309 3305 ):
3310 3306 '''a convenience method that calls node1.status(node2)'''
3311 3307 return self[node1].status(
3312 3308 node2, match, ignored, clean, unknown, listsubrepos
3313 3309 )
3314 3310
3315 3311 def addpostdsstatus(self, ps):
3316 3312 """Add a callback to run within the wlock, at the point at which status
3317 3313 fixups happen.
3318 3314
3319 3315 On status completion, callback(wctx, status) will be called with the
3320 3316 wlock held, unless the dirstate has changed from underneath or the wlock
3321 3317 couldn't be grabbed.
3322 3318
3323 3319 Callbacks should not capture and use a cached copy of the dirstate --
3324 3320 it might change in the meanwhile. Instead, they should access the
3325 3321 dirstate via wctx.repo().dirstate.
3326 3322
3327 3323 This list is emptied out after each status run -- extensions should
3328 3324 make sure they add to this list each time dirstate.status is called.
3329 3325 Extensions should also make sure they don't call this for statuses
3330 3326 that don't involve the dirstate.
3331 3327 """
3332 3328
3333 3329 # The list is located here for uniqueness reasons -- it is actually
3334 3330 # managed by the workingctx, but that isn't unique per-repo.
3335 3331 self._postdsstatus.append(ps)
3336 3332
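# Editorial sketch (not part of localrepo.py): registering a
# post-dirstate-status callback as described above; the function name is
# hypothetical.
#
#     def fixup(wctx, status):
#         # runs with the wlock held; access the dirstate via
#         # wctx.repo().dirstate, never a cached copy
#         pass
#     repo.addpostdsstatus(fixup)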
3337 3333 def postdsstatus(self):
3338 3334 """Used by workingctx to get the list of post-dirstate-status hooks."""
3339 3335 return self._postdsstatus
3340 3336
3341 3337 def clearpostdsstatus(self):
3342 3338 """Used by workingctx to clear post-dirstate-status hooks."""
3343 3339 del self._postdsstatus[:]
3344 3340
3345 3341 def heads(self, start=None):
3346 3342 if start is None:
3347 3343 cl = self.changelog
3348 3344 headrevs = reversed(cl.headrevs())
3349 3345 return [cl.node(rev) for rev in headrevs]
3350 3346
3351 3347 heads = self.changelog.heads(start)
3352 3348 # sort the output in rev descending order
3353 3349 return sorted(heads, key=self.changelog.rev, reverse=True)
3354 3350
3355 3351 def branchheads(self, branch=None, start=None, closed=False):
3356 3352 """return a (possibly filtered) list of heads for the given branch
3357 3353
3358 3354 Heads are returned in topological order, from newest to oldest.
3359 3355 If branch is None, use the dirstate branch.
3360 3356 If start is not None, return only heads reachable from start.
3361 3357 If closed is True, return heads that are marked as closed as well.
3362 3358 """
3363 3359 if branch is None:
3364 3360 branch = self[None].branch()
3365 3361 branches = self.branchmap()
3366 3362 if not branches.hasbranch(branch):
3367 3363 return []
3368 3364 # the cache returns heads ordered lowest to highest
3369 3365 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3370 3366 if start is not None:
3371 3367 # filter out the heads that cannot be reached from startrev
3372 3368 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3373 3369 bheads = [h for h in bheads if h in fbheads]
3374 3370 return bheads
3375 3371
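# Editorial sketch (not part of localrepo.py): listing the heads of one branch,
# newest first; the branch name is an example.
#
#     for node in repo.branchheads(b'default', closed=True):
#         repo.ui.write(b'%s\n' % hex(node))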
3376 3372 def branches(self, nodes):
3377 3373 if not nodes:
3378 3374 nodes = [self.changelog.tip()]
3379 3375 b = []
3380 3376 for n in nodes:
3381 3377 t = n
3382 3378 while True:
3383 3379 p = self.changelog.parents(n)
3384 3380 if p[1] != self.nullid or p[0] == self.nullid:
3385 3381 b.append((t, n, p[0], p[1]))
3386 3382 break
3387 3383 n = p[0]
3388 3384 return b
3389 3385
3390 3386 def between(self, pairs):
3391 3387 r = []
3392 3388
3393 3389 for top, bottom in pairs:
3394 3390 n, l, i = top, [], 0
3395 3391 f = 1
3396 3392
3397 3393 while n != bottom and n != self.nullid:
3398 3394 p = self.changelog.parents(n)[0]
3399 3395 if i == f:
3400 3396 l.append(n)
3401 3397 f = f * 2
3402 3398 n = p
3403 3399 i += 1
3404 3400
3405 3401 r.append(l)
3406 3402
3407 3403 return r
3408 3404
3409 3405 def checkpush(self, pushop):
3410 3406 """Extensions can override this function if additional checks have
3411 3407 to be performed before pushing, or call it if they override push
3412 3408 command.
3413 3409 """
3414 3410
3415 3411 @unfilteredpropertycache
3416 3412 def prepushoutgoinghooks(self):
3417 3413 """Return util.hooks consists of a pushop with repo, remote, outgoing
3418 3414 methods, which are called before pushing changesets.
3419 3415 """
3420 3416 return util.hooks()
3421 3417
3422 3418 def pushkey(self, namespace, key, old, new):
3423 3419 try:
3424 3420 tr = self.currenttransaction()
3425 3421 hookargs = {}
3426 3422 if tr is not None:
3427 3423 hookargs.update(tr.hookargs)
3428 3424 hookargs = pycompat.strkwargs(hookargs)
3429 3425 hookargs['namespace'] = namespace
3430 3426 hookargs['key'] = key
3431 3427 hookargs['old'] = old
3432 3428 hookargs['new'] = new
3433 3429 self.hook(b'prepushkey', throw=True, **hookargs)
3434 3430 except error.HookAbort as exc:
3435 3431 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3436 3432 if exc.hint:
3437 3433 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3438 3434 return False
3439 3435 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3440 3436 ret = pushkey.push(self, namespace, key, old, new)
3441 3437
3442 3438 def runhook(unused_success):
3443 3439 self.hook(
3444 3440 b'pushkey',
3445 3441 namespace=namespace,
3446 3442 key=key,
3447 3443 old=old,
3448 3444 new=new,
3449 3445 ret=ret,
3450 3446 )
3451 3447
3452 3448 self._afterlock(runhook)
3453 3449 return ret
3454 3450
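# Editorial sketch (not part of localrepo.py): pushing a bookmark through the
# pushkey namespace; the bookmark name is made up and an empty old value means
# the bookmark did not exist before.
#
#     ok = repo.pushkey(b'bookmarks', b'my-bookmark', b'', hex(node))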
3455 3451 def listkeys(self, namespace):
3456 3452 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3457 3453 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3458 3454 values = pushkey.list(self, namespace)
3459 3455 self.hook(b'listkeys', namespace=namespace, values=values)
3460 3456 return values
3461 3457
3462 3458 def debugwireargs(self, one, two, three=None, four=None, five=None):
3463 3459 '''used to test argument passing over the wire'''
3464 3460 return b"%s %s %s %s %s" % (
3465 3461 one,
3466 3462 two,
3467 3463 pycompat.bytestr(three),
3468 3464 pycompat.bytestr(four),
3469 3465 pycompat.bytestr(five),
3470 3466 )
3471 3467
3472 3468 def savecommitmessage(self, text):
3473 3469 fp = self.vfs(b'last-message.txt', b'wb')
3474 3470 try:
3475 3471 fp.write(text)
3476 3472 finally:
3477 3473 fp.close()
3478 3474 return self.pathto(fp.name[len(self.root) + 1 :])
3479 3475
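# Editorial sketch (not part of localrepo.py): savecommitmessage() writes the
# text to .hg/last-message.txt and returns a path relative to the current
# working directory, e.g.:
#
#     msg_path = repo.savecommitmessage(b'WIP: draft message\n')
#     repo.ui.status(b'message saved in %s\n' % msg_path)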
3480 3476 def register_wanted_sidedata(self, category):
3481 3477 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3482 3478 # Only revlogv2 repos can want sidedata.
3483 3479 return
3484 3480 self._wanted_sidedata.add(pycompat.bytestr(category))
3485 3481
3486 3482 def register_sidedata_computer(
3487 3483 self, kind, category, keys, computer, flags, replace=False
3488 3484 ):
3489 3485 if kind not in revlogconst.ALL_KINDS:
3490 3486 msg = _(b"unexpected revlog kind '%s'.")
3491 3487 raise error.ProgrammingError(msg % kind)
3492 3488 category = pycompat.bytestr(category)
3493 3489 already_registered = category in self._sidedata_computers.get(kind, [])
3494 3490 if already_registered and not replace:
3495 3491 msg = _(
3496 3492 b"cannot register a sidedata computer twice for category '%s'."
3497 3493 )
3498 3494 raise error.ProgrammingError(msg % category)
3499 3495 if replace and not already_registered:
3500 3496 msg = _(
3501 3497 b"cannot replace a sidedata computer that isn't registered "
3502 3498 b"for category '%s'."
3503 3499 )
3504 3500 raise error.ProgrammingError(msg % category)
3505 3501 self._sidedata_computers.setdefault(kind, {})
3506 3502 self._sidedata_computers[kind][category] = (keys, computer, flags)
3507 3503
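# Editorial sketch (not part of localrepo.py): wiring a sidedata computer for
# changelog revisions. The b'exp-example' category, EXAMPLE_KEY constant and
# computer body are hypothetical; the signature is modeled on the existing
# side-data computers (return the updated sidedata dict plus flags to add and
# remove).
#
#     def compute(repo, store, rev, prev_sidedata):
#         prev_sidedata[EXAMPLE_KEY] = b'...'
#         return prev_sidedata, (0, 0)
#
#     repo.register_wanted_sidedata(b'exp-example')
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG, b'exp-example', (EXAMPLE_KEY,), compute, 0
#     )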
3508 3504
3509 3505 # used to avoid circular references so destructors work
3510 3506 def aftertrans(files):
3511 3507 renamefiles = [tuple(t) for t in files]
3512 3508
3513 3509 def a():
3514 3510 for vfs, src, dest in renamefiles:
3515 3511 # if src and dest refer to the same file, vfs.rename is a no-op,
3516 3512 # leaving both src and dest on disk. Delete dest to make sure
3517 3513 # the rename couldn't be such a no-op.
3518 3514 vfs.tryunlink(dest)
3519 3515 try:
3520 3516 vfs.rename(src, dest)
3521 3517 except FileNotFoundError: # journal file does not yet exist
3522 3518 pass
3523 3519
3524 3520 return a
3525 3521
3526 3522
3527 3523 def undoname(fn):
3528 3524 base, name = os.path.split(fn)
3529 3525 assert name.startswith(b'journal')
3530 3526 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3531 3527
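# Worked example (editorial note, not part of localrepo.py):
#     undoname(b'.hg/store/journal.phaseroots') == b'.hg/store/undo.phaseroots'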
3532 3528
3533 3529 def instance(ui, path, create, intents=None, createopts=None):
3534 3530
3535 3531 # prevent cyclic import localrepo -> upgrade -> localrepo
3536 3532 from . import upgrade
3537 3533
3538 3534 localpath = urlutil.urllocalpath(path)
3539 3535 if create:
3540 3536 createrepository(ui, localpath, createopts=createopts)
3541 3537
3542 3538 def repo_maker():
3543 3539 return makelocalrepository(ui, localpath, intents=intents)
3544 3540
3545 3541 repo = repo_maker()
3546 3542 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3547 3543 return repo
3548 3544
3549 3545
3550 3546 def islocal(path):
3551 3547 return True
3552 3548
3553 3549
3554 3550 def defaultcreateopts(ui, createopts=None):
3555 3551 """Populate the default creation options for a repository.
3556 3552
3557 3553 A dictionary of explicitly requested creation options can be passed
3558 3554 in. Missing keys will be populated.
3559 3555 """
3560 3556 createopts = dict(createopts or {})
3561 3557
3562 3558 if b'backend' not in createopts:
3563 3559 # experimental config: storage.new-repo-backend
3564 3560 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3565 3561
3566 3562 return createopts
3567 3563
3568 3564
3569 3565 def clone_requirements(ui, createopts, srcrepo):
3570 3566 """clone the requirements of a local repo for a local clone
3571 3567
3572 3568 The store requirements are unchanged while the working copy requirements
3573 3569 depends on the configuration
3574 3570 """
3575 3571 target_requirements = set()
3576 3572 if not srcrepo.requirements:
3577 3573 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3578 3574 # with it.
3579 3575 return target_requirements
3580 3576 createopts = defaultcreateopts(ui, createopts=createopts)
3581 3577 for r in newreporequirements(ui, createopts):
3582 3578 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3583 3579 target_requirements.add(r)
3584 3580
3585 3581 for r in srcrepo.requirements:
3586 3582 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3587 3583 target_requirements.add(r)
3588 3584 return target_requirements
3589 3585
3590 3586
3591 3587 def newreporequirements(ui, createopts):
3592 3588 """Determine the set of requirements for a new local repository.
3593 3589
3594 3590 Extensions can wrap this function to specify custom requirements for
3595 3591 new repositories.
3596 3592 """
3597 3593
3598 3594 if b'backend' not in createopts:
3599 3595 raise error.ProgrammingError(
3600 3596 b'backend key not present in createopts; '
3601 3597 b'was defaultcreateopts() called?'
3602 3598 )
3603 3599
3604 3600 if createopts[b'backend'] != b'revlogv1':
3605 3601 raise error.Abort(
3606 3602 _(
3607 3603 b'unable to determine repository requirements for '
3608 3604 b'storage backend: %s'
3609 3605 )
3610 3606 % createopts[b'backend']
3611 3607 )
3612 3608
3613 3609 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3614 3610 if ui.configbool(b'format', b'usestore'):
3615 3611 requirements.add(requirementsmod.STORE_REQUIREMENT)
3616 3612 if ui.configbool(b'format', b'usefncache'):
3617 3613 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3618 3614 if ui.configbool(b'format', b'dotencode'):
3619 3615 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3620 3616
3621 3617 compengines = ui.configlist(b'format', b'revlog-compression')
3622 3618 for compengine in compengines:
3623 3619 if compengine in util.compengines:
3624 3620 engine = util.compengines[compengine]
3625 3621 if engine.available() and engine.revlogheader():
3626 3622 break
3627 3623 else:
3628 3624 raise error.Abort(
3629 3625 _(
3630 3626 b'compression engines %s defined by '
3631 3627 b'format.revlog-compression not available'
3632 3628 )
3633 3629 % b', '.join(b'"%s"' % e for e in compengines),
3634 3630 hint=_(
3635 3631 b'run "hg debuginstall" to list available '
3636 3632 b'compression engines'
3637 3633 ),
3638 3634 )
3639 3635
3640 3636 # zlib is the historical default and doesn't need an explicit requirement.
3641 3637 if compengine == b'zstd':
3642 3638 requirements.add(b'revlog-compression-zstd')
3643 3639 elif compengine != b'zlib':
3644 3640 requirements.add(b'exp-compression-%s' % compengine)
3645 3641
3646 3642 if scmutil.gdinitconfig(ui):
3647 3643 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3648 3644 if ui.configbool(b'format', b'sparse-revlog'):
3649 3645 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3650 3646
3651 3647 # experimental config: format.use-dirstate-v2
3652 3648 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3653 3649 if ui.configbool(b'format', b'use-dirstate-v2'):
3654 3650 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3655 3651
3656 3652 # experimental config: format.exp-use-copies-side-data-changeset
3657 3653 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3658 3654 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3659 3655 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3660 3656 if ui.configbool(b'experimental', b'treemanifest'):
3661 3657 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3662 3658
3663 3659 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3664 3660 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3665 3661 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3666 3662
3667 3663 revlogv2 = ui.config(b'experimental', b'revlogv2')
3668 3664 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3669 3665 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3670 3666 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3671 3667 # experimental config: format.internal-phase
3672 3668 if ui.configbool(b'format', b'use-internal-phase'):
3673 3669 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3674 3670
3675 3671 # experimental config: format.exp-archived-phase
3676 3672 if ui.configbool(b'format', b'exp-archived-phase'):
3677 3673 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3678 3674
3679 3675 if createopts.get(b'narrowfiles'):
3680 3676 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3681 3677
3682 3678 if createopts.get(b'lfs'):
3683 3679 requirements.add(b'lfs')
3684 3680
3685 3681 if ui.configbool(b'format', b'bookmarks-in-store'):
3686 3682 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3687 3683
3688 3684 if ui.configbool(b'format', b'use-persistent-nodemap'):
3689 3685 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3690 3686
3691 3687 # if share-safe is enabled, let's create the new repository with the new
3692 3688 # requirement
3693 3689 if ui.configbool(b'format', b'use-share-safe'):
3694 3690 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3695 3691
3696 3692 # if we are creating a share-repo¹ we have to handle requirements
3697 3693 # differently.
3698 3694 #
3699 3695 # [1] (i.e. reusing the store from another repository, just having a
3700 3696 # working copy)
3701 3697 if b'sharedrepo' in createopts:
3702 3698 source_requirements = set(createopts[b'sharedrepo'].requirements)
3703 3699
3704 3700 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3705 3701 # share to an old school repository, we have to copy the
3706 3702 # requirements and hope for the best.
3707 3703 requirements = source_requirements
3708 3704 else:
3709 3705 # We have control over the working copy only, so "copy" the non
3710 3706 # working copy part over, ignoring previous logic.
3711 3707 to_drop = set()
3712 3708 for req in requirements:
3713 3709 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3714 3710 continue
3715 3711 if req in source_requirements:
3716 3712 continue
3717 3713 to_drop.add(req)
3718 3714 requirements -= to_drop
3719 3715 requirements |= source_requirements
3720 3716
3721 3717 if createopts.get(b'sharedrelative'):
3722 3718 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3723 3719 else:
3724 3720 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3725 3721
3726 3722 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3727 3723 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3728 3724 msg = _(b"ignoring unknown tracked key version: %d\n")
3729 3725 hint = _(b"see `hg help config.format.use-dirstate-tracked-hint-version`")
3730 3726 if version != 1:
3731 3727 ui.warn(msg % version, hint=hint)
3732 3728 else:
3733 3729 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3734 3730
3735 3731 return requirements
3736 3732
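# Editorial note (not part of localrepo.py): with an out-of-the-box
# configuration the set returned above typically contains entries such as
# b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta' and
# b'sparserevlog'; the exact contents depend on the ui configuration and the
# Mercurial version.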
3737 3733
3738 3734 def checkrequirementscompat(ui, requirements):
3739 3735 """Checks compatibility of repository requirements enabled and disabled.
3740 3736
3741 3737 Returns a set of requirements which need to be dropped because dependent
3742 3738 requirements are not enabled. It also warns users about it.
3743 3739
3744 3740 dropped = set()
3745 3741
3746 3742 if requirementsmod.STORE_REQUIREMENT not in requirements:
3747 3743 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3748 3744 ui.warn(
3749 3745 _(
3750 3746 b'ignoring enabled \'format.bookmarks-in-store\' config '
3751 3747 b'because it is incompatible with disabled '
3752 3748 b'\'format.usestore\' config\n'
3753 3749 )
3754 3750 )
3755 3751 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3756 3752
3757 3753 if (
3758 3754 requirementsmod.SHARED_REQUIREMENT in requirements
3759 3755 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3760 3756 ):
3761 3757 raise error.Abort(
3762 3758 _(
3763 3759 b"cannot create shared repository as source was created"
3764 3760 b" with 'format.usestore' config disabled"
3765 3761 )
3766 3762 )
3767 3763
3768 3764 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3769 3765 if ui.hasconfig(b'format', b'use-share-safe'):
3770 3766 msg = _(
3771 3767 b"ignoring enabled 'format.use-share-safe' config because "
3772 3768 b"it is incompatible with disabled 'format.usestore'"
3773 3769 b" config\n"
3774 3770 )
3775 3771 ui.warn(msg)
3776 3772 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3777 3773
3778 3774 return dropped
3779 3775
3780 3776
3781 3777 def filterknowncreateopts(ui, createopts):
3782 3778 """Filters a dict of repo creation options against options that are known.
3783 3779
3784 3780 Receives a dict of repo creation options and returns a dict of those
3785 3781 options that we don't know how to handle.
3786 3782
3787 3783 This function is called as part of repository creation. If the
3788 3784 returned dict contains any items, repository creation will not
3789 3785 be allowed, as it means there was a request to create a repository
3790 3786 with options not recognized by loaded code.
3791 3787
3792 3788 Extensions can wrap this function to filter out creation options
3793 3789 they know how to handle.
3794 3790 """
3795 3791 known = {
3796 3792 b'backend',
3797 3793 b'lfs',
3798 3794 b'narrowfiles',
3799 3795 b'sharedrepo',
3800 3796 b'sharedrelative',
3801 3797 b'shareditems',
3802 3798 b'shallowfilestore',
3803 3799 }
3804 3800
3805 3801 return {k: v for k, v in createopts.items() if k not in known}
3806 3802
3807 3803
3808 3804 def createrepository(ui, path, createopts=None, requirements=None):
3809 3805 """Create a new repository in a vfs.
3810 3806
3811 3807 ``path`` path to the new repo's working directory.
3812 3808 ``createopts`` options for the new repository.
3813 3809 ``requirements`` predefined set of requirements.
3814 3810 (incompatible with ``createopts``)
3815 3811
3816 3812 The following keys for ``createopts`` are recognized:
3817 3813
3818 3814 backend
3819 3815 The storage backend to use.
3820 3816 lfs
3821 3817 Repository will be created with ``lfs`` requirement. The lfs extension
3822 3818 will automatically be loaded when the repository is accessed.
3823 3819 narrowfiles
3824 3820 Set up repository to support narrow file storage.
3825 3821 sharedrepo
3826 3822 Repository object from which storage should be shared.
3827 3823 sharedrelative
3828 3824 Boolean indicating if the path to the shared repo should be
3829 3825 stored as relative. By default, the pointer to the "parent" repo
3830 3826 is stored as an absolute path.
3831 3827 shareditems
3832 3828 Set of items to share to the new repository (in addition to storage).
3833 3829 shallowfilestore
3834 3830 Indicates that storage for files should be shallow (not all ancestor
3835 3831 revisions are known).
3836 3832 """
3837 3833
3838 3834 if requirements is not None:
3839 3835 if createopts is not None:
3840 3836 msg = b'cannot specify both createopts and requirements'
3841 3837 raise error.ProgrammingError(msg)
3842 3838 createopts = {}
3843 3839 else:
3844 3840 createopts = defaultcreateopts(ui, createopts=createopts)
3845 3841
3846 3842 unknownopts = filterknowncreateopts(ui, createopts)
3847 3843
3848 3844 if not isinstance(unknownopts, dict):
3849 3845 raise error.ProgrammingError(
3850 3846 b'filterknowncreateopts() did not return a dict'
3851 3847 )
3852 3848
3853 3849 if unknownopts:
3854 3850 raise error.Abort(
3855 3851 _(
3856 3852 b'unable to create repository because of unknown '
3857 3853 b'creation option: %s'
3858 3854 )
3859 3855 % b', '.join(sorted(unknownopts)),
3860 3856 hint=_(b'is a required extension not loaded?'),
3861 3857 )
3862 3858
3863 3859 requirements = newreporequirements(ui, createopts=createopts)
3864 3860 requirements -= checkrequirementscompat(ui, requirements)
3865 3861
3866 3862 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3867 3863
3868 3864 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3869 3865 if hgvfs.exists():
3870 3866 raise error.RepoError(_(b'repository %s already exists') % path)
3871 3867
3872 3868 if b'sharedrepo' in createopts:
3873 3869 sharedpath = createopts[b'sharedrepo'].sharedpath
3874 3870
3875 3871 if createopts.get(b'sharedrelative'):
3876 3872 try:
3877 3873 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3878 3874 sharedpath = util.pconvert(sharedpath)
3879 3875 except (IOError, ValueError) as e:
3880 3876 # ValueError is raised on Windows if the drive letters differ
3881 3877 # on each path.
3882 3878 raise error.Abort(
3883 3879 _(b'cannot calculate relative path'),
3884 3880 hint=stringutil.forcebytestr(e),
3885 3881 )
3886 3882
3887 3883 if not wdirvfs.exists():
3888 3884 wdirvfs.makedirs()
3889 3885
3890 3886 hgvfs.makedir(notindexed=True)
3891 3887 if b'sharedrepo' not in createopts:
3892 3888 hgvfs.mkdir(b'cache')
3893 3889 hgvfs.mkdir(b'wcache')
3894 3890
3895 3891 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3896 3892 if has_store and b'sharedrepo' not in createopts:
3897 3893 hgvfs.mkdir(b'store')
3898 3894
3899 3895 # We create an invalid changelog outside the store so very old
3900 3896 # Mercurial versions (which didn't know about the requirements
3901 3897 # file) encounter an error on reading the changelog. This
3902 3898 # effectively locks out old clients and prevents them from
3903 3899 # mucking with a repo in an unknown format.
3904 3900 #
3905 3901 # The revlog header has version 65535, which won't be recognized by
3906 3902 # such old clients.
3907 3903 hgvfs.append(
3908 3904 b'00changelog.i',
3909 3905 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3910 3906 b'layout',
3911 3907 )
3912 3908
3913 3909 # Filter the requirements into working copy and store ones
3914 3910 wcreq, storereq = scmutil.filterrequirements(requirements)
3915 3911 # write working copy ones
3916 3912 scmutil.writerequires(hgvfs, wcreq)
3917 3913 # If there are store requirements and the current repository
3918 3914 # is not a shared one, write stored requirements
3919 3915 # For new shared repository, we don't need to write the store
3920 3916 # requirements as they are already present in store requires
3921 3917 if storereq and b'sharedrepo' not in createopts:
3922 3918 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3923 3919 scmutil.writerequires(storevfs, storereq)
3924 3920
3925 3921 # Write out file telling readers where to find the shared store.
3926 3922 if b'sharedrepo' in createopts:
3927 3923 hgvfs.write(b'sharedpath', sharedpath)
3928 3924
3929 3925 if createopts.get(b'shareditems'):
3930 3926 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3931 3927 hgvfs.write(b'shared', shared)
3932 3928
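# Editorial sketch (not part of localrepo.py): creating a repository with
# explicit creation options, roughly what `hg init` does under the hood; the
# target path is made up.
#
#     from mercurial import localrepo, ui as uimod
#     u = uimod.ui.load()
#     localrepo.createrepository(u, b'/tmp/newrepo', createopts={b'lfs': True})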
3933 3929
3934 3930 def poisonrepository(repo):
3935 3931 """Poison a repository instance so it can no longer be used."""
3936 3932 # Perform any cleanup on the instance.
3937 3933 repo.close()
3938 3934
3939 3935 # Our strategy is to replace the type of the object with one that
3940 3936 # has all attribute lookups result in error.
3941 3937 #
3942 3938 # But we have to allow the close() method because some constructors
3943 3939 # of repos call close() on repo references.
3944 3940 class poisonedrepository:
3945 3941 def __getattribute__(self, item):
3946 3942 if item == 'close':
3947 3943 return object.__getattribute__(self, item)
3948 3944
3949 3945 raise error.ProgrammingError(
3950 3946 b'repo instances should not be used after unshare'
3951 3947 )
3952 3948
3953 3949 def close(self):
3954 3950 pass
3955 3951
3956 3952 # We may have a repoview, which intercepts __setattr__. So be sure
3957 3953 # we operate at the lowest level possible.
3958 3954 object.__setattr__(repo, '__class__', poisonedrepository)
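# Editorial sketch (not part of localrepo.py): after an unshare, callers poison
# the old instance so that stale references fail loudly.
#
#     poisonrepository(repo)
#     repo.close()      # still permitted
#     repo.changelog    # raises error.ProgrammingError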