requirements: move the comment about manifestv2 in the module...
marmoute
r49448:66b59fbb default
@@ -1,3918 +1,3913 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import functools
13 13 import os
14 14 import random
15 15 import sys
16 16 import time
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullrev,
24 24 sha1nodeconstants,
25 25 short,
26 26 )
27 27 from .pycompat import (
28 28 delattr,
29 29 getattr,
30 30 )
31 31 from . import (
32 32 bookmarks,
33 33 branchmap,
34 34 bundle2,
35 35 bundlecaches,
36 36 changegroup,
37 37 color,
38 38 commit,
39 39 context,
40 40 dirstate,
41 41 dirstateguard,
42 42 discovery,
43 43 encoding,
44 44 error,
45 45 exchange,
46 46 extensions,
47 47 filelog,
48 48 hook,
49 49 lock as lockmod,
50 50 match as matchmod,
51 51 mergestate as mergestatemod,
52 52 mergeutil,
53 53 namespaces,
54 54 narrowspec,
55 55 obsolete,
56 56 pathutil,
57 57 phases,
58 58 pushkey,
59 59 pycompat,
60 60 rcutil,
61 61 repoview,
62 62 requirements as requirementsmod,
63 63 revlog,
64 64 revset,
65 65 revsetlang,
66 66 scmutil,
67 67 sparse,
68 68 store as storemod,
69 69 subrepoutil,
70 70 tags as tagsmod,
71 71 transaction,
72 72 txnutil,
73 73 util,
74 74 vfs as vfsmod,
75 75 wireprototypes,
76 76 )
77 77
78 78 from .interfaces import (
79 79 repository,
80 80 util as interfaceutil,
81 81 )
82 82
83 83 from .utils import (
84 84 hashutil,
85 85 procutil,
86 86 stringutil,
87 87 urlutil,
88 88 )
89 89
90 90 from .revlogutils import (
91 91 concurrency_checker as revlogchecker,
92 92 constants as revlogconst,
93 93 sidedata as sidedatamod,
94 94 )
95 95
96 96 release = lockmod.release
97 97 urlerr = util.urlerr
98 98 urlreq = util.urlreq
99 99
100 100 # set of (path, vfs-location) tuples. vfs-location is:
101 101 # - 'plain' for vfs relative paths
102 102 # - '' for svfs relative paths
103 103 _cachedfiles = set()
104 104
105 105
106 106 class _basefilecache(scmutil.filecache):
107 107 """All filecache usage on repo are done for logic that should be unfiltered"""
108 108
109 109 def __get__(self, repo, type=None):
110 110 if repo is None:
111 111 return self
112 112 # proxy to unfiltered __dict__ since filtered repo has no entry
113 113 unfi = repo.unfiltered()
114 114 try:
115 115 return unfi.__dict__[self.sname]
116 116 except KeyError:
117 117 pass
118 118 return super(_basefilecache, self).__get__(unfi, type)
119 119
120 120 def set(self, repo, value):
121 121 return super(_basefilecache, self).set(repo.unfiltered(), value)
122 122
123 123
124 124 class repofilecache(_basefilecache):
125 125 """filecache for files in .hg but outside of .hg/store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(repofilecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b'plain'))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.vfs.join(fname)
134 134
135 135
136 136 class storecache(_basefilecache):
137 137 """filecache for files in the store"""
138 138
139 139 def __init__(self, *paths):
140 140 super(storecache, self).__init__(*paths)
141 141 for path in paths:
142 142 _cachedfiles.add((path, b''))
143 143
144 144 def join(self, obj, fname):
145 145 return obj.sjoin(fname)
146 146
147 147
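# Illustrative usage note (an editorial addition, not part of the original
# module): localrepository properties tie their cached values to on-disk
# files through these decorators; e.g. the dirstate property defined later
# in this class takes the shape:
#
#     @repofilecache(b'dirstate')
#     def dirstate(self):
#         ...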
148 148 class changelogcache(storecache):
149 149 """filecache for the changelog"""
150 150
151 151 def __init__(self):
152 152 super(changelogcache, self).__init__()
153 153 _cachedfiles.add((b'00changelog.i', b''))
154 154 _cachedfiles.add((b'00changelog.n', b''))
155 155
156 156 def tracked_paths(self, obj):
157 157 paths = [self.join(obj, b'00changelog.i')]
158 158 if obj.store.opener.options.get(b'persistent-nodemap', False):
159 159 paths.append(self.join(obj, b'00changelog.n'))
160 160 return paths
161 161
162 162
163 163 class manifestlogcache(storecache):
164 164 """filecache for the manifestlog"""
165 165
166 166 def __init__(self):
167 167 super(manifestlogcache, self).__init__()
168 168 _cachedfiles.add((b'00manifest.i', b''))
169 169 _cachedfiles.add((b'00manifest.n', b''))
170 170
171 171 def tracked_paths(self, obj):
172 172 paths = [self.join(obj, b'00manifest.i')]
173 173 if obj.store.opener.options.get(b'persistent-nodemap', False):
174 174 paths.append(self.join(obj, b'00manifest.n'))
175 175 return paths
176 176
177 177
178 178 class mixedrepostorecache(_basefilecache):
179 179 """filecache for a mix files in .hg/store and outside"""
180 180
181 181 def __init__(self, *pathsandlocations):
182 182 # scmutil.filecache only uses the path for passing back into our
183 183 # join(), so we can safely pass a list of paths and locations
184 184 super(mixedrepostorecache, self).__init__(*pathsandlocations)
185 185 _cachedfiles.update(pathsandlocations)
186 186
187 187 def join(self, obj, fnameandlocation):
188 188 fname, location = fnameandlocation
189 189 if location == b'plain':
190 190 return obj.vfs.join(fname)
191 191 else:
192 192 if location != b'':
193 193 raise error.ProgrammingError(
194 194 b'unexpected location: %s' % location
195 195 )
196 196 return obj.sjoin(fname)
197 197
198 198
199 199 def isfilecached(repo, name):
200 200 """check if a repo has already cached "name" filecache-ed property
201 201
202 202 This returns (cachedobj-or-None, iscached) tuple.
203 203 """
204 204 cacheentry = repo.unfiltered()._filecache.get(name, None)
205 205 if not cacheentry:
206 206 return None, False
207 207 return cacheentry.obj, True
208 208
209 209
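# Illustrative sketch (an editorial addition): callers use isfilecached()
# to peek at a filecache-ed property without forcing it to load; the
# property name b'_bookmarks' is one real example from this class.
def _example_probe_filecache(repo):
    obj, cached = isfilecached(repo, b'_bookmarks')
    if cached:
        return obj  # reuse without re-reading the underlying files
    return None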
210 210 class unfilteredpropertycache(util.propertycache):
211 211 """propertycache that apply to unfiltered repo only"""
212 212
213 213 def __get__(self, repo, type=None):
214 214 unfi = repo.unfiltered()
215 215 if unfi is repo:
216 216 return super(unfilteredpropertycache, self).__get__(unfi)
217 217 return getattr(unfi, self.name)
218 218
219 219
220 220 class filteredpropertycache(util.propertycache):
221 221 """propertycache that must take filtering in account"""
222 222
223 223 def cachevalue(self, obj, value):
224 224 object.__setattr__(obj, self.name, value)
225 225
226 226
227 227 def hasunfilteredcache(repo, name):
228 228 """check if a repo has an unfilteredpropertycache value for <name>"""
229 229 return name in vars(repo.unfiltered())
230 230
231 231
232 232 def unfilteredmethod(orig):
233 233 """decorate method that always need to be run on unfiltered version"""
234 234
235 235 @functools.wraps(orig)
236 236 def wrapper(repo, *args, **kwargs):
237 237 return orig(repo.unfiltered(), *args, **kwargs)
238 238
239 239 return wrapper
240 240
241 241
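# Illustrative sketch (an editorial addition): wrapping a repo-taking
# function with unfilteredmethod guarantees it runs against the unfiltered
# repository, whatever repoview the caller holds.
def _example_unfiltered_heads(repo):
    @unfilteredmethod
    def _heads(repo):
        return repo.heads()

    return _heads(repo)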
242 242 moderncaps = {
243 243 b'lookup',
244 244 b'branchmap',
245 245 b'pushkey',
246 246 b'known',
247 247 b'getbundle',
248 248 b'unbundle',
249 249 }
250 250 legacycaps = moderncaps.union({b'changegroupsubset'})
251 251
252 252
253 253 @interfaceutil.implementer(repository.ipeercommandexecutor)
254 254 class localcommandexecutor(object):
255 255 def __init__(self, peer):
256 256 self._peer = peer
257 257 self._sent = False
258 258 self._closed = False
259 259
260 260 def __enter__(self):
261 261 return self
262 262
263 263 def __exit__(self, exctype, excvalue, exctb):
264 264 self.close()
265 265
266 266 def callcommand(self, command, args):
267 267 if self._sent:
268 268 raise error.ProgrammingError(
269 269 b'callcommand() cannot be used after sendcommands()'
270 270 )
271 271
272 272 if self._closed:
273 273 raise error.ProgrammingError(
274 274 b'callcommand() cannot be used after close()'
275 275 )
276 276
277 277 # We don't need to support anything fancy. Just call the named
278 278 # method on the peer and return a resolved future.
279 279 fn = getattr(self._peer, pycompat.sysstr(command))
280 280
281 281 f = pycompat.futures.Future()
282 282
283 283 try:
284 284 result = fn(**pycompat.strkwargs(args))
285 285 except Exception:
286 286 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
287 287 else:
288 288 f.set_result(result)
289 289
290 290 return f
291 291
292 292 def sendcommands(self):
293 293 self._sent = True
294 294
295 295 def close(self):
296 296 self._closed = True
297 297
298 298
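# Illustrative sketch (an editorial addition): the executor above follows
# the generic peer command-executor protocol, so callers drive it with a
# context manager and consume resolved futures.
def _example_lookup(peer, key):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': key})
        e.sendcommands()
    return f.result()  # raises here if the command failed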
299 299 @interfaceutil.implementer(repository.ipeercommands)
300 300 class localpeer(repository.peer):
301 301 '''peer for a local repo; reflects only the most recent API'''
302 302
303 303 def __init__(self, repo, caps=None):
304 304 super(localpeer, self).__init__()
305 305
306 306 if caps is None:
307 307 caps = moderncaps.copy()
308 308 self._repo = repo.filtered(b'served')
309 309 self.ui = repo.ui
310 310
311 311 if repo._wanted_sidedata:
312 312 formatted = bundle2.format_remote_wanted_sidedata(repo)
313 313 caps.add(b'exp-wanted-sidedata=' + formatted)
314 314
315 315 self._caps = repo._restrictcapabilities(caps)
316 316
317 317 # Begin of _basepeer interface.
318 318
319 319 def url(self):
320 320 return self._repo.url()
321 321
322 322 def local(self):
323 323 return self._repo
324 324
325 325 def peer(self):
326 326 return self
327 327
328 328 def canpush(self):
329 329 return True
330 330
331 331 def close(self):
332 332 self._repo.close()
333 333
334 334 # End of _basepeer interface.
335 335
336 336 # Begin of _basewirecommands interface.
337 337
338 338 def branchmap(self):
339 339 return self._repo.branchmap()
340 340
341 341 def capabilities(self):
342 342 return self._caps
343 343
344 344 def clonebundles(self):
345 345 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
346 346
347 347 def debugwireargs(self, one, two, three=None, four=None, five=None):
348 348 """Used to test argument passing over the wire"""
349 349 return b"%s %s %s %s %s" % (
350 350 one,
351 351 two,
352 352 pycompat.bytestr(three),
353 353 pycompat.bytestr(four),
354 354 pycompat.bytestr(five),
355 355 )
356 356
357 357 def getbundle(
358 358 self,
359 359 source,
360 360 heads=None,
361 361 common=None,
362 362 bundlecaps=None,
363 363 remote_sidedata=None,
364 364 **kwargs
365 365 ):
366 366 chunks = exchange.getbundlechunks(
367 367 self._repo,
368 368 source,
369 369 heads=heads,
370 370 common=common,
371 371 bundlecaps=bundlecaps,
372 372 remote_sidedata=remote_sidedata,
373 373 **kwargs
374 374 )[1]
375 375 cb = util.chunkbuffer(chunks)
376 376
377 377 if exchange.bundle2requested(bundlecaps):
378 378 # When requesting a bundle2, getbundle returns a stream to make the
379 379 # wire level function happier. We need to build a proper object
380 380 # from it in local peer.
381 381 return bundle2.getunbundler(self.ui, cb)
382 382 else:
383 383 return changegroup.getunbundler(b'01', cb, None)
384 384
385 385 def heads(self):
386 386 return self._repo.heads()
387 387
388 388 def known(self, nodes):
389 389 return self._repo.known(nodes)
390 390
391 391 def listkeys(self, namespace):
392 392 return self._repo.listkeys(namespace)
393 393
394 394 def lookup(self, key):
395 395 return self._repo.lookup(key)
396 396
397 397 def pushkey(self, namespace, key, old, new):
398 398 return self._repo.pushkey(namespace, key, old, new)
399 399
400 400 def stream_out(self):
401 401 raise error.Abort(_(b'cannot perform stream clone against local peer'))
402 402
403 403 def unbundle(self, bundle, heads, url):
404 404 """apply a bundle on a repo
405 405
406 406 This function handles the repo locking itself."""
407 407 try:
408 408 try:
409 409 bundle = exchange.readbundle(self.ui, bundle, None)
410 410 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
411 411 if util.safehasattr(ret, b'getchunks'):
412 412 # This is a bundle20 object, turn it into an unbundler.
413 413 # This little dance should be dropped eventually when the
414 414 # API is finally improved.
415 415 stream = util.chunkbuffer(ret.getchunks())
416 416 ret = bundle2.getunbundler(self.ui, stream)
417 417 return ret
418 418 except Exception as exc:
419 419 # If the exception contains output salvaged from a bundle2
420 420 # reply, we need to make sure it is printed before continuing
421 421 # to fail. So we build a bundle2 with such output and consume
422 422 # it directly.
423 423 #
424 424 # This is not very elegant but allows a "simple" solution for
425 425 # issue4594
426 426 output = getattr(exc, '_bundle2salvagedoutput', ())
427 427 if output:
428 428 bundler = bundle2.bundle20(self._repo.ui)
429 429 for out in output:
430 430 bundler.addpart(out)
431 431 stream = util.chunkbuffer(bundler.getchunks())
432 432 b = bundle2.getunbundler(self.ui, stream)
433 433 bundle2.processbundle(self._repo, b)
434 434 raise
435 435 except error.PushRaced as exc:
436 436 raise error.ResponseError(
437 437 _(b'push failed:'), stringutil.forcebytestr(exc)
438 438 )
439 439
440 440 # End of _basewirecommands interface.
441 441
442 442 # Begin of peer interface.
443 443
444 444 def commandexecutor(self):
445 445 return localcommandexecutor(self)
446 446
447 447 # End of peer interface.
448 448
449 449
450 450 @interfaceutil.implementer(repository.ipeerlegacycommands)
451 451 class locallegacypeer(localpeer):
452 452 """peer extension which implements legacy methods too; used for tests with
453 453 restricted capabilities"""
454 454
455 455 def __init__(self, repo):
456 456 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
457 457
458 458 # Begin of baselegacywirecommands interface.
459 459
460 460 def between(self, pairs):
461 461 return self._repo.between(pairs)
462 462
463 463 def branches(self, nodes):
464 464 return self._repo.branches(nodes)
465 465
466 466 def changegroup(self, nodes, source):
467 467 outgoing = discovery.outgoing(
468 468 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
469 469 )
470 470 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
471 471
472 472 def changegroupsubset(self, bases, heads, source):
473 473 outgoing = discovery.outgoing(
474 474 self._repo, missingroots=bases, ancestorsof=heads
475 475 )
476 476 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
477 477
478 478 # End of baselegacywirecommands interface.
479 479
480 480
481 481 # Functions receiving (ui, features) that extensions can register to impact
482 482 # the ability to load repositories with custom requirements. Only
483 483 # functions defined in loaded extensions are called.
484 484 #
485 485 # The function receives a set of requirement strings that the repository
486 486 # is capable of opening. Functions will typically add elements to the
487 487 # set to reflect that the extension knows how to handle those requirements.
488 488 featuresetupfuncs = set()
489 489
490 490
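# Illustrative sketch (an editorial addition; the requirement name is
# hypothetical): an extension advertises support for an extra requirement
# by registering a callback from its own module, e.g.
# localrepo.featuresetupfuncs.add(_example_featuresetup) in its uisetup().
# Only callbacks defined in a loaded extension module are actually invoked.
def _example_featuresetup(ui, features):
    features.add(b'exp-myfeature')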
491 491 def _getsharedvfs(hgvfs, requirements):
492 492 """returns the vfs object pointing to root of shared source
493 493 repo for a shared repository
494 494
495 495 hgvfs is vfs pointing at .hg/ of current repo (shared one)
496 496 requirements is a set of requirements of current repo (shared one)
497 497 """
498 498 # The ``shared`` or ``relshared`` requirements indicate the
499 499 # store lives in the path contained in the ``.hg/sharedpath`` file.
500 500 # This is an absolute path for ``shared`` and relative to
501 501 # ``.hg/`` for ``relshared``.
502 502 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
503 503 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
504 504 sharedpath = util.normpath(hgvfs.join(sharedpath))
505 505
506 506 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
507 507
508 508 if not sharedvfs.exists():
509 509 raise error.RepoError(
510 510 _(b'.hg/sharedpath points to nonexistent directory %s')
511 511 % sharedvfs.base
512 512 )
513 513 return sharedvfs
514 514
515 515
516 516 def _readrequires(vfs, allowmissing):
517 517 """reads the require file present at root of this vfs
518 518 and return a set of requirements
519 519
520 520 If allowmissing is True, we suppress ENOENT if raised"""
521 521 # requires file contains a newline-delimited list of
522 522 # features/capabilities the opener (us) must have in order to use
523 523 # the repository. This file was introduced in Mercurial 0.9.2,
524 524 # which means very old repositories may not have one. We assume
525 525 # a missing file translates to no requirements.
526 526 try:
527 527 requirements = set(vfs.read(b'requires').splitlines())
528 528 except IOError as e:
529 529 if not (allowmissing and e.errno == errno.ENOENT):
530 530 raise
531 531 requirements = set()
532 532 return requirements
533 533
534 534
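# Illustrative sketch (an editorial addition): the `requires` file read
# above is a plain newline-delimited list of feature names such as
# b'revlogv1', b'store', b'fncache', b'dotencode'. A stand-alone reader
# needs nothing beyond the stdlib:
def _example_read_requires(path):
    with open(path, 'rb') as fh:
        return set(fh.read().splitlines())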
535 535 def makelocalrepository(baseui, path, intents=None):
536 536 """Create a local repository object.
537 537
538 538 Given arguments needed to construct a local repository, this function
539 539 performs various early repository loading functionality (such as
540 540 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
541 541 the repository can be opened, derives a type suitable for representing
542 542 that repository, and returns an instance of it.
543 543
544 544 The returned object conforms to the ``repository.completelocalrepository``
545 545 interface.
546 546
547 547 The repository type is derived by calling a series of factory functions
548 548 for each aspect/interface of the final repository. These are defined by
549 549 ``REPO_INTERFACES``.
550 550
551 551 Each factory function is called to produce a type implementing a specific
552 552 interface. The cumulative list of returned types will be combined into a
553 553 new type and that type will be instantiated to represent the local
554 554 repository.
555 555
556 556 The factory functions each receive various state that may be consulted
557 557 as part of deriving a type.
558 558
559 559 Extensions should wrap these factory functions to customize repository type
560 560 creation. Note that an extension's wrapped function may be called even if
561 561 that extension is not loaded for the repo being constructed. Extensions
562 562 should check if their ``__name__`` appears in the
563 563 ``extensionmodulenames`` set passed to the factory function and no-op if
564 564 not.
565 565 """
566 566 ui = baseui.copy()
567 567 # Prevent copying repo configuration.
568 568 ui.copy = baseui.copy
569 569
570 570 # Working directory VFS rooted at repository root.
571 571 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
572 572
573 573 # Main VFS for .hg/ directory.
574 574 hgpath = wdirvfs.join(b'.hg')
575 575 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
576 576 # Whether this repository is a shared one or not
577 577 shared = False
578 578 # If this repository is shared, vfs pointing to shared repo
579 579 sharedvfs = None
580 580
581 581 # The .hg/ path should exist and should be a directory. All other
582 582 # cases are errors.
583 583 if not hgvfs.isdir():
584 584 try:
585 585 hgvfs.stat()
586 586 except OSError as e:
587 587 if e.errno != errno.ENOENT:
588 588 raise
589 589 except ValueError as e:
590 590 # Can be raised on Python 3.8 when path is invalid.
591 591 raise error.Abort(
592 592 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
593 593 )
594 594
595 595 raise error.RepoError(_(b'repository %s not found') % path)
596 596
597 597 requirements = _readrequires(hgvfs, True)
598 598 shared = (
599 599 requirementsmod.SHARED_REQUIREMENT in requirements
600 600 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
601 601 )
602 602 storevfs = None
603 603 if shared:
604 604 # This is a shared repo
605 605 sharedvfs = _getsharedvfs(hgvfs, requirements)
606 606 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
607 607 else:
608 608 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
609 609
610 610 # if .hg/requires contains the sharesafe requirement, it means
611 611 # there exists a `.hg/store/requires` too and we should read it
612 612 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
613 613 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
614 614 # is not present; refer to checkrequirementscompat() for that
615 615 #
616 616 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
617 617 # repository was shared the old way. We check the share source .hg/requires
618 618 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
619 619 # to be reshared
620 620 hint = _(b"see `hg help config.format.use-share-safe` for more information")
621 621 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
622 622
623 623 if (
624 624 shared
625 625 and requirementsmod.SHARESAFE_REQUIREMENT
626 626 not in _readrequires(sharedvfs, True)
627 627 ):
628 628 mismatch_warn = ui.configbool(
629 629 b'share', b'safe-mismatch.source-not-safe.warn'
630 630 )
631 631 mismatch_config = ui.config(
632 632 b'share', b'safe-mismatch.source-not-safe'
633 633 )
634 634 if mismatch_config in (
635 635 b'downgrade-allow',
636 636 b'allow',
637 637 b'downgrade-abort',
638 638 ):
639 639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 640 from . import upgrade
641 641
642 642 upgrade.downgrade_share_to_non_safe(
643 643 ui,
644 644 hgvfs,
645 645 sharedvfs,
646 646 requirements,
647 647 mismatch_config,
648 648 mismatch_warn,
649 649 )
650 650 elif mismatch_config == b'abort':
651 651 raise error.Abort(
652 652 _(b"share source does not support share-safe requirement"),
653 653 hint=hint,
654 654 )
655 655 else:
656 656 raise error.Abort(
657 657 _(
658 658 b"share-safe mismatch with source.\nUnrecognized"
659 659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 660 b" set."
661 661 )
662 662 % mismatch_config,
663 663 hint=hint,
664 664 )
665 665 else:
666 666 requirements |= _readrequires(storevfs, False)
667 667 elif shared:
668 668 sourcerequires = _readrequires(sharedvfs, False)
669 669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
670 670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
671 671 mismatch_warn = ui.configbool(
672 672 b'share', b'safe-mismatch.source-safe.warn'
673 673 )
674 674 if mismatch_config in (
675 675 b'upgrade-allow',
676 676 b'allow',
677 677 b'upgrade-abort',
678 678 ):
679 679 # prevent cyclic import localrepo -> upgrade -> localrepo
680 680 from . import upgrade
681 681
682 682 upgrade.upgrade_share_to_safe(
683 683 ui,
684 684 hgvfs,
685 685 storevfs,
686 686 requirements,
687 687 mismatch_config,
688 688 mismatch_warn,
689 689 )
690 690 elif mismatch_config == b'abort':
691 691 raise error.Abort(
692 692 _(
693 693 b'version mismatch: source uses share-safe'
694 694 b' functionality while the current share does not'
695 695 ),
696 696 hint=hint,
697 697 )
698 698 else:
699 699 raise error.Abort(
700 700 _(
701 701 b"share-safe mismatch with source.\nUnrecognized"
702 702 b" value '%s' of `share.safe-mismatch.source-safe` set."
703 703 )
704 704 % mismatch_config,
705 705 hint=hint,
706 706 )
707 707
708 708 # The .hg/hgrc file may load extensions or contain config options
709 709 # that influence repository construction. Attempt to load it and
710 710 # process any new extensions that it may have pulled in.
711 711 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
712 712 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
713 713 extensions.loadall(ui)
714 714 extensions.populateui(ui)
715 715
716 716 # Set of module names of extensions loaded for this repository.
717 717 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
718 718
719 719 supportedrequirements = gathersupportedrequirements(ui)
720 720
721 721 # We first validate the requirements are known.
722 722 ensurerequirementsrecognized(requirements, supportedrequirements)
723 723
724 724 # Then we validate that the known set is reasonable to use together.
725 725 ensurerequirementscompatible(ui, requirements)
726 726
727 727 # TODO there are unhandled edge cases related to opening repositories with
728 728 # shared storage. If storage is shared, we should also test for requirements
729 729 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
730 730 # that repo, as that repo may load extensions needed to open it. This is a
731 731 # bit complicated because we don't want the other hgrc to overwrite settings
732 732 # in this hgrc.
733 733 #
734 734 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
735 735 # file when sharing repos. But if a requirement is added after the share is
736 736 # performed, thereby introducing a new requirement for the opener, we
737 737 # will not see that and could encounter a run-time error interacting with
738 738 # that shared store since it has an unknown-to-us requirement.
739 739
740 740 # At this point, we know we should be capable of opening the repository.
741 741 # Now get on with doing that.
742 742
743 743 features = set()
744 744
745 745 # The "store" part of the repository holds versioned data. How it is
746 746 # accessed is determined by various requirements. If `shared` or
747 747 # `relshared` requirements are present, this indicates current repository
748 748 # is a share and store exists in path mentioned in `.hg/sharedpath`
749 749 if shared:
750 750 storebasepath = sharedvfs.base
751 751 cachepath = sharedvfs.join(b'cache')
752 752 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
753 753 else:
754 754 storebasepath = hgvfs.base
755 755 cachepath = hgvfs.join(b'cache')
756 756 wcachepath = hgvfs.join(b'wcache')
757 757
758 758 # The store has changed over time and the exact layout is dictated by
759 759 # requirements. The store interface abstracts differences across all
760 760 # of them.
761 761 store = makestore(
762 762 requirements,
763 763 storebasepath,
764 764 lambda base: vfsmod.vfs(base, cacheaudited=True),
765 765 )
766 766 hgvfs.createmode = store.createmode
767 767
768 768 storevfs = store.vfs
769 769 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
770 770
771 771 if (
772 772 requirementsmod.REVLOGV2_REQUIREMENT in requirements
773 773 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
774 774 ):
775 775 features.add(repository.REPO_FEATURE_SIDE_DATA)
776 776 # the revlogv2 docket introduced race condition that we need to fix
777 777 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
778 778
779 779 # The cache vfs is used to manage cache files.
780 780 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
781 781 cachevfs.createmode = store.createmode
782 782 # The cache vfs is used to manage cache files related to the working copy
783 783 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
784 784 wcachevfs.createmode = store.createmode
785 785
786 786 # Now resolve the type for the repository object. We do this by repeatedly
787 787 # calling a factory function to produce types for specific aspects of the
788 788 # repo's operation. The aggregate returned types are used as base classes
789 789 # for a dynamically-derived type, which will represent our new repository.
790 790
791 791 bases = []
792 792 extrastate = {}
793 793
794 794 for iface, fn in REPO_INTERFACES:
795 795 # We pass all potentially useful state to give extensions tons of
796 796 # flexibility.
797 797 typ = fn()(
798 798 ui=ui,
799 799 intents=intents,
800 800 requirements=requirements,
801 801 features=features,
802 802 wdirvfs=wdirvfs,
803 803 hgvfs=hgvfs,
804 804 store=store,
805 805 storevfs=storevfs,
806 806 storeoptions=storevfs.options,
807 807 cachevfs=cachevfs,
808 808 wcachevfs=wcachevfs,
809 809 extensionmodulenames=extensionmodulenames,
810 810 extrastate=extrastate,
811 811 baseclasses=bases,
812 812 )
813 813
814 814 if not isinstance(typ, type):
815 815 raise error.ProgrammingError(
816 816 b'unable to construct type for %s' % iface
817 817 )
818 818
819 819 bases.append(typ)
820 820
821 821 # type() allows you to use characters in type names that wouldn't be
822 822 # recognized as Python symbols in source code. We abuse that to add
823 823 # rich information about our constructed repo.
824 824 name = pycompat.sysstr(
825 825 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
826 826 )
827 827
828 828 cls = type(name, tuple(bases), {})
829 829
830 830 return cls(
831 831 baseui=baseui,
832 832 ui=ui,
833 833 origroot=path,
834 834 wdirvfs=wdirvfs,
835 835 hgvfs=hgvfs,
836 836 requirements=requirements,
837 837 supportedrequirements=supportedrequirements,
838 838 sharedpath=storebasepath,
839 839 store=store,
840 840 cachevfs=cachevfs,
841 841 wcachevfs=wcachevfs,
842 842 features=features,
843 843 intents=intents,
844 844 )
845 845
846 846
847 847 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
848 848 """Load hgrc files/content into a ui instance.
849 849
850 850 This is called during repository opening to load any additional
851 851 config files or settings relevant to the current repository.
852 852
853 853 Returns a bool indicating whether any additional configs were loaded.
854 854
855 855 Extensions should monkeypatch this function to modify how per-repo
856 856 configs are loaded. For example, an extension may wish to pull in
857 857 configs from alternate files or sources.
858 858
859 859 sharedvfs is a vfs object pointing to the source repo if the current one is a
860 860 shared one
861 861 """
862 862 if not rcutil.use_repo_hgrc():
863 863 return False
864 864
865 865 ret = False
866 866 # first load config from shared source if we have to
867 867 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
868 868 try:
869 869 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
870 870 ret = True
871 871 except IOError:
872 872 pass
873 873
874 874 try:
875 875 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
876 876 ret = True
877 877 except IOError:
878 878 pass
879 879
880 880 try:
881 881 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
882 882 ret = True
883 883 except IOError:
884 884 pass
885 885
886 886 return ret
887 887
888 888
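# Illustrative sketch (an editorial addition; the filename b'hgrc-example'
# is hypothetical): an extension pulls configs from an extra source by
# wrapping loadhgrc(), typically via
# extensions.wrapfunction(localrepo, 'loadhgrc', _example_wrapped_loadhgrc).
def _example_wrapped_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-example'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret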
889 889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
890 890 """Perform additional actions after .hg/hgrc is loaded.
891 891
892 892 This function is called during repository loading immediately after
893 893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
894 894
895 895 The function can be used to validate configs, automatically add
896 896 options (including extensions) based on requirements, etc.
897 897 """
898 898
899 899 # Map of requirements to list of extensions to load automatically when
900 900 # requirement is present.
901 901 autoextensions = {
902 902 b'git': [b'git'],
903 903 b'largefiles': [b'largefiles'],
904 904 b'lfs': [b'lfs'],
905 905 }
906 906
907 907 for requirement, names in sorted(autoextensions.items()):
908 908 if requirement not in requirements:
909 909 continue
910 910
911 911 for name in names:
912 912 if not ui.hasconfig(b'extensions', name):
913 913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
914 914
915 915
916 916 def gathersupportedrequirements(ui):
917 917 """Determine the complete set of recognized requirements."""
918 918 # Start with all requirements supported by this file.
919 919 supported = set(localrepository._basesupported)
920 920
921 921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
922 922 # relevant to this ui instance.
923 923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
924 924
925 925 for fn in featuresetupfuncs:
926 926 if fn.__module__ in modules:
927 927 fn(ui, supported)
928 928
929 929 # Add derived requirements from registered compression engines.
930 930 for name in util.compengines:
931 931 engine = util.compengines[name]
932 932 if engine.available() and engine.revlogheader():
933 933 supported.add(b'exp-compression-%s' % name)
934 934 if engine.name() == b'zstd':
935 935 supported.add(b'revlog-compression-zstd')
936 936
937 937 return supported
938 938
939 939
940 940 def ensurerequirementsrecognized(requirements, supported):
941 941 """Validate that a set of local requirements is recognized.
942 942
943 943 Receives a set of requirements. Raises an ``error.RepoError`` if there
944 944 exists any requirement in that set that currently loaded code doesn't
945 945 recognize.
946 946
947 947 Returns a set of supported requirements.
948 948 """
949 949 missing = set()
950 950
951 951 for requirement in requirements:
952 952 if requirement in supported:
953 953 continue
954 954
955 955 if not requirement or not requirement[0:1].isalnum():
956 956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
957 957
958 958 missing.add(requirement)
959 959
960 960 if missing:
961 961 raise error.RequirementError(
962 962 _(b'repository requires features unknown to this Mercurial: %s')
963 963 % b' '.join(sorted(missing)),
964 964 hint=_(
965 965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
966 966 b'for more information'
967 967 ),
968 968 )
969 969
970 970
971 971 def ensurerequirementscompatible(ui, requirements):
972 972 """Validates that a set of recognized requirements is mutually compatible.
973 973
974 974 Some requirements may not be compatible with others or require
975 975 config options that aren't enabled. This function is called during
976 976 repository opening to ensure that the set of requirements needed
977 977 to open a repository is sane and compatible with config options.
978 978
979 979 Extensions can monkeypatch this function to perform additional
980 980 checking.
981 981
982 982 ``error.RepoError`` should be raised on failure.
983 983 """
984 984 if (
985 985 requirementsmod.SPARSE_REQUIREMENT in requirements
986 986 and not sparse.enabled
987 987 ):
988 988 raise error.RepoError(
989 989 _(
990 990 b'repository is using sparse feature but '
991 991 b'sparse is not enabled; enable the '
992 992 b'"sparse" extensions to access'
993 993 )
994 994 )
995 995
996 996
997 997 def makestore(requirements, path, vfstype):
998 998 """Construct a storage object for a repository."""
999 999 if requirementsmod.STORE_REQUIREMENT in requirements:
1000 1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1001 1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1002 1002 return storemod.fncachestore(path, vfstype, dotencode)
1003 1003
1004 1004 return storemod.encodedstore(path, vfstype)
1005 1005
1006 1006 return storemod.basicstore(path, vfstype)
1007 1007
1008 1008
1009 1009 def resolvestorevfsoptions(ui, requirements, features):
1010 1010 """Resolve the options to pass to the store vfs opener.
1011 1011
1012 1012 The returned dict is used to influence behavior of the storage layer.
1013 1013 """
1014 1014 options = {}
1015 1015
1016 1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1017 1017 options[b'treemanifest'] = True
1018 1018
1019 1019 # experimental config: format.manifestcachesize
1020 1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1021 1021 if manifestcachesize is not None:
1022 1022 options[b'manifestcachesize'] = manifestcachesize
1023 1023
1024 1024 # In the absence of another requirement superseding a revlog-related
1025 1025 # requirement, we have to assume the repo is using revlog version 0.
1026 1026 # This revlog format is super old and we don't bother trying to parse
1027 1027 # opener options for it because those options wouldn't do anything
1028 1028 # meaningful on such old repos.
1029 1029 if (
1030 1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1031 1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1032 1032 ):
1033 1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1034 1034 else: # explicitly mark repo as using revlogv0
1035 1035 options[b'revlogv0'] = True
1036 1036
1037 1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1038 1038 options[b'copies-storage'] = b'changeset-sidedata'
1039 1039 else:
1040 1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1041 1041 copiesextramode = (b'changeset-only', b'compatibility')
1042 1042 if writecopiesto in copiesextramode:
1043 1043 options[b'copies-storage'] = b'extra'
1044 1044
1045 1045 return options
1046 1046
1047 1047
1048 1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1049 1049 """Resolve opener options specific to revlogs."""
1050 1050
1051 1051 options = {}
1052 1052 options[b'flagprocessors'] = {}
1053 1053
1054 1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1055 1055 options[b'revlogv1'] = True
1056 1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1057 1057 options[b'revlogv2'] = True
1058 1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1059 1059 options[b'changelogv2'] = True
1060 1060
1061 1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1062 1062 options[b'generaldelta'] = True
1063 1063
1064 1064 # experimental config: format.chunkcachesize
1065 1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1066 1066 if chunkcachesize is not None:
1067 1067 options[b'chunkcachesize'] = chunkcachesize
1068 1068
1069 1069 deltabothparents = ui.configbool(
1070 1070 b'storage', b'revlog.optimize-delta-parent-choice'
1071 1071 )
1072 1072 options[b'deltabothparents'] = deltabothparents
1073 1073
1074 1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1075 1075 options[b'issue6528.fix-incoming'] = issue6528
1076 1076
1077 1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1078 1078 lazydeltabase = False
1079 1079 if lazydelta:
1080 1080 lazydeltabase = ui.configbool(
1081 1081 b'storage', b'revlog.reuse-external-delta-parent'
1082 1082 )
1083 1083 if lazydeltabase is None:
1084 1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1085 1085 options[b'lazydelta'] = lazydelta
1086 1086 options[b'lazydeltabase'] = lazydeltabase
1087 1087
1088 1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1089 1089 if 0 <= chainspan:
1090 1090 options[b'maxdeltachainspan'] = chainspan
1091 1091
1092 1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1093 1093 if mmapindexthreshold is not None:
1094 1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1095 1095
1096 1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1097 1097 srdensitythres = float(
1098 1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1099 1099 )
1100 1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1101 1101 options[b'with-sparse-read'] = withsparseread
1102 1102 options[b'sparse-read-density-threshold'] = srdensitythres
1103 1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1104 1104
1105 1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1106 1106 options[b'sparse-revlog'] = sparserevlog
1107 1107 if sparserevlog:
1108 1108 options[b'generaldelta'] = True
1109 1109
1110 1110 maxchainlen = None
1111 1111 if sparserevlog:
1112 1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1113 1113 # experimental config: format.maxchainlen
1114 1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1115 1115 if maxchainlen is not None:
1116 1116 options[b'maxchainlen'] = maxchainlen
1117 1117
1118 1118 for r in requirements:
1119 1119 # we allow multiple compression engine requirements to co-exist because,
1120 1120 # strictly speaking, revlog seems to support mixed compression styles.
1121 1121 #
1122 1122 # The compression used for new entries will be "the last one"
1123 1123 prefix = r.startswith
1124 1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
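# editorial note: split(b'-', 2)[2] keeps everything after the second
# b'-', e.g. b'revlog-compression-zstd' -> b'zstd'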
1125 1125 options[b'compengine'] = r.split(b'-', 2)[2]
1126 1126
1127 1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1128 1128 if options[b'zlib.level'] is not None:
1129 1129 if not (0 <= options[b'zlib.level'] <= 9):
1130 1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1131 1131 raise error.Abort(msg % options[b'zlib.level'])
1132 1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1133 1133 if options[b'zstd.level'] is not None:
1134 1134 if not (0 <= options[b'zstd.level'] <= 22):
1135 1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1136 1136 raise error.Abort(msg % options[b'zstd.level'])
1137 1137
1138 1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1139 1139 options[b'enableellipsis'] = True
1140 1140
1141 1141 if ui.configbool(b'experimental', b'rust.index'):
1142 1142 options[b'rust.index'] = True
1143 1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1144 1144 slow_path = ui.config(
1145 1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1146 1146 )
1147 1147 if slow_path not in (b'allow', b'warn', b'abort'):
1148 1148 default = ui.config_default(
1149 1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1150 1150 )
1151 1151 msg = _(
1152 1152 b'unknown value for config '
1153 1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1154 1154 )
1155 1155 ui.warn(msg % slow_path)
1156 1156 if not ui.quiet:
1157 1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1158 1158 slow_path = default
1159 1159
1160 1160 msg = _(
1161 1161 b"accessing `persistent-nodemap` repository without associated "
1162 1162 b"fast implementation."
1163 1163 )
1164 1164 hint = _(
1165 1165 b"check `hg help config.format.use-persistent-nodemap` "
1166 1166 b"for details"
1167 1167 )
1168 1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1169 1169 if slow_path == b'warn':
1170 1170 msg = b"warning: " + msg + b'\n'
1171 1171 ui.warn(msg)
1172 1172 if not ui.quiet:
1173 1173 hint = b'(' + hint + b')\n'
1174 1174 ui.warn(hint)
1175 1175 if slow_path == b'abort':
1176 1176 raise error.Abort(msg, hint=hint)
1177 1177 options[b'persistent-nodemap'] = True
1178 1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1179 1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1180 1180 if slow_path not in (b'allow', b'warn', b'abort'):
1181 1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1182 1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1183 1183 ui.warn(msg % slow_path)
1184 1184 if not ui.quiet:
1185 1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 1186 slow_path = default
1187 1187
1188 1188 msg = _(
1189 1189 b"accessing `dirstate-v2` repository without associated "
1190 1190 b"fast implementation."
1191 1191 )
1192 1192 hint = _(
1193 1193 b"check `hg help config.format.exp-rc-dirstate-v2` " b"for details"
1194 1194 )
1195 1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1196 1196 if slow_path == b'warn':
1197 1197 msg = b"warning: " + msg + b'\n'
1198 1198 ui.warn(msg)
1199 1199 if not ui.quiet:
1200 1200 hint = b'(' + hint + b')\n'
1201 1201 ui.warn(hint)
1202 1202 if slow_path == b'abort':
1203 1203 raise error.Abort(msg, hint=hint)
1204 1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1205 1205 options[b'persistent-nodemap.mmap'] = True
1206 1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1207 1207 options[b'devel-force-nodemap'] = True
1208 1208
1209 1209 return options
1210 1210
1211 1211
1212 1212 def makemain(**kwargs):
1213 1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1214 1214 return localrepository
1215 1215
1216 1216
1217 1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1218 1218 class revlogfilestorage(object):
1219 1219 """File storage when using revlogs."""
1220 1220
1221 1221 def file(self, path):
1222 1222 if path.startswith(b'/'):
1223 1223 path = path[1:]
1224 1224
1225 1225 return filelog.filelog(self.svfs, path)
1226 1226
1227 1227
1228 1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 1229 class revlognarrowfilestorage(object):
1230 1230 """File storage when using revlogs and narrow files."""
1231 1231
1232 1232 def file(self, path):
1233 1233 if path.startswith(b'/'):
1234 1234 path = path[1:]
1235 1235
1236 1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1237 1237
1238 1238
1239 1239 def makefilestorage(requirements, features, **kwargs):
1240 1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1241 1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1242 1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1243 1243
1244 1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1245 1245 return revlognarrowfilestorage
1246 1246 else:
1247 1247 return revlogfilestorage
1248 1248
1249 1249
1250 1250 # List of repository interfaces and factory functions for them. Each
1251 1251 # will be called in order during ``makelocalrepository()`` to iteratively
1252 1252 # derive the final type for a local repository instance. We capture the
1253 1253 # function as a lambda so we don't hold a reference and the module-level
1254 1254 # functions can be wrapped.
1255 1255 REPO_INTERFACES = [
1256 1256 (repository.ilocalrepositorymain, lambda: makemain),
1257 1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1258 1258 ]
1259 1259
1260 1260
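# Illustrative sketch (an editorial addition; the feature name is
# hypothetical): because REPO_INTERFACES captures each factory through a
# lambda, wrapping the module-level function still takes effect, e.g. via
# extensions.wrapfunction(localrepo, 'makefilestorage', ...).
def _example_wrapped_makefilestorage(orig, requirements, features, **kwargs):
    cls = orig(requirements, features, **kwargs)
    features.add(b'exp-example-feature')
    return cls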
1261 1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1262 1262 class localrepository(object):
1263 1263 """Main class for representing local repositories.
1264 1264
1265 1265 All local repositories are instances of this class.
1266 1266
1267 1267 Constructed on its own, instances of this class are not usable as
1268 1268 repository objects. To obtain a usable repository object, call
1269 1269 ``hg.repository()``, ``localrepo.instance()``, or
1270 1270 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1271 1271 ``instance()`` adds support for creating new repositories.
1272 1272 ``hg.repository()`` adds more extension integration, including calling
1273 1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1274 1274 used.
1275 1275 """
1276 1276
1277 # obsolete experimental requirements:
1278 # - manifestv2: An experimental new manifest format that allowed
1279 # for stem compression of long paths. Experiment ended up not
1280 # being successful (repository sizes went up due to worse delta
1281 # chains), and the code was deleted in 4.6.
1282 1277 supportedformats = {
1283 1278 requirementsmod.REVLOGV1_REQUIREMENT,
1284 1279 requirementsmod.GENERALDELTA_REQUIREMENT,
1285 1280 requirementsmod.TREEMANIFEST_REQUIREMENT,
1286 1281 requirementsmod.COPIESSDC_REQUIREMENT,
1287 1282 requirementsmod.REVLOGV2_REQUIREMENT,
1288 1283 requirementsmod.CHANGELOGV2_REQUIREMENT,
1289 1284 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1290 1285 requirementsmod.NODEMAP_REQUIREMENT,
1291 1286 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1292 1287 requirementsmod.SHARESAFE_REQUIREMENT,
1293 1288 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1294 1289 }
1295 1290 _basesupported = supportedformats | {
1296 1291 requirementsmod.STORE_REQUIREMENT,
1297 1292 requirementsmod.FNCACHE_REQUIREMENT,
1298 1293 requirementsmod.SHARED_REQUIREMENT,
1299 1294 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1300 1295 requirementsmod.DOTENCODE_REQUIREMENT,
1301 1296 requirementsmod.SPARSE_REQUIREMENT,
1302 1297 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1303 1298 }
1304 1299
1305 1300 # list of prefixes for files which can be written without 'wlock'
1306 1301 # Extensions should extend this list when needed
1307 1302 _wlockfreeprefix = {
1308 1303 # We might consider requiring 'wlock' for the next
1309 1304 # two, but pretty much all the existing code assumes
1310 1305 # wlock is not needed so we keep them excluded for
1311 1306 # now.
1312 1307 b'hgrc',
1313 1308 b'requires',
1314 1309 # XXX cache is a complicated business; someone
1315 1310 # should investigate this in depth at some point
1316 1311 b'cache/',
1317 1312 # XXX shouldn't be dirstate covered by the wlock?
1318 1313 b'dirstate',
1319 1314 # XXX bisect was still a bit too messy at the time
1320 1315 # this changeset was introduced. Someone should fix
1321 1316 # the remaining bit and drop this line
1322 1317 b'bisect.state',
1323 1318 }
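    # Illustrative note (an editorial addition; the filename is
    # hypothetical): an extension that maintains its own wlock-free state
    # file extends this set:
    #
    #     localrepository._wlockfreeprefix.add(b'myext-state')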
1324 1319
1325 1320 def __init__(
1326 1321 self,
1327 1322 baseui,
1328 1323 ui,
1329 1324 origroot,
1330 1325 wdirvfs,
1331 1326 hgvfs,
1332 1327 requirements,
1333 1328 supportedrequirements,
1334 1329 sharedpath,
1335 1330 store,
1336 1331 cachevfs,
1337 1332 wcachevfs,
1338 1333 features,
1339 1334 intents=None,
1340 1335 ):
1341 1336 """Create a new local repository instance.
1342 1337
1343 1338 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1344 1339 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1345 1340 object.
1346 1341
1347 1342 Arguments:
1348 1343
1349 1344 baseui
1350 1345 ``ui.ui`` instance that ``ui`` argument was based off of.
1351 1346
1352 1347 ui
1353 1348 ``ui.ui`` instance for use by the repository.
1354 1349
1355 1350 origroot
1356 1351 ``bytes`` path to working directory root of this repository.
1357 1352
1358 1353 wdirvfs
1359 1354 ``vfs.vfs`` rooted at the working directory.
1360 1355
1361 1356 hgvfs
1362 1357 ``vfs.vfs`` rooted at .hg/
1363 1358
1364 1359 requirements
1365 1360 ``set`` of bytestrings representing repository opening requirements.
1366 1361
1367 1362 supportedrequirements
1368 1363 ``set`` of bytestrings representing repository requirements that we
1369 1364 know how to open. May be a superset of ``requirements``.
1370 1365
1371 1366 sharedpath
1372 1367 ``bytes`` Defining path to storage base directory. Points to a
1373 1368 ``.hg/`` directory somewhere.
1374 1369
1375 1370 store
1376 1371 ``store.basicstore`` (or derived) instance providing access to
1377 1372 versioned storage.
1378 1373
1379 1374 cachevfs
1380 1375 ``vfs.vfs`` used for cache files.
1381 1376
1382 1377 wcachevfs
1383 1378 ``vfs.vfs`` used for cache files related to the working copy.
1384 1379
1385 1380 features
1386 1381 ``set`` of bytestrings defining features/capabilities of this
1387 1382 instance.
1388 1383
1389 1384 intents
1390 1385 ``set`` of system strings indicating what this repo will be used
1391 1386 for.
1392 1387 """
1393 1388 self.baseui = baseui
1394 1389 self.ui = ui
1395 1390 self.origroot = origroot
1396 1391 # vfs rooted at working directory.
1397 1392 self.wvfs = wdirvfs
1398 1393 self.root = wdirvfs.base
1399 1394 # vfs rooted at .hg/. Used to access most non-store paths.
1400 1395 self.vfs = hgvfs
1401 1396 self.path = hgvfs.base
1402 1397 self.requirements = requirements
1403 1398 self.nodeconstants = sha1nodeconstants
1404 1399 self.nullid = self.nodeconstants.nullid
1405 1400 self.supported = supportedrequirements
1406 1401 self.sharedpath = sharedpath
1407 1402 self.store = store
1408 1403 self.cachevfs = cachevfs
1409 1404 self.wcachevfs = wcachevfs
1410 1405 self.features = features
1411 1406
1412 1407 self.filtername = None
1413 1408
1414 1409 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1415 1410 b'devel', b'check-locks'
1416 1411 ):
1417 1412 self.vfs.audit = self._getvfsward(self.vfs.audit)
1418 1413 # A list of callbacks to shape the phase if no data were found.
1419 1414 # Callbacks are in the form: func(repo, roots) --> processed root.
1420 1415 # This list is to be filled by extensions during repo setup
1421 1416 self._phasedefaults = []
1422 1417
1423 1418 color.setup(self.ui)
1424 1419
1425 1420 self.spath = self.store.path
1426 1421 self.svfs = self.store.vfs
1427 1422 self.sjoin = self.store.join
1428 1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1429 1424 b'devel', b'check-locks'
1430 1425 ):
1431 1426 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1432 1427 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1433 1428 else: # standard vfs
1434 1429 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1435 1430
1436 1431 self._dirstatevalidatewarned = False
1437 1432
1438 1433 self._branchcaches = branchmap.BranchMapCache()
1439 1434 self._revbranchcache = None
1440 1435 self._filterpats = {}
1441 1436 self._datafilters = {}
1442 1437 self._transref = self._lockref = self._wlockref = None
1443 1438
1444 1439 # A cache for various files under .hg/ that tracks file changes,
1445 1440 # (used by the filecache decorator)
1446 1441 #
1447 1442 # Maps a property name to its util.filecacheentry
1448 1443 self._filecache = {}
1449 1444
1450 1445 # hold sets of revision to be filtered
1451 1446 # should be cleared when something might have changed the filter value:
1452 1447 # - new changesets,
1453 1448 # - phase change,
1454 1449 # - new obsolescence marker,
1455 1450 # - working directory parent change,
1456 1451 # - bookmark changes
1457 1452 self.filteredrevcache = {}
1458 1453
1459 1454 # post-dirstate-status hooks
1460 1455 self._postdsstatus = []
1461 1456
1462 1457 # generic mapping between names and nodes
1463 1458 self.names = namespaces.namespaces()
1464 1459
1465 1460 # Key to signature value.
1466 1461 self._sparsesignaturecache = {}
1467 1462 # Signature to cached matcher instance.
1468 1463 self._sparsematchercache = {}
1469 1464
1470 1465 self._extrafilterid = repoview.extrafilter(ui)
1471 1466
1472 1467 self.filecopiesmode = None
1473 1468 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1474 1469 self.filecopiesmode = b'changeset-sidedata'
1475 1470
1476 1471 self._wanted_sidedata = set()
1477 1472 self._sidedata_computers = {}
1478 1473 sidedatamod.set_sidedata_spec_for_repo(self)
1479 1474
1480 1475 def _getvfsward(self, origfunc):
1481 1476 """build a ward for self.vfs"""
1482 1477 rref = weakref.ref(self)
1483 1478
1484 1479 def checkvfs(path, mode=None):
1485 1480 ret = origfunc(path, mode=mode)
1486 1481 repo = rref()
1487 1482 if (
1488 1483 repo is None
1489 1484 or not util.safehasattr(repo, b'_wlockref')
1490 1485 or not util.safehasattr(repo, b'_lockref')
1491 1486 ):
1492 1487 return
1493 1488 if mode in (None, b'r', b'rb'):
1494 1489 return
1495 1490 if path.startswith(repo.path):
1496 1491 # truncate name relative to the repository (.hg)
1497 1492 path = path[len(repo.path) + 1 :]
1498 1493 if path.startswith(b'cache/'):
1499 1494 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1500 1495 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1501 1496 # path prefixes covered by 'lock'
1502 1497 vfs_path_prefixes = (
1503 1498 b'journal.',
1504 1499 b'undo.',
1505 1500 b'strip-backup/',
1506 1501 b'cache/',
1507 1502 )
1508 1503 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1509 1504 if repo._currentlock(repo._lockref) is None:
1510 1505 repo.ui.develwarn(
1511 1506 b'write with no lock: "%s"' % path,
1512 1507 stacklevel=3,
1513 1508 config=b'check-locks',
1514 1509 )
1515 1510 elif repo._currentlock(repo._wlockref) is None:
1516 1511 # rest of vfs files are covered by 'wlock'
1517 1512 #
1518 1513 # exclude special files
1519 1514 for prefix in self._wlockfreeprefix:
1520 1515 if path.startswith(prefix):
1521 1516 return
1522 1517 repo.ui.develwarn(
1523 1518 b'write with no wlock: "%s"' % path,
1524 1519 stacklevel=3,
1525 1520 config=b'check-locks',
1526 1521 )
1527 1522 return ret
1528 1523
1529 1524 return checkvfs
1530 1525
1531 1526 def _getsvfsward(self, origfunc):
1532 1527 """build a ward for self.svfs"""
1533 1528 rref = weakref.ref(self)
1534 1529
1535 1530 def checksvfs(path, mode=None):
1536 1531 ret = origfunc(path, mode=mode)
1537 1532 repo = rref()
1538 1533 if repo is None or not util.safehasattr(repo, b'_lockref'):
1539 1534 return
1540 1535 if mode in (None, b'r', b'rb'):
1541 1536 return
1542 1537 if path.startswith(repo.sharedpath):
1543 1538 # truncate name relative to the repository (.hg)
1544 1539 path = path[len(repo.sharedpath) + 1 :]
1545 1540 if repo._currentlock(repo._lockref) is None:
1546 1541 repo.ui.develwarn(
1547 1542 b'write with no lock: "%s"' % path, stacklevel=4
1548 1543 )
1549 1544 return ret
1550 1545
1551 1546 return checksvfs
1552 1547
1553 1548 def close(self):
1554 1549 self._writecaches()
1555 1550
1556 1551 def _writecaches(self):
1557 1552 if self._revbranchcache:
1558 1553 self._revbranchcache.write()
1559 1554
1560 1555 def _restrictcapabilities(self, caps):
1561 1556 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1562 1557 caps = set(caps)
1563 1558 capsblob = bundle2.encodecaps(
1564 1559 bundle2.getrepocaps(self, role=b'client')
1565 1560 )
1566 1561 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1567 1562 if self.ui.configbool(b'experimental', b'narrow'):
1568 1563 caps.add(wireprototypes.NARROWCAP)
1569 1564 return caps
1570 1565
1571 1566 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1572 1567 # self -> auditor -> self._checknested -> self
1573 1568
1574 1569 @property
1575 1570 def auditor(self):
1576 1571 # This is only used by context.workingctx.match in order to
1577 1572 # detect files in subrepos.
1578 1573 return pathutil.pathauditor(self.root, callback=self._checknested)
1579 1574
1580 1575 @property
1581 1576 def nofsauditor(self):
1582 1577 # This is only used by context.basectx.match in order to detect
1583 1578 # files in subrepos.
1584 1579 return pathutil.pathauditor(
1585 1580 self.root, callback=self._checknested, realfs=False, cached=True
1586 1581 )
1587 1582
1588 1583 def _checknested(self, path):
1589 1584 """Determine if path is a legal nested repository."""
1590 1585 if not path.startswith(self.root):
1591 1586 return False
1592 1587 subpath = path[len(self.root) + 1 :]
1593 1588 normsubpath = util.pconvert(subpath)
1594 1589
1595 1590 # XXX: Checking against the current working copy is wrong in
1596 1591 # the sense that it can reject things like
1597 1592 #
1598 1593 # $ hg cat -r 10 sub/x.txt
1599 1594 #
1600 1595 # if sub/ is no longer a subrepository in the working copy
1601 1596 # parent revision.
1602 1597 #
1603 1598 # However, it can of course also allow things that would have
1604 1599 # been rejected before, such as the above cat command if sub/
1605 1600 # is a subrepository now, but was a normal directory before.
1606 1601 # The old path auditor would have rejected by mistake since it
1607 1602 # panics when it sees sub/.hg/.
1608 1603 #
1609 1604 # All in all, checking against the working copy seems sensible
1610 1605 # since we want to prevent access to nested repositories on
1611 1606 # the filesystem *now*.
1612 1607 ctx = self[None]
1613 1608 parts = util.splitpath(subpath)
1614 1609 while parts:
1615 1610 prefix = b'/'.join(parts)
1616 1611 if prefix in ctx.substate:
1617 1612 if prefix == normsubpath:
1618 1613 return True
1619 1614 else:
1620 1615 sub = ctx.sub(prefix)
1621 1616 return sub.checknested(subpath[len(prefix) + 1 :])
1622 1617 else:
1623 1618 parts.pop()
1624 1619 return False
1625 1620
1626 1621 def peer(self):
1627 1622 return localpeer(self) # not cached to avoid reference cycle
1628 1623
1629 1624 def unfiltered(self):
1630 1625 """Return unfiltered version of the repository
1631 1626
1632 1627 Intended to be overwritten by filtered repo."""
1633 1628 return self
1634 1629
1635 1630 def filtered(self, name, visibilityexceptions=None):
1636 1631 """Return a filtered version of a repository
1637 1632
1638 1633 The `name` parameter is the identifier of the requested view. This
1639 1634 will return a repoview object set "exactly" to the specified view.
1640 1635
1641 1636 This function does not apply recursive filtering to a repository. For
1642 1637 example calling `repo.filtered("served")` will return a repoview using
1643 1638 the "served" view, regardless of the initial view used by `repo`.
1644 1639
1645 1640 In other words, there is always only one level of `repoview` "filtering".
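
A small usage sketch (the view names come from `repoview.filtertable`):

  served = repo.filtered(b'served')    # also hides secret changesets
  visible = repo.filtered(b'visible')  # hides only the hidden changesets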
1646 1641 """
1647 1642 if self._extrafilterid is not None and b'%' not in name:
1648 1643 name = name + b'%' + self._extrafilterid
1649 1644
1650 1645 cls = repoview.newtype(self.unfiltered().__class__)
1651 1646 return cls(self, name, visibilityexceptions)
1652 1647
1653 1648 @mixedrepostorecache(
1654 1649 (b'bookmarks', b'plain'),
1655 1650 (b'bookmarks.current', b'plain'),
1656 1651 (b'bookmarks', b''),
1657 1652 (b'00changelog.i', b''),
1658 1653 )
1659 1654 def _bookmarks(self):
1660 1655 # Since the multiple files involved in the transaction cannot be
1661 1656 # written atomically (with current repository format), there is a race
1662 1657 # condition here.
1663 1658 #
1664 1659 # 1) changelog content A is read
1665 1660 # 2) outside transaction update changelog to content B
1666 1661 # 3) outside transaction update bookmark file referring to content B
1667 1662 # 4) bookmarks file content is read and filtered against changelog-A
1668 1663 #
1669 1664 # When this happens, bookmarks against nodes missing from A are dropped.
1670 1665 #
1671 1666 # Having this happen during a read is not great, but it becomes worse
1672 1667 # when it happens during a write, because the bookmarks to the "unknown"
1673 1668 # nodes will be dropped for good. However, writes happen within locks.
1674 1669 # This locking makes it possible to have a race-free consistent read.
1675 1670 # For this purpose, the data read from disk before locking is
1676 1671 # "invalidated" right after the locks are taken. These invalidations are
1677 1672 # "light": the `filecache` mechanism keeps the data in memory and will
1678 1673 # reuse it if the underlying files did not change. Not parsing the
1679 1674 # same data multiple times helps performance.
1680 1675 #
1681 1676 # Unfortunately, in the case described above, the files tracked by the
1682 1677 # bookmarks file cache might not have changed, but the in-memory
1683 1678 # content is still "wrong" because we used an older changelog content
1684 1679 # to process the on-disk data. So after locking, the changelog would be
1685 1680 # refreshed but `_bookmarks` would be preserved.
1686 1681 # Adding `00changelog.i` to the list of tracked files is not
1687 1682 # enough, because at the time we build the content for `_bookmarks` in
1688 1683 # (4), the changelog file has already diverged from the content used
1689 1684 # for loading `changelog` in (1).
1690 1685 #
1691 1686 # To prevent the issue, we force the changelog to be explicitly
1692 1687 # reloaded while computing `_bookmarks`. The data race can still happen
1693 1688 # without the lock (with a narrower window), but it would no longer go
1694 1689 # undetected during the lock-time refresh.
1695 1690 #
1696 1691 # The new schedule is as follows:
1697 1692 #
1698 1693 # 1) filecache logic detects that `_bookmarks` needs to be computed
1699 1694 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1700 1695 # 3) we force the `changelog` filecache to be tested
1701 1696 # 4) cachestat for `changelog` are captured (for changelog)
1702 1697 # 5) `_bookmarks` is computed and cached
1703 1698 #
1704 1699 # The step in (3) ensures we have a changelog at least as recent as the
1705 1700 # cache stat computed in (1). As a result, at locking time:
1706 1701 # * if the changelog did not change since (1) -> we can reuse the data
1707 1702 # * otherwise -> the bookmarks get refreshed.
1708 1703 self._refreshchangelog()
1709 1704 return bookmarks.bmstore(self)
1710 1705
1711 1706 def _refreshchangelog(self):
1712 1707 """make sure the in memory changelog match the on-disk one"""
1713 1708 if 'changelog' in vars(self) and self.currenttransaction() is None:
1714 1709 del self.changelog
1715 1710
1716 1711 @property
1717 1712 def _activebookmark(self):
1718 1713 return self._bookmarks.active
1719 1714
1720 1715 # _phasesets depend on the changelog. What we need is to call
1721 1716 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1722 1717 # can't be easily expressed in the filecache mechanism.
1723 1718 @storecache(b'phaseroots', b'00changelog.i')
1724 1719 def _phasecache(self):
1725 1720 return phases.phasecache(self, self._phasedefaults)
1726 1721
1727 1722 @storecache(b'obsstore')
1728 1723 def obsstore(self):
1729 1724 return obsolete.makestore(self.ui, self)
1730 1725
1731 1726 @changelogcache()
1732 1727 def changelog(repo):
1733 1728 # load the dirstate before the changelog to avoid a race (see issue6303)
1734 1729 repo.dirstate.prefetch_parents()
1735 1730 return repo.store.changelog(
1736 1731 txnutil.mayhavepending(repo.root),
1737 1732 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1738 1733 )
1739 1734
1740 1735 @manifestlogcache()
1741 1736 def manifestlog(self):
1742 1737 return self.store.manifestlog(self, self._storenarrowmatch)
1743 1738
1744 1739 @repofilecache(b'dirstate')
1745 1740 def dirstate(self):
1746 1741 return self._makedirstate()
1747 1742
1748 1743 def _makedirstate(self):
1749 1744 """Extension point for wrapping the dirstate per-repo."""
1750 1745 sparsematchfn = lambda: sparse.matcher(self)
1751 1746 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1752 1747 use_dirstate_v2 = v2_req in self.requirements
1753 1748
1754 1749 return dirstate.dirstate(
1755 1750 self.vfs,
1756 1751 self.ui,
1757 1752 self.root,
1758 1753 self._dirstatevalidate,
1759 1754 sparsematchfn,
1760 1755 self.nodeconstants,
1761 1756 use_dirstate_v2,
1762 1757 )
1763 1758
1764 1759 def _dirstatevalidate(self, node):
1765 1760 try:
1766 1761 self.changelog.rev(node)
1767 1762 return node
1768 1763 except error.LookupError:
1769 1764 if not self._dirstatevalidatewarned:
1770 1765 self._dirstatevalidatewarned = True
1771 1766 self.ui.warn(
1772 1767 _(b"warning: ignoring unknown working parent %s!\n")
1773 1768 % short(node)
1774 1769 )
1775 1770 return self.nullid
1776 1771
1777 1772 @storecache(narrowspec.FILENAME)
1778 1773 def narrowpats(self):
1779 1774 """matcher patterns for this repository's narrowspec
1780 1775
1781 1776 A tuple of (includes, excludes).
1782 1777 """
1783 1778 return narrowspec.load(self)
1784 1779
1785 1780 @storecache(narrowspec.FILENAME)
1786 1781 def _storenarrowmatch(self):
1787 1782 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1788 1783 return matchmod.always()
1789 1784 include, exclude = self.narrowpats
1790 1785 return narrowspec.match(self.root, include=include, exclude=exclude)
1791 1786
1792 1787 @storecache(narrowspec.FILENAME)
1793 1788 def _narrowmatch(self):
1794 1789 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1795 1790 return matchmod.always()
1796 1791 narrowspec.checkworkingcopynarrowspec(self)
1797 1792 include, exclude = self.narrowpats
1798 1793 return narrowspec.match(self.root, include=include, exclude=exclude)
1799 1794
1800 1795 def narrowmatch(self, match=None, includeexact=False):
1801 1796 """matcher corresponding the the repo's narrowspec
1802 1797
1803 1798 If `match` is given, then that will be intersected with the narrow
1804 1799 matcher.
1805 1800
1806 1801 If `includeexact` is True, then any exact matches from `match` will
1807 1802 be included even if they're outside the narrowspec.
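
A sketch of typical calls (``match`` stands for any pre-built matcher):

  narrow = repo.narrowmatch()  # the bare narrowspec matcher
  m = repo.narrowmatch(match, includeexact=True)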
1808 1803 """
1809 1804 if match:
1810 1805 if includeexact and not self._narrowmatch.always():
1811 1806 # do not exclude explicitly-specified paths so that they can
1812 1807 # be warned later on
1813 1808 em = matchmod.exact(match.files())
1814 1809 nm = matchmod.unionmatcher([self._narrowmatch, em])
1815 1810 return matchmod.intersectmatchers(match, nm)
1816 1811 return matchmod.intersectmatchers(match, self._narrowmatch)
1817 1812 return self._narrowmatch
1818 1813
1819 1814 def setnarrowpats(self, newincludes, newexcludes):
1820 1815 narrowspec.save(self, newincludes, newexcludes)
1821 1816 self.invalidate(clearfilecache=True)
1822 1817
1823 1818 @unfilteredpropertycache
1824 1819 def _quick_access_changeid_null(self):
1825 1820 return {
1826 1821 b'null': (nullrev, self.nodeconstants.nullid),
1827 1822 nullrev: (nullrev, self.nodeconstants.nullid),
1828 1823 self.nullid: (nullrev, self.nullid),
1829 1824 }
1830 1825
1831 1826 @unfilteredpropertycache
1832 1827 def _quick_access_changeid_wc(self):
1833 1828 # also fast path access to the working copy parents
1834 1829 # however, only do it for filters that ensure the wc is visible.
1835 1830 quick = self._quick_access_changeid_null.copy()
1836 1831 cl = self.unfiltered().changelog
1837 1832 for node in self.dirstate.parents():
1838 1833 if node == self.nullid:
1839 1834 continue
1840 1835 rev = cl.index.get_rev(node)
1841 1836 if rev is None:
1842 1837 # unknown working copy parent case:
1843 1838 #
1844 1839 # skip the fast path and let higher code deal with it
1845 1840 continue
1846 1841 pair = (rev, node)
1847 1842 quick[rev] = pair
1848 1843 quick[node] = pair
1849 1844 # also add the parents of the parents
1850 1845 for r in cl.parentrevs(rev):
1851 1846 if r == nullrev:
1852 1847 continue
1853 1848 n = cl.node(r)
1854 1849 pair = (r, n)
1855 1850 quick[r] = pair
1856 1851 quick[n] = pair
1857 1852 p1node = self.dirstate.p1()
1858 1853 if p1node != self.nullid:
1859 1854 quick[b'.'] = quick[p1node]
1860 1855 return quick
1861 1856
1862 1857 @unfilteredmethod
1863 1858 def _quick_access_changeid_invalidate(self):
1864 1859 if '_quick_access_changeid_wc' in vars(self):
1865 1860 del self.__dict__['_quick_access_changeid_wc']
1866 1861
1867 1862 @property
1868 1863 def _quick_access_changeid(self):
1869 1864 """an helper dictionnary for __getitem__ calls
1870 1865
1871 1866 This contains a list of symbol we can recognise right away without
1872 1867 further processing.
1873 1868 """
1874 1869 if self.filtername in repoview.filter_has_wc:
1875 1870 return self._quick_access_changeid_wc
1876 1871 return self._quick_access_changeid_null
1877 1872
1878 1873 def __getitem__(self, changeid):
1879 1874 # dealing with special cases
1880 1875 if changeid is None:
1881 1876 return context.workingctx(self)
1882 1877 if isinstance(changeid, context.basectx):
1883 1878 return changeid
1884 1879
1885 1880 # dealing with multiple revisions
1886 1881 if isinstance(changeid, slice):
1887 1882 # wdirrev isn't contiguous so the slice shouldn't include it
1888 1883 return [
1889 1884 self[i]
1890 1885 for i in pycompat.xrange(*changeid.indices(len(self)))
1891 1886 if i not in self.changelog.filteredrevs
1892 1887 ]
1893 1888
1894 1889 # dealing with some special values
1895 1890 quick_access = self._quick_access_changeid.get(changeid)
1896 1891 if quick_access is not None:
1897 1892 rev, node = quick_access
1898 1893 return context.changectx(self, rev, node, maybe_filtered=False)
1899 1894 if changeid == b'tip':
1900 1895 node = self.changelog.tip()
1901 1896 rev = self.changelog.rev(node)
1902 1897 return context.changectx(self, rev, node)
1903 1898
1904 1899 # dealing with arbitrary values
1905 1900 try:
1906 1901 if isinstance(changeid, int):
1907 1902 node = self.changelog.node(changeid)
1908 1903 rev = changeid
1909 1904 elif changeid == b'.':
1910 1905 # this is a hack to delay/avoid loading obsmarkers
1911 1906 # when we know that '.' won't be hidden
1912 1907 node = self.dirstate.p1()
1913 1908 rev = self.unfiltered().changelog.rev(node)
1914 1909 elif len(changeid) == self.nodeconstants.nodelen:
1915 1910 try:
1916 1911 node = changeid
1917 1912 rev = self.changelog.rev(changeid)
1918 1913 except error.FilteredLookupError:
1919 1914 changeid = hex(changeid) # for the error message
1920 1915 raise
1921 1916 except LookupError:
1922 1917 # check if it might have come from damaged dirstate
1923 1918 #
1924 1919 # XXX we could avoid the unfiltered if we had a recognizable
1925 1920 # exception for filtered changeset access
1926 1921 if (
1927 1922 self.local()
1928 1923 and changeid in self.unfiltered().dirstate.parents()
1929 1924 ):
1930 1925 msg = _(b"working directory has unknown parent '%s'!")
1931 1926 raise error.Abort(msg % short(changeid))
1932 1927 changeid = hex(changeid) # for the error message
1933 1928 raise
1934 1929
1935 1930 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1936 1931 node = bin(changeid)
1937 1932 rev = self.changelog.rev(node)
1938 1933 else:
1939 1934 raise error.ProgrammingError(
1940 1935 b"unsupported changeid '%s' of type %s"
1941 1936 % (changeid, pycompat.bytestr(type(changeid)))
1942 1937 )
1943 1938
1944 1939 return context.changectx(self, rev, node)
1945 1940
1946 1941 except (error.FilteredIndexError, error.FilteredLookupError):
1947 1942 raise error.FilteredRepoLookupError(
1948 1943 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1949 1944 )
1950 1945 except (IndexError, LookupError):
1951 1946 raise error.RepoLookupError(
1952 1947 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1953 1948 )
1954 1949 except error.WdirUnsupported:
1955 1950 return context.workingctx(self)
1956 1951
1957 1952 def __contains__(self, changeid):
1958 1953 """True if the given changeid exists"""
1959 1954 try:
1960 1955 self[changeid]
1961 1956 return True
1962 1957 except error.RepoLookupError:
1963 1958 return False
1964 1959
1965 1960 def __nonzero__(self):
1966 1961 return True
1967 1962
1968 1963 __bool__ = __nonzero__
1969 1964
1970 1965 def __len__(self):
1971 1966 # no need to pay the cost of repoview.changelog
1972 1967 unfi = self.unfiltered()
1973 1968 return len(unfi.changelog)
1974 1969
1975 1970 def __iter__(self):
1976 1971 return iter(self.changelog)
1977 1972
1978 1973 def revs(self, expr, *args):
1979 1974 """Find revisions matching a revset.
1980 1975
1981 1976 The revset is specified as a string ``expr`` that may contain
1982 1977 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1983 1978
1984 1979 Revset aliases from the configuration are not expanded. To expand
1985 1980 user aliases, consider calling ``scmutil.revrange()`` or
1986 1981 ``repo.anyrevs([expr], user=True)``.
1987 1982
1988 1983 Returns a smartset.abstractsmartset, which is a list-like interface
1989 1984 that contains integer revisions.
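
A usage sketch relying on the %-formatting described above (the revset
itself is illustrative):

  for rev in repo.revs(b'ancestors(%d) and not public()', 42):
      ...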
1990 1985 """
1991 1986 tree = revsetlang.spectree(expr, *args)
1992 1987 return revset.makematcher(tree)(self)
1993 1988
1994 1989 def set(self, expr, *args):
1995 1990 """Find revisions matching a revset and emit changectx instances.
1996 1991
1997 1992 This is a convenience wrapper around ``revs()`` that iterates the
1998 1993 result and is a generator of changectx instances.
1999 1994
2000 1995 Revset aliases from the configuration are not expanded. To expand
2001 1996 user aliases, consider calling ``scmutil.revrange()``.
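
For example (the revset and ``some_rev`` are illustrative):

  for ctx in repo.set(b'parents(%d)', some_rev):
      ...  # each ctx is a changectx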
2002 1997 """
2003 1998 for r in self.revs(expr, *args):
2004 1999 yield self[r]
2005 2000
2006 2001 def anyrevs(self, specs, user=False, localalias=None):
2007 2002 """Find revisions matching one of the given revsets.
2008 2003
2009 2004 Revset aliases from the configuration are not expanded by default. To
2010 2005 expand user aliases, specify ``user=True``. To provide some local
2011 2006 definitions overriding user aliases, set ``localalias`` to
2012 2007 ``{name: definitionstring}``.
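
A sketch combining user aliases with a local override (the alias name
and revset are illustrative):

  revs = repo.anyrevs(
      [b'mine()'], user=True, localalias={b'mine': b'author(alice)'}
  )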
2013 2008 """
2014 2009 if specs == [b'null']:
2015 2010 return revset.baseset([nullrev])
2016 2011 if specs == [b'.']:
2017 2012 quick_data = self._quick_access_changeid.get(b'.')
2018 2013 if quick_data is not None:
2019 2014 return revset.baseset([quick_data[0]])
2020 2015 if user:
2021 2016 m = revset.matchany(
2022 2017 self.ui,
2023 2018 specs,
2024 2019 lookup=revset.lookupfn(self),
2025 2020 localalias=localalias,
2026 2021 )
2027 2022 else:
2028 2023 m = revset.matchany(None, specs, localalias=localalias)
2029 2024 return m(self)
2030 2025
2031 2026 def url(self):
2032 2027 return b'file:' + self.root
2033 2028
2034 2029 def hook(self, name, throw=False, **args):
2035 2030 """Call a hook, passing this repo instance.
2036 2031
2037 2032 This is a convenience method to aid invoking hooks. Extensions likely
2038 2033 won't call this unless they have registered a custom hook or are
2039 2034 replacing code that is expected to call a hook.
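
A minimal sketch (the hook name and extra argument are illustrative;
extra keyword arguments are exposed to the hook as HG_* variables):

  repo.hook(b'myextension-done', throw=False, result=b'ok')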
2040 2035 """
2041 2036 return hook.hook(self.ui, self, name, throw, **args)
2042 2037
2043 2038 @filteredpropertycache
2044 2039 def _tagscache(self):
2045 2040 """Returns a tagscache object that contains various tags related
2046 2041 caches."""
2047 2042
2048 2043 # This simplifies its cache management by having one decorated
2049 2044 # function (this one) and the rest simply fetch things from it.
2050 2045 class tagscache(object):
2051 2046 def __init__(self):
2052 2047 # These two define the set of tags for this repository. tags
2053 2048 # maps tag name to node; tagtypes maps tag name to 'global' or
2054 2049 # 'local'. (Global tags are defined by .hgtags across all
2055 2050 # heads, and local tags are defined in .hg/localtags.)
2056 2051 # They constitute the in-memory cache of tags.
2057 2052 self.tags = self.tagtypes = None
2058 2053
2059 2054 self.nodetagscache = self.tagslist = None
2060 2055
2061 2056 cache = tagscache()
2062 2057 cache.tags, cache.tagtypes = self._findtags()
2063 2058
2064 2059 return cache
2065 2060
2066 2061 def tags(self):
2067 2062 '''return a mapping of tag to node'''
2068 2063 t = {}
2069 2064 if self.changelog.filteredrevs:
2070 2065 tags, tt = self._findtags()
2071 2066 else:
2072 2067 tags = self._tagscache.tags
2073 2068 rev = self.changelog.rev
2074 2069 for k, v in pycompat.iteritems(tags):
2075 2070 try:
2076 2071 # ignore tags to unknown nodes
2077 2072 rev(v)
2078 2073 t[k] = v
2079 2074 except (error.LookupError, ValueError):
2080 2075 pass
2081 2076 return t
2082 2077
2083 2078 def _findtags(self):
2084 2079 """Do the hard work of finding tags. Return a pair of dicts
2085 2080 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2086 2081 maps tag name to a string like \'global\' or \'local\'.
2087 2082 Subclasses or extensions are free to add their own tags, but
2088 2083 should be aware that the returned dicts will be retained for the
2089 2084 duration of the localrepo object."""
2090 2085
2091 2086 # XXX what tagtype should subclasses/extensions use? Currently
2092 2087 # mq and bookmarks add tags, but do not set the tagtype at all.
2093 2088 # Should each extension invent its own tag type? Should there
2094 2089 # be one tagtype for all such "virtual" tags? Or is the status
2095 2090 # quo fine?
2096 2091
2097 2092 # map tag name to (node, hist)
2098 2093 alltags = tagsmod.findglobaltags(self.ui, self)
2099 2094 # map tag name to tag type
2100 2095 tagtypes = {tag: b'global' for tag in alltags}
2101 2096
2102 2097 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2103 2098
2104 2099 # Build the return dicts. Have to re-encode tag names because
2105 2100 # the tags module always uses UTF-8 (in order not to lose info
2106 2101 # writing to the cache), but the rest of Mercurial wants them in
2107 2102 # local encoding.
2108 2103 tags = {}
2109 2104 for (name, (node, hist)) in pycompat.iteritems(alltags):
2110 2105 if node != self.nullid:
2111 2106 tags[encoding.tolocal(name)] = node
2112 2107 tags[b'tip'] = self.changelog.tip()
2113 2108 tagtypes = {
2114 2109 encoding.tolocal(name): value
2115 2110 for (name, value) in pycompat.iteritems(tagtypes)
2116 2111 }
2117 2112 return (tags, tagtypes)
2118 2113
2119 2114 def tagtype(self, tagname):
2120 2115 """
2121 2116 return the type of the given tag. result can be:
2122 2117
2123 2118 'local' : a local tag
2124 2119 'global' : a global tag
2125 2120 None : tag does not exist
2126 2121 """
2127 2122
2128 2123 return self._tagscache.tagtypes.get(tagname)
2129 2124
2130 2125 def tagslist(self):
2131 2126 '''return a list of tags ordered by revision'''
2132 2127 if not self._tagscache.tagslist:
2133 2128 l = []
2134 2129 for t, n in pycompat.iteritems(self.tags()):
2135 2130 l.append((self.changelog.rev(n), t, n))
2136 2131 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2137 2132
2138 2133 return self._tagscache.tagslist
2139 2134
2140 2135 def nodetags(self, node):
2141 2136 '''return the tags associated with a node'''
2142 2137 if not self._tagscache.nodetagscache:
2143 2138 nodetagscache = {}
2144 2139 for t, n in pycompat.iteritems(self._tagscache.tags):
2145 2140 nodetagscache.setdefault(n, []).append(t)
2146 2141 for tags in pycompat.itervalues(nodetagscache):
2147 2142 tags.sort()
2148 2143 self._tagscache.nodetagscache = nodetagscache
2149 2144 return self._tagscache.nodetagscache.get(node, [])
2150 2145
2151 2146 def nodebookmarks(self, node):
2152 2147 """return the list of bookmarks pointing to the specified node"""
2153 2148 return self._bookmarks.names(node)
2154 2149
2155 2150 def branchmap(self):
2156 2151 """returns a dictionary {branch: [branchheads]} with branchheads
2157 2152 ordered by increasing revision number"""
2158 2153 return self._branchcaches[self]
2159 2154
2160 2155 @unfilteredmethod
2161 2156 def revbranchcache(self):
2162 2157 if not self._revbranchcache:
2163 2158 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2164 2159 return self._revbranchcache
2165 2160
2166 2161 def register_changeset(self, rev, changelogrevision):
2167 2162 self.revbranchcache().setdata(rev, changelogrevision)
2168 2163
2169 2164 def branchtip(self, branch, ignoremissing=False):
2170 2165 """return the tip node for a given branch
2171 2166
2172 2167 If ignoremissing is True, then this method will not raise an error.
2173 2168 This is helpful for callers that only expect None for a missing branch
2174 2169 (e.g. namespace).
2175 2170
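For example:

  node = repo.branchtip(b'default')
  gone = repo.branchtip(b'no-such-branch', ignoremissing=True)  # None
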
2176 2171 """
2177 2172 try:
2178 2173 return self.branchmap().branchtip(branch)
2179 2174 except KeyError:
2180 2175 if not ignoremissing:
2181 2176 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2182 2177 else:
2183 2178 pass
2184 2179
2185 2180 def lookup(self, key):
2186 2181 node = scmutil.revsymbol(self, key).node()
2187 2182 if node is None:
2188 2183 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2189 2184 return node
2190 2185
2191 2186 def lookupbranch(self, key):
2192 2187 if self.branchmap().hasbranch(key):
2193 2188 return key
2194 2189
2195 2190 return scmutil.revsymbol(self, key).branch()
2196 2191
2197 2192 def known(self, nodes):
2198 2193 cl = self.changelog
2199 2194 get_rev = cl.index.get_rev
2200 2195 filtered = cl.filteredrevs
2201 2196 result = []
2202 2197 for n in nodes:
2203 2198 r = get_rev(n)
2204 2199 resp = not (r is None or r in filtered)
2205 2200 result.append(resp)
2206 2201 return result
2207 2202
2208 2203 def local(self):
2209 2204 return self
2210 2205
2211 2206 def publishing(self):
2212 2207 # it's safe (and desirable) to trust the publish flag unconditionally
2213 2208 # so that we don't finalize changes shared between users via ssh or nfs
2214 2209 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2215 2210
2216 2211 def cancopy(self):
2217 2212 # so statichttprepo's override of local() works
2218 2213 if not self.local():
2219 2214 return False
2220 2215 if not self.publishing():
2221 2216 return True
2222 2217 # if publishing we can't copy if there is filtered content
2223 2218 return not self.filtered(b'visible').changelog.filteredrevs
2224 2219
2225 2220 def shared(self):
2226 2221 '''the type of shared repository (None if not shared)'''
2227 2222 if self.sharedpath != self.path:
2228 2223 return b'store'
2229 2224 return None
2230 2225
2231 2226 def wjoin(self, f, *insidef):
2232 2227 return self.vfs.reljoin(self.root, f, *insidef)
2233 2228
2234 2229 def setparents(self, p1, p2=None):
2235 2230 if p2 is None:
2236 2231 p2 = self.nullid
2237 2232 self[None].setparents(p1, p2)
2238 2233 self._quick_access_changeid_invalidate()
2239 2234
2240 2235 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2241 2236 """changeid must be a changeset revision, if specified.
2242 2237 fileid can be a file revision or node."""
2243 2238 return context.filectx(
2244 2239 self, path, changeid, fileid, changectx=changectx
2245 2240 )
2246 2241
2247 2242 def getcwd(self):
2248 2243 return self.dirstate.getcwd()
2249 2244
2250 2245 def pathto(self, f, cwd=None):
2251 2246 return self.dirstate.pathto(f, cwd)
2252 2247
2253 2248 def _loadfilter(self, filter):
2254 2249 if filter not in self._filterpats:
2255 2250 l = []
2256 2251 for pat, cmd in self.ui.configitems(filter):
2257 2252 if cmd == b'!':
2258 2253 continue
2259 2254 mf = matchmod.match(self.root, b'', [pat])
2260 2255 fn = None
2261 2256 params = cmd
2262 2257 for name, filterfn in pycompat.iteritems(self._datafilters):
2263 2258 if cmd.startswith(name):
2264 2259 fn = filterfn
2265 2260 params = cmd[len(name) :].lstrip()
2266 2261 break
2267 2262 if not fn:
2268 2263 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2269 2264 fn.__name__ = 'commandfilter'
2270 2265 # Wrap old filters not supporting keyword arguments
2271 2266 if not pycompat.getargspec(fn)[2]:
2272 2267 oldfn = fn
2273 2268 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2274 2269 fn.__name__ = 'compat-' + oldfn.__name__
2275 2270 l.append((mf, fn, params))
2276 2271 self._filterpats[filter] = l
2277 2272 return self._filterpats[filter]
2278 2273
2279 2274 def _filter(self, filterpats, filename, data):
2280 2275 for mf, fn, cmd in filterpats:
2281 2276 if mf(filename):
2282 2277 self.ui.debug(
2283 2278 b"filtering %s through %s\n"
2284 2279 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2285 2280 )
2286 2281 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2287 2282 break
2288 2283
2289 2284 return data
2290 2285
2291 2286 @unfilteredpropertycache
2292 2287 def _encodefilterpats(self):
2293 2288 return self._loadfilter(b'encode')
2294 2289
2295 2290 @unfilteredpropertycache
2296 2291 def _decodefilterpats(self):
2297 2292 return self._loadfilter(b'decode')
2298 2293
2299 2294 def adddatafilter(self, name, filter):
2300 2295 self._datafilters[name] = filter
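# A registration sketch (the name and function are illustrative). Per
# `_loadfilter` above, the registered name is matched as a prefix of the
# configured command and the filter is called as
# fn(data, cmd, ui=..., repo=..., filename=...):
#
#   def upperfilter(data, cmd, ui=None, repo=None, filename=None):
#       return data.upper()
#
#   repo.adddatafilter(b'upper:', upperfilter)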
2301 2296
2302 2297 def wread(self, filename):
2303 2298 if self.wvfs.islink(filename):
2304 2299 data = self.wvfs.readlink(filename)
2305 2300 else:
2306 2301 data = self.wvfs.read(filename)
2307 2302 return self._filter(self._encodefilterpats, filename, data)
2308 2303
2309 2304 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2310 2305 """write ``data`` into ``filename`` in the working directory
2311 2306
2312 2307 This returns the length of the written (possibly decoded) data.
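
For example (the path is illustrative; flags are b'' for a regular
file, b'l' for a symlink, b'x' for an executable):

  repo.wwrite(b'hello.txt', b'hello\n', b'')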
2313 2308 """
2314 2309 data = self._filter(self._decodefilterpats, filename, data)
2315 2310 if b'l' in flags:
2316 2311 self.wvfs.symlink(data, filename)
2317 2312 else:
2318 2313 self.wvfs.write(
2319 2314 filename, data, backgroundclose=backgroundclose, **kwargs
2320 2315 )
2321 2316 if b'x' in flags:
2322 2317 self.wvfs.setflags(filename, False, True)
2323 2318 else:
2324 2319 self.wvfs.setflags(filename, False, False)
2325 2320 return len(data)
2326 2321
2327 2322 def wwritedata(self, filename, data):
2328 2323 return self._filter(self._decodefilterpats, filename, data)
2329 2324
2330 2325 def currenttransaction(self):
2331 2326 """return the current transaction or None if non exists"""
2332 2327 if self._transref:
2333 2328 tr = self._transref()
2334 2329 else:
2335 2330 tr = None
2336 2331
2337 2332 if tr and tr.running():
2338 2333 return tr
2339 2334 return None
2340 2335
2341 2336 def transaction(self, desc, report=None):
2342 2337 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2343 2338 b'devel', b'check-locks'
2344 2339 ):
2345 2340 if self._currentlock(self._lockref) is None:
2346 2341 raise error.ProgrammingError(b'transaction requires locking')
2347 2342 tr = self.currenttransaction()
2348 2343 if tr is not None:
2349 2344 return tr.nest(name=desc)
2350 2345
2351 2346 # abort here if the journal already exists
2352 2347 if self.svfs.exists(b"journal"):
2353 2348 raise error.RepoError(
2354 2349 _(b"abandoned transaction found"),
2355 2350 hint=_(b"run 'hg recover' to clean up transaction"),
2356 2351 )
2357 2352
2358 2353 idbase = b"%.40f#%f" % (random.random(), time.time())
2359 2354 ha = hex(hashutil.sha1(idbase).digest())
2360 2355 txnid = b'TXN:' + ha
2361 2356 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2362 2357
2363 2358 self._writejournal(desc)
2364 2359 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2365 2360 if report:
2366 2361 rp = report
2367 2362 else:
2368 2363 rp = self.ui.warn
2369 2364 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2370 2365 # we must avoid cyclic reference between repo and transaction.
2371 2366 reporef = weakref.ref(self)
2372 2367 # Code to track tag movement
2373 2368 #
2374 2369 # Since tags are all handled as file content, it is actually quite hard
2375 2370 # to track these movements from a code perspective. So we fall back to
2376 2371 # tracking at the repository level. One could envision tracking changes
2377 2372 # to the '.hgtags' file through changegroup application, but that fails
2378 2373 # to cope with cases where a transaction exposes new heads without a
2379 2374 # changegroup being involved (eg: phase movement).
2380 2375 #
2381 2376 # For now, we gate the feature behind a flag since it likely comes
2382 2377 # with performance impacts. The current code runs more often than needed
2383 2378 # and does not use caches as much as it could. The current focus is on
2384 2379 # the behavior of the feature, so we disable it by default. The flag
2385 2380 # will be removed when we are happy with the performance impact.
2386 2381 #
2387 2382 # Once this feature is no longer experimental move the following
2388 2383 # documentation to the appropriate help section:
2389 2384 #
2390 2385 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2391 2386 # tags (new or changed or deleted tags). In addition the details of
2392 2387 # these changes are made available in a file at:
2393 2388 # ``REPOROOT/.hg/changes/tags.changes``.
2394 2389 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2395 2390 # might exist from a previous transaction even if no tags were touched
2396 2391 # in this one. Changes are recorded in a line-based format::
2397 2392 #
2398 2393 # <action> <hex-node> <tag-name>\n
2399 2394 #
2400 2395 # Actions are defined as follows:
2401 2396 # "-R": tag is removed,
2402 2397 # "+A": tag is added,
2403 2398 # "-M": tag is moved (old value),
2404 2399 # "+M": tag is moved (new value),
2405 2400 tracktags = lambda x: None
2406 2401 # experimental config: experimental.hook-track-tags
2407 2402 shouldtracktags = self.ui.configbool(
2408 2403 b'experimental', b'hook-track-tags'
2409 2404 )
2410 2405 if desc != b'strip' and shouldtracktags:
2411 2406 oldheads = self.changelog.headrevs()
2412 2407
2413 2408 def tracktags(tr2):
2414 2409 repo = reporef()
2415 2410 assert repo is not None # help pytype
2416 2411 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2417 2412 newheads = repo.changelog.headrevs()
2418 2413 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2419 2414 # note: we compare lists here.
2420 2415 # As we do it only once, building a set would not be cheaper
2421 2416 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2422 2417 if changes:
2423 2418 tr2.hookargs[b'tag_moved'] = b'1'
2424 2419 with repo.vfs(
2425 2420 b'changes/tags.changes', b'w', atomictemp=True
2426 2421 ) as changesfile:
2427 2422 # note: we do not register the file with the transaction
2428 2423 # because we need it to still exist when the transaction
2429 2424 # is closed (for txnclose hooks)
2430 2425 tagsmod.writediff(changesfile, changes)
2431 2426
2432 2427 def validate(tr2):
2433 2428 """will run pre-closing hooks"""
2434 2429 # XXX the transaction API is a bit lacking here so we take a hacky
2435 2430 # path for now
2436 2431 #
2437 2432 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2438 2433 # dict is copied before these run. In addition we needs the data
2439 2434 # available to in memory hooks too.
2440 2435 #
2441 2436 # Moreover, we also need to make sure this runs before txnclose
2442 2437 # hooks and there is no "pending" mechanism that would execute
2443 2438 # logic only if hooks are about to run.
2444 2439 #
2445 2440 # Fixing this limitation of the transaction is also needed to track
2446 2441 # other families of changes (bookmarks, phases, obsolescence).
2447 2442 #
2448 2443 # This will have to be fixed before we remove the experimental
2449 2444 # gating.
2450 2445 tracktags(tr2)
2451 2446 repo = reporef()
2452 2447 assert repo is not None # help pytype
2453 2448
2454 2449 singleheadopt = (b'experimental', b'single-head-per-branch')
2455 2450 singlehead = repo.ui.configbool(*singleheadopt)
2456 2451 if singlehead:
2457 2452 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2458 2453 accountclosed = singleheadsub.get(
2459 2454 b"account-closed-heads", False
2460 2455 )
2461 2456 if singleheadsub.get(b"public-changes-only", False):
2462 2457 filtername = b"immutable"
2463 2458 else:
2464 2459 filtername = b"visible"
2465 2460 scmutil.enforcesinglehead(
2466 2461 repo, tr2, desc, accountclosed, filtername
2467 2462 )
2468 2463 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2469 2464 for name, (old, new) in sorted(
2470 2465 tr.changes[b'bookmarks'].items()
2471 2466 ):
2472 2467 args = tr.hookargs.copy()
2473 2468 args.update(bookmarks.preparehookargs(name, old, new))
2474 2469 repo.hook(
2475 2470 b'pretxnclose-bookmark',
2476 2471 throw=True,
2477 2472 **pycompat.strkwargs(args)
2478 2473 )
2479 2474 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2480 2475 cl = repo.unfiltered().changelog
2481 2476 for revs, (old, new) in tr.changes[b'phases']:
2482 2477 for rev in revs:
2483 2478 args = tr.hookargs.copy()
2484 2479 node = hex(cl.node(rev))
2485 2480 args.update(phases.preparehookargs(node, old, new))
2486 2481 repo.hook(
2487 2482 b'pretxnclose-phase',
2488 2483 throw=True,
2489 2484 **pycompat.strkwargs(args)
2490 2485 )
2491 2486
2492 2487 repo.hook(
2493 2488 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2494 2489 )
2495 2490
2496 2491 def releasefn(tr, success):
2497 2492 repo = reporef()
2498 2493 if repo is None:
2499 2494 # If the repo has been GC'd (and this release function is being
2500 2495 # called from transaction.__del__), there's not much we can do,
2501 2496 # so just leave the unfinished transaction there and let the
2502 2497 # user run `hg recover`.
2503 2498 return
2504 2499 if success:
2505 2500 # this should be explicitly invoked here, because
2506 2501 # in-memory changes aren't written out when closing the
2507 2502 # transaction if tr.addfilegenerator (via
2508 2503 # dirstate.write or so) wasn't invoked while the
2509 2504 # transaction was running
2510 2505 repo.dirstate.write(None)
2511 2506 else:
2512 2507 # discard all changes (including ones already written
2513 2508 # out) in this transaction
2514 2509 narrowspec.restorebackup(self, b'journal.narrowspec')
2515 2510 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2516 2511 repo.dirstate.restorebackup(None, b'journal.dirstate')
2517 2512
2518 2513 repo.invalidate(clearfilecache=True)
2519 2514
2520 2515 tr = transaction.transaction(
2521 2516 rp,
2522 2517 self.svfs,
2523 2518 vfsmap,
2524 2519 b"journal",
2525 2520 b"undo",
2526 2521 aftertrans(renames),
2527 2522 self.store.createmode,
2528 2523 validator=validate,
2529 2524 releasefn=releasefn,
2530 2525 checkambigfiles=_cachedfiles,
2531 2526 name=desc,
2532 2527 )
2533 2528 tr.changes[b'origrepolen'] = len(self)
2534 2529 tr.changes[b'obsmarkers'] = set()
2535 2530 tr.changes[b'phases'] = []
2536 2531 tr.changes[b'bookmarks'] = {}
2537 2532
2538 2533 tr.hookargs[b'txnid'] = txnid
2539 2534 tr.hookargs[b'txnname'] = desc
2540 2535 tr.hookargs[b'changes'] = tr.changes
2541 2536 # note: writing the fncache only during finalize means that the file is
2542 2537 # outdated when running hooks. As fncache is used for streaming clones,
2543 2538 # this is not expected to break anything that happens during the hooks.
2544 2539 tr.addfinalize(b'flush-fncache', self.store.write)
2545 2540
2546 2541 def txnclosehook(tr2):
2547 2542 """To be run if transaction is successful, will schedule a hook run"""
2548 2543 # Don't reference tr2 in hook() so we don't hold a reference.
2549 2544 # This reduces memory consumption when there are multiple
2550 2545 # transactions per lock. This can likely go away if issue5045
2551 2546 # fixes the function accumulation.
2552 2547 hookargs = tr2.hookargs
2553 2548
2554 2549 def hookfunc(unused_success):
2555 2550 repo = reporef()
2556 2551 assert repo is not None # help pytype
2557 2552
2558 2553 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2559 2554 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2560 2555 for name, (old, new) in bmchanges:
2561 2556 args = tr.hookargs.copy()
2562 2557 args.update(bookmarks.preparehookargs(name, old, new))
2563 2558 repo.hook(
2564 2559 b'txnclose-bookmark',
2565 2560 throw=False,
2566 2561 **pycompat.strkwargs(args)
2567 2562 )
2568 2563
2569 2564 if hook.hashook(repo.ui, b'txnclose-phase'):
2570 2565 cl = repo.unfiltered().changelog
2571 2566 phasemv = sorted(
2572 2567 tr.changes[b'phases'], key=lambda r: r[0][0]
2573 2568 )
2574 2569 for revs, (old, new) in phasemv:
2575 2570 for rev in revs:
2576 2571 args = tr.hookargs.copy()
2577 2572 node = hex(cl.node(rev))
2578 2573 args.update(phases.preparehookargs(node, old, new))
2579 2574 repo.hook(
2580 2575 b'txnclose-phase',
2581 2576 throw=False,
2582 2577 **pycompat.strkwargs(args)
2583 2578 )
2584 2579
2585 2580 repo.hook(
2586 2581 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2587 2582 )
2588 2583
2589 2584 repo = reporef()
2590 2585 assert repo is not None # help pytype
2591 2586 repo._afterlock(hookfunc)
2592 2587
2593 2588 tr.addfinalize(b'txnclose-hook', txnclosehook)
2594 2589 # Include a leading "-" to make it happen before the transaction summary
2595 2590 # reports registered via scmutil.registersummarycallback() whose names
2596 2591 # are 00-txnreport etc. That way, the caches will be warm when the
2597 2592 # callbacks run.
2598 2593 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2599 2594
2600 2595 def txnaborthook(tr2):
2601 2596 """To be run if transaction is aborted"""
2602 2597 repo = reporef()
2603 2598 assert repo is not None # help pytype
2604 2599 repo.hook(
2605 2600 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2606 2601 )
2607 2602
2608 2603 tr.addabort(b'txnabort-hook', txnaborthook)
2609 2604 # avoid eager cache invalidation. in-memory data should be identical
2610 2605 # to stored data if transaction has no error.
2611 2606 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2612 2607 self._transref = weakref.ref(tr)
2613 2608 scmutil.registersummarycallback(self, tr, desc)
2614 2609 return tr
2615 2610
2616 2611 def _journalfiles(self):
2617 2612 return (
2618 2613 (self.svfs, b'journal'),
2619 2614 (self.svfs, b'journal.narrowspec'),
2620 2615 (self.vfs, b'journal.narrowspec.dirstate'),
2621 2616 (self.vfs, b'journal.dirstate'),
2622 2617 (self.vfs, b'journal.branch'),
2623 2618 (self.vfs, b'journal.desc'),
2624 2619 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2625 2620 (self.svfs, b'journal.phaseroots'),
2626 2621 )
2627 2622
2628 2623 def undofiles(self):
2629 2624 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2630 2625
2631 2626 @unfilteredmethod
2632 2627 def _writejournal(self, desc):
2633 2628 self.dirstate.savebackup(None, b'journal.dirstate')
2634 2629 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2635 2630 narrowspec.savebackup(self, b'journal.narrowspec')
2636 2631 self.vfs.write(
2637 2632 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2638 2633 )
2639 2634 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2640 2635 bookmarksvfs = bookmarks.bookmarksvfs(self)
2641 2636 bookmarksvfs.write(
2642 2637 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2643 2638 )
2644 2639 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2645 2640
2646 2641 def recover(self):
2647 2642 with self.lock():
2648 2643 if self.svfs.exists(b"journal"):
2649 2644 self.ui.status(_(b"rolling back interrupted transaction\n"))
2650 2645 vfsmap = {
2651 2646 b'': self.svfs,
2652 2647 b'plain': self.vfs,
2653 2648 }
2654 2649 transaction.rollback(
2655 2650 self.svfs,
2656 2651 vfsmap,
2657 2652 b"journal",
2658 2653 self.ui.warn,
2659 2654 checkambigfiles=_cachedfiles,
2660 2655 )
2661 2656 self.invalidate()
2662 2657 return True
2663 2658 else:
2664 2659 self.ui.warn(_(b"no interrupted transaction available\n"))
2665 2660 return False
2666 2661
2667 2662 def rollback(self, dryrun=False, force=False):
2668 2663 wlock = lock = dsguard = None
2669 2664 try:
2670 2665 wlock = self.wlock()
2671 2666 lock = self.lock()
2672 2667 if self.svfs.exists(b"undo"):
2673 2668 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2674 2669
2675 2670 return self._rollback(dryrun, force, dsguard)
2676 2671 else:
2677 2672 self.ui.warn(_(b"no rollback information available\n"))
2678 2673 return 1
2679 2674 finally:
2680 2675 release(dsguard, lock, wlock)
2681 2676
2682 2677 @unfilteredmethod # Until we get smarter cache management
2683 2678 def _rollback(self, dryrun, force, dsguard):
2684 2679 ui = self.ui
2685 2680 try:
2686 2681 args = self.vfs.read(b'undo.desc').splitlines()
2687 2682 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2688 2683 if len(args) >= 3:
2689 2684 detail = args[2]
2690 2685 oldtip = oldlen - 1
2691 2686
2692 2687 if detail and ui.verbose:
2693 2688 msg = _(
2694 2689 b'repository tip rolled back to revision %d'
2695 2690 b' (undo %s: %s)\n'
2696 2691 ) % (oldtip, desc, detail)
2697 2692 else:
2698 2693 msg = _(
2699 2694 b'repository tip rolled back to revision %d (undo %s)\n'
2700 2695 ) % (oldtip, desc)
2701 2696 except IOError:
2702 2697 msg = _(b'rolling back unknown transaction\n')
2703 2698 desc = None
2704 2699
2705 2700 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2706 2701 raise error.Abort(
2707 2702 _(
2708 2703 b'rollback of last commit while not checked out '
2709 2704 b'may lose data'
2710 2705 ),
2711 2706 hint=_(b'use -f to force'),
2712 2707 )
2713 2708
2714 2709 ui.status(msg)
2715 2710 if dryrun:
2716 2711 return 0
2717 2712
2718 2713 parents = self.dirstate.parents()
2719 2714 self.destroying()
2720 2715 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2721 2716 transaction.rollback(
2722 2717 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2723 2718 )
2724 2719 bookmarksvfs = bookmarks.bookmarksvfs(self)
2725 2720 if bookmarksvfs.exists(b'undo.bookmarks'):
2726 2721 bookmarksvfs.rename(
2727 2722 b'undo.bookmarks', b'bookmarks', checkambig=True
2728 2723 )
2729 2724 if self.svfs.exists(b'undo.phaseroots'):
2730 2725 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2731 2726 self.invalidate()
2732 2727
2733 2728 has_node = self.changelog.index.has_node
2734 2729 parentgone = any(not has_node(p) for p in parents)
2735 2730 if parentgone:
2736 2731 # prevent dirstateguard from overwriting already restored one
2737 2732 dsguard.close()
2738 2733
2739 2734 narrowspec.restorebackup(self, b'undo.narrowspec')
2740 2735 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2741 2736 self.dirstate.restorebackup(None, b'undo.dirstate')
2742 2737 try:
2743 2738 branch = self.vfs.read(b'undo.branch')
2744 2739 self.dirstate.setbranch(encoding.tolocal(branch))
2745 2740 except IOError:
2746 2741 ui.warn(
2747 2742 _(
2748 2743 b'named branch could not be reset: '
2749 2744 b'current branch is still \'%s\'\n'
2750 2745 )
2751 2746 % self.dirstate.branch()
2752 2747 )
2753 2748
2754 2749 parents = tuple([p.rev() for p in self[None].parents()])
2755 2750 if len(parents) > 1:
2756 2751 ui.status(
2757 2752 _(
2758 2753 b'working directory now based on '
2759 2754 b'revisions %d and %d\n'
2760 2755 )
2761 2756 % parents
2762 2757 )
2763 2758 else:
2764 2759 ui.status(
2765 2760 _(b'working directory now based on revision %d\n') % parents
2766 2761 )
2767 2762 mergestatemod.mergestate.clean(self)
2768 2763
2769 2764 # TODO: if we know which new heads may result from this rollback, pass
2770 2765 # them to destroy(), which will prevent the branchhead cache from being
2771 2766 # invalidated.
2772 2767 self.destroyed()
2773 2768 return 0
2774 2769
2775 2770 def _buildcacheupdater(self, newtransaction):
2776 2771 """called during transaction to build the callback updating cache
2777 2772
2778 2773 Lives on the repository to help extension who might want to augment
2779 2774 this logic. For this purpose, the created transaction is passed to the
2780 2775 method.
2781 2776 """
2782 2777 # we must avoid cyclic reference between repo and transaction.
2783 2778 reporef = weakref.ref(self)
2784 2779
2785 2780 def updater(tr):
2786 2781 repo = reporef()
2787 2782 assert repo is not None # help pytype
2788 2783 repo.updatecaches(tr)
2789 2784
2790 2785 return updater
2791 2786
2792 2787 @unfilteredmethod
2793 2788 def updatecaches(self, tr=None, full=False, caches=None):
2794 2789 """warm appropriate caches
2795 2790
2796 2791 If this function is called after a transaction was closed, the
2797 2792 transaction will be available in the 'tr' argument. This can be used to
2798 2793 selectively update caches relevant to the changes in that transaction.
2799 2794
2800 2795 If 'full' is set, make sure all caches the function knows about have
2801 2796 up-to-date data, even the ones usually loaded more lazily.
2802 2797
2803 2798 The `full` argument can take a special "post-clone" value. In this case
2804 2799 the cache warming happens right after a clone and some of the slower
2805 2800 caches might be skipped, namely the `.fnodetags` one. This argument is
2806 2801 5.8 specific, as we plan for a cleaner way to deal with this in 5.9.
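
A usage sketch (the CACHES_* constants are defined in
`mercurial.interfaces.repository`):

  repo.updatecaches(caches=repository.CACHES_ALL)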
2807 2802 """
2808 2803 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2809 2804 # During strip, many caches are invalid, but a
2810 2805 # later call to `destroyed` will refresh them.
2811 2806 return
2812 2807
2813 2808 unfi = self.unfiltered()
2814 2809
2815 2810 if full:
2816 2811 msg = (
2817 2812 "`full` argument for `repo.updatecaches` is deprecated\n"
2818 2813 "(use `caches=repository.CACHES_ALL` instead)"
2819 2814 )
2820 2815 self.ui.deprecwarn(msg, b"5.9")
2821 2816 caches = repository.CACHES_ALL
2822 2817 if full == b"post-clone":
2823 2818 caches = repository.CACHES_POST_CLONE
2825 2820 elif caches is None:
2826 2821 caches = repository.CACHES_DEFAULT
2827 2822
2828 2823 if repository.CACHE_BRANCHMAP_SERVED in caches:
2829 2824 if tr is None or tr.changes[b'origrepolen'] < len(self):
2830 2825 # accessing the 'served' branchmap should refresh all the others,
2831 2826 self.ui.debug(b'updating the branch cache\n')
2832 2827 self.filtered(b'served').branchmap()
2833 2828 self.filtered(b'served.hidden').branchmap()
2834 2829
2835 2830 if repository.CACHE_CHANGELOG_CACHE in caches:
2836 2831 self.changelog.update_caches(transaction=tr)
2837 2832
2838 2833 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2839 2834 self.manifestlog.update_caches(transaction=tr)
2840 2835
2841 2836 if repository.CACHE_REV_BRANCH in caches:
2842 2837 rbc = unfi.revbranchcache()
2843 2838 for r in unfi.changelog:
2844 2839 rbc.branchinfo(r)
2845 2840 rbc.write()
2846 2841
2847 2842 if repository.CACHE_FULL_MANIFEST in caches:
2848 2843 # ensure the working copy parents are in the manifestfulltextcache
2849 2844 for ctx in self[b'.'].parents():
2850 2845 ctx.manifest() # accessing the manifest is enough
2851 2846
2852 2847 if repository.CACHE_FILE_NODE_TAGS in caches:
2853 2848 # accessing fnode cache warms the cache
2854 2849 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2855 2850
2856 2851 if repository.CACHE_TAGS_DEFAULT in caches:
2857 2852 # accessing tags warms the cache
2858 2853 self.tags()
2859 2854 if repository.CACHE_TAGS_SERVED in caches:
2860 2855 self.filtered(b'served').tags()
2861 2856
2862 2857 if repository.CACHE_BRANCHMAP_ALL in caches:
2863 2858 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2864 2859 # so we're forcing a write to cause these caches to be warmed up
2865 2860 # even if they haven't explicitly been requested yet (if they've
2866 2861 # never been used by hg, they won't ever have been written, even if
2867 2862 # they're a subset of another kind of cache that *has* been used).
2868 2863 for filt in repoview.filtertable.keys():
2869 2864 filtered = self.filtered(filt)
2870 2865 filtered.branchmap().write(filtered)
2871 2866
2872 2867 def invalidatecaches(self):
2873 2868
2874 2869 if '_tagscache' in vars(self):
2875 2870 # can't use delattr on proxy
2876 2871 del self.__dict__['_tagscache']
2877 2872
2878 2873 self._branchcaches.clear()
2879 2874 self.invalidatevolatilesets()
2880 2875 self._sparsesignaturecache.clear()
2881 2876
2882 2877 def invalidatevolatilesets(self):
2883 2878 self.filteredrevcache.clear()
2884 2879 obsolete.clearobscaches(self)
2885 2880 self._quick_access_changeid_invalidate()
2886 2881
2887 2882 def invalidatedirstate(self):
2888 2883 """Invalidates the dirstate, causing the next call to dirstate
2889 2884 to check if it was modified since the last time it was read,
2890 2885 rereading it if it has.
2891 2886
2892 2887 This is different from dirstate.invalidate() in that it doesn't
2893 2888 always reread the dirstate. Use dirstate.invalidate() if you want to
2894 2889 explicitly read the dirstate again (i.e. restoring it to a previous
2895 2890 known good state)."""
2896 2891 if hasunfilteredcache(self, 'dirstate'):
2897 2892 for k in self.dirstate._filecache:
2898 2893 try:
2899 2894 delattr(self.dirstate, k)
2900 2895 except AttributeError:
2901 2896 pass
2902 2897 delattr(self.unfiltered(), 'dirstate')
2903 2898
2904 2899 def invalidate(self, clearfilecache=False):
2905 2900 """Invalidates both store and non-store parts other than dirstate
2906 2901
2907 2902 If a transaction is running, invalidation of store is omitted,
2908 2903 because discarding in-memory changes might cause inconsistency
2909 2904 (e.g. incomplete fncache causes unintentional failure, but
2910 2905 redundant one doesn't).
2911 2906 """
2912 2907 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2913 2908 for k in list(self._filecache.keys()):
2914 2909 # dirstate is invalidated separately in invalidatedirstate()
2915 2910 if k == b'dirstate':
2916 2911 continue
2917 2912 if (
2918 2913 k == b'changelog'
2919 2914 and self.currenttransaction()
2920 2915 and self.changelog._delayed
2921 2916 ):
2922 2917 # The changelog object may store unwritten revisions. We don't
2923 2918 # want to lose them.
2924 2919 # TODO: Solve the problem instead of working around it.
2925 2920 continue
2926 2921
2927 2922 if clearfilecache:
2928 2923 del self._filecache[k]
2929 2924 try:
2930 2925 delattr(unfiltered, k)
2931 2926 except AttributeError:
2932 2927 pass
2933 2928 self.invalidatecaches()
2934 2929 if not self.currenttransaction():
2935 2930 # TODO: Changing contents of store outside transaction
2936 2931 # causes inconsistency. We should make in-memory store
2937 2932 # changes detectable, and abort if changed.
2938 2933 self.store.invalidatecaches()
2939 2934
2940 2935 def invalidateall(self):
2941 2936 """Fully invalidates both store and non-store parts, causing the
2942 2937 subsequent operation to reread any outside changes."""
2943 2938 # extension should hook this to invalidate its caches
2944 2939 self.invalidate()
2945 2940 self.invalidatedirstate()
2946 2941
2947 2942 @unfilteredmethod
2948 2943 def _refreshfilecachestats(self, tr):
2949 2944 """Reload stats of cached files so that they are flagged as valid"""
2950 2945 for k, ce in self._filecache.items():
2951 2946 k = pycompat.sysstr(k)
2952 2947 if k == 'dirstate' or k not in self.__dict__:
2953 2948 continue
2954 2949 ce.refresh()
2955 2950
2956 2951 def _lock(
2957 2952 self,
2958 2953 vfs,
2959 2954 lockname,
2960 2955 wait,
2961 2956 releasefn,
2962 2957 acquirefn,
2963 2958 desc,
2964 2959 ):
2965 2960 timeout = 0
2966 2961 warntimeout = 0
2967 2962 if wait:
2968 2963 timeout = self.ui.configint(b"ui", b"timeout")
2969 2964 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2970 2965 # internal config: ui.signal-safe-lock
2971 2966 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2972 2967
2973 2968 l = lockmod.trylock(
2974 2969 self.ui,
2975 2970 vfs,
2976 2971 lockname,
2977 2972 timeout,
2978 2973 warntimeout,
2979 2974 releasefn=releasefn,
2980 2975 acquirefn=acquirefn,
2981 2976 desc=desc,
2982 2977 signalsafe=signalsafe,
2983 2978 )
2984 2979 return l
2985 2980
2986 2981 def _afterlock(self, callback):
2987 2982 """add a callback to be run when the repository is fully unlocked
2988 2983
2989 2984 The callback will be executed when the outermost lock is released
2990 2985 (with wlock being higher level than 'lock')."""
2991 2986 for ref in (self._wlockref, self._lockref):
2992 2987 l = ref and ref()
2993 2988 if l and l.held:
2994 2989 l.postrelease.append(callback)
2995 2990 break
2996 2991 else: # no lock has been found.
2997 2992 callback(True)
2998 2993
2999 2994 def lock(self, wait=True):
3000 2995 """Lock the repository store (.hg/store) and return a weak reference
3001 2996 to the lock. Use this before modifying the store (e.g. committing or
3002 2997 stripping). If you are opening a transaction, get a lock as well.
3003 2998
3004 2999 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3005 3000 'wlock' first to avoid a dead-lock hazard."""
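# A sketch of the recommended acquisition order described above; the
# transaction name is illustrative (locks and transactions are context
# managers):
#
#   with repo.wlock(), repo.lock(), repo.transaction(b'my-change') as tr:
#       ...  # mutate the store and/or the working copy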
3006 3001 l = self._currentlock(self._lockref)
3007 3002 if l is not None:
3008 3003 l.lock()
3009 3004 return l
3010 3005
3011 3006 l = self._lock(
3012 3007 vfs=self.svfs,
3013 3008 lockname=b"lock",
3014 3009 wait=wait,
3015 3010 releasefn=None,
3016 3011 acquirefn=self.invalidate,
3017 3012 desc=_(b'repository %s') % self.origroot,
3018 3013 )
3019 3014 self._lockref = weakref.ref(l)
3020 3015 return l
3021 3016
3022 3017 def wlock(self, wait=True):
3023 3018 """Lock the non-store parts of the repository (everything under
3024 3019 .hg except .hg/store) and return a weak reference to the lock.
3025 3020
3026 3021 Use this before modifying files in .hg.
3027 3022
3028 3023         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3029 3024 'wlock' first to avoid a dead-lock hazard."""
3030 3025 l = self._wlockref() if self._wlockref else None
3031 3026 if l is not None and l.held:
3032 3027 l.lock()
3033 3028 return l
3034 3029
3035 3030 # We do not need to check for non-waiting lock acquisition. Such
3036 3031         # acquisition would not cause a dead-lock as it would just fail.
3037 3032 if wait and (
3038 3033 self.ui.configbool(b'devel', b'all-warnings')
3039 3034 or self.ui.configbool(b'devel', b'check-locks')
3040 3035 ):
3041 3036 if self._currentlock(self._lockref) is not None:
3042 3037 self.ui.develwarn(b'"wlock" acquired after "lock"')
3043 3038
3044 3039 def unlock():
3045 3040 if self.dirstate.pendingparentchange():
3046 3041 self.dirstate.invalidate()
3047 3042 else:
3048 3043 self.dirstate.write(None)
3049 3044
3050 3045 self._filecache[b'dirstate'].refresh()
3051 3046
3052 3047 l = self._lock(
3053 3048 self.vfs,
3054 3049 b"wlock",
3055 3050 wait,
3056 3051 unlock,
3057 3052 self.invalidatedirstate,
3058 3053 _(b'working directory of %s') % self.origroot,
3059 3054 )
3060 3055 self._wlockref = weakref.ref(l)
3061 3056 return l
3062 3057
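    # Example (sketch): when both locks are needed, acquire 'wlock' first to
    # avoid the dead-lock hazard described above, as commit() does below:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # modify both the working copy and the store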
3063 3058 def _currentlock(self, lockref):
3064 3059 """Returns the lock if it's held, or None if it's not."""
3065 3060 if lockref is None:
3066 3061 return None
3067 3062 l = lockref()
3068 3063 if l is None or not l.held:
3069 3064 return None
3070 3065 return l
3071 3066
3072 3067 def currentwlock(self):
3073 3068 """Returns the wlock if it's held, or None if it's not."""
3074 3069 return self._currentlock(self._wlockref)
3075 3070
3076 3071 def checkcommitpatterns(self, wctx, match, status, fail):
3077 3072 """check for commit arguments that aren't committable"""
3078 3073 if match.isexact() or match.prefix():
3079 3074 matched = set(status.modified + status.added + status.removed)
3080 3075
3081 3076 for f in match.files():
3082 3077 f = self.dirstate.normalize(f)
3083 3078 if f == b'.' or f in matched or f in wctx.substate:
3084 3079 continue
3085 3080 if f in status.deleted:
3086 3081 fail(f, _(b'file not found!'))
3087 3082 # Is it a directory that exists or used to exist?
3088 3083 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3089 3084 d = f + b'/'
3090 3085 for mf in matched:
3091 3086 if mf.startswith(d):
3092 3087 break
3093 3088 else:
3094 3089 fail(f, _(b"no match under directory!"))
3095 3090 elif f not in self.dirstate:
3096 3091 fail(f, _(b"file not tracked!"))
3097 3092
3098 3093 @unfilteredmethod
3099 3094 def commit(
3100 3095 self,
3101 3096 text=b"",
3102 3097 user=None,
3103 3098 date=None,
3104 3099 match=None,
3105 3100 force=False,
3106 3101 editor=None,
3107 3102 extra=None,
3108 3103 ):
3109 3104 """Add a new revision to current repository.
3110 3105
3111 3106 Revision information is gathered from the working directory,
3112 3107 match can be used to filter the committed files. If editor is
3113 3108 supplied, it is called to get a commit message.
3114 3109 """
3115 3110 if extra is None:
3116 3111 extra = {}
3117 3112
3118 3113 def fail(f, msg):
3119 3114 raise error.InputError(b'%s: %s' % (f, msg))
3120 3115
3121 3116 if not match:
3122 3117 match = matchmod.always()
3123 3118
3124 3119 if not force:
3125 3120 match.bad = fail
3126 3121
3127 3122 # lock() for recent changelog (see issue4368)
3128 3123 with self.wlock(), self.lock():
3129 3124 wctx = self[None]
3130 3125 merge = len(wctx.parents()) > 1
3131 3126
3132 3127 if not force and merge and not match.always():
3133 3128 raise error.Abort(
3134 3129 _(
3135 3130 b'cannot partially commit a merge '
3136 3131 b'(do not specify files or patterns)'
3137 3132 )
3138 3133 )
3139 3134
3140 3135 status = self.status(match=match, clean=force)
3141 3136 if force:
3142 3137 status.modified.extend(
3143 3138 status.clean
3144 3139 ) # mq may commit clean files
3145 3140
3146 3141 # check subrepos
3147 3142 subs, commitsubs, newstate = subrepoutil.precommit(
3148 3143 self.ui, wctx, status, match, force=force
3149 3144 )
3150 3145
3151 3146 # make sure all explicit patterns are matched
3152 3147 if not force:
3153 3148 self.checkcommitpatterns(wctx, match, status, fail)
3154 3149
3155 3150 cctx = context.workingcommitctx(
3156 3151 self, status, text, user, date, extra
3157 3152 )
3158 3153
3159 3154 ms = mergestatemod.mergestate.read(self)
3160 3155 mergeutil.checkunresolved(ms)
3161 3156
3162 3157 # internal config: ui.allowemptycommit
3163 3158 if cctx.isempty() and not self.ui.configbool(
3164 3159 b'ui', b'allowemptycommit'
3165 3160 ):
3166 3161 self.ui.debug(b'nothing to commit, clearing merge state\n')
3167 3162 ms.reset()
3168 3163 return None
3169 3164
3170 3165 if merge and cctx.deleted():
3171 3166 raise error.Abort(_(b"cannot commit merge with missing files"))
3172 3167
3173 3168 if editor:
3174 3169 cctx._text = editor(self, cctx, subs)
3175 3170 edited = text != cctx._text
3176 3171
3177 3172 # Save commit message in case this transaction gets rolled back
3178 3173 # (e.g. by a pretxncommit hook). Leave the content alone on
3179 3174 # the assumption that the user will use the same editor again.
3180 3175 msgfn = self.savecommitmessage(cctx._text)
3181 3176
3182 3177 # commit subs and write new state
3183 3178 if subs:
3184 3179 uipathfn = scmutil.getuipathfn(self)
3185 3180 for s in sorted(commitsubs):
3186 3181 sub = wctx.sub(s)
3187 3182 self.ui.status(
3188 3183 _(b'committing subrepository %s\n')
3189 3184 % uipathfn(subrepoutil.subrelpath(sub))
3190 3185 )
3191 3186 sr = sub.commit(cctx._text, user, date)
3192 3187 newstate[s] = (newstate[s][0], sr)
3193 3188 subrepoutil.writestate(self, newstate)
3194 3189
3195 3190 p1, p2 = self.dirstate.parents()
3196 3191 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3197 3192 try:
3198 3193 self.hook(
3199 3194 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3200 3195 )
3201 3196 with self.transaction(b'commit'):
3202 3197 ret = self.commitctx(cctx, True)
3203 3198 # update bookmarks, dirstate and mergestate
3204 3199 bookmarks.update(self, [p1, p2], ret)
3205 3200 cctx.markcommitted(ret)
3206 3201 ms.reset()
3207 3202 except: # re-raises
3208 3203 if edited:
3209 3204 self.ui.write(
3210 3205 _(b'note: commit message saved in %s\n') % msgfn
3211 3206 )
3212 3207 self.ui.write(
3213 3208 _(
3214 3209 b"note: use 'hg commit --logfile "
3215 3210 b".hg/last-message.txt --edit' to reuse it\n"
3216 3211 )
3217 3212 )
3218 3213 raise
3219 3214
3220 3215 def commithook(unused_success):
3221 3216             # hack for commands that use a temporary commit (e.g. histedit):
3222 3217             # the temporary commit may have been stripped before the hook runs
3223 3218 if self.changelog.hasnode(ret):
3224 3219 self.hook(
3225 3220 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3226 3221 )
3227 3222
3228 3223 self._afterlock(commithook)
3229 3224 return ret
3230 3225
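    # Example (sketch): a minimal programmatic commit of all outstanding
    # changes; the message and user values are illustrative:
    #
    #     node = repo.commit(text=b'fix bug', user=b'alice <a@example.com>')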
3231 3226 @unfilteredmethod
3232 3227 def commitctx(self, ctx, error=False, origctx=None):
3233 3228 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3234 3229
3235 3230 @unfilteredmethod
3236 3231 def destroying(self):
3237 3232 """Inform the repository that nodes are about to be destroyed.
3238 3233 Intended for use by strip and rollback, so there's a common
3239 3234 place for anything that has to be done before destroying history.
3240 3235
3241 3236 This is mostly useful for saving state that is in memory and waiting
3242 3237 to be flushed when the current lock is released. Because a call to
3243 3238 destroyed is imminent, the repo will be invalidated causing those
3244 3239 changes to stay in memory (waiting for the next unlock), or vanish
3245 3240 completely.
3246 3241 """
3247 3242 # When using the same lock to commit and strip, the phasecache is left
3248 3243 # dirty after committing. Then when we strip, the repo is invalidated,
3249 3244 # causing those changes to disappear.
3250 3245 if '_phasecache' in vars(self):
3251 3246 self._phasecache.write()
3252 3247
3253 3248 @unfilteredmethod
3254 3249 def destroyed(self):
3255 3250 """Inform the repository that nodes have been destroyed.
3256 3251 Intended for use by strip and rollback, so there's a common
3257 3252 place for anything that has to be done after destroying history.
3258 3253 """
3259 3254 # When one tries to:
3260 3255 # 1) destroy nodes thus calling this method (e.g. strip)
3261 3256 # 2) use phasecache somewhere (e.g. commit)
3262 3257 #
3263 3258 # then 2) will fail because the phasecache contains nodes that were
3264 3259 # removed. We can either remove phasecache from the filecache,
3265 3260 # causing it to reload next time it is accessed, or simply filter
3266 3261 # the removed nodes now and write the updated cache.
3267 3262 self._phasecache.filterunknown(self)
3268 3263 self._phasecache.write()
3269 3264
3270 3265 # refresh all repository caches
3271 3266 self.updatecaches()
3272 3267
3273 3268 # Ensure the persistent tag cache is updated. Doing it now
3274 3269 # means that the tag cache only has to worry about destroyed
3275 3270 # heads immediately after a strip/rollback. That in turn
3276 3271 # guarantees that "cachetip == currenttip" (comparing both rev
3277 3272 # and node) always means no nodes have been added or destroyed.
3278 3273
3279 3274 # XXX this is suboptimal when qrefresh'ing: we strip the current
3280 3275 # head, refresh the tag cache, then immediately add a new head.
3281 3276 # But I think doing it this way is necessary for the "instant
3282 3277 # tag cache retrieval" case to work.
3283 3278 self.invalidate()
3284 3279
3285 3280 def status(
3286 3281 self,
3287 3282 node1=b'.',
3288 3283 node2=None,
3289 3284 match=None,
3290 3285 ignored=False,
3291 3286 clean=False,
3292 3287 unknown=False,
3293 3288 listsubrepos=False,
3294 3289 ):
3295 3290 '''a convenience method that calls node1.status(node2)'''
3296 3291 return self[node1].status(
3297 3292 node2, match, ignored, clean, unknown, listsubrepos
3298 3293 )
3299 3294
3300 3295 def addpostdsstatus(self, ps):
3301 3296 """Add a callback to run within the wlock, at the point at which status
3302 3297 fixups happen.
3303 3298
3304 3299 On status completion, callback(wctx, status) will be called with the
3305 3300 wlock held, unless the dirstate has changed from underneath or the wlock
3306 3301 couldn't be grabbed.
3307 3302
3308 3303 Callbacks should not capture and use a cached copy of the dirstate --
3309 3304         it might change in the meantime. Instead, they should access the
3310 3305 dirstate via wctx.repo().dirstate.
3311 3306
3312 3307 This list is emptied out after each status run -- extensions should
3313 3308         make sure they add to this list each time dirstate.status is called.
3314 3309 Extensions should also make sure they don't call this for statuses
3315 3310 that don't involve the dirstate.
3316 3311 """
3317 3312
3318 3313 # The list is located here for uniqueness reasons -- it is actually
3319 3314 # managed by the workingctx, but that isn't unique per-repo.
3320 3315 self._postdsstatus.append(ps)
3321 3316
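    # Example (hypothetical): a post-dirstate-status callback; per the note
    # above, it reads the dirstate through wctx.repo() instead of caching it:
    #
    #     def fixup(wctx, status):
    #         dirstate = wctx.repo().dirstate  # always the current dirstate
    #
    #     repo.addpostdsstatus(fixup)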
3322 3317 def postdsstatus(self):
3323 3318 """Used by workingctx to get the list of post-dirstate-status hooks."""
3324 3319 return self._postdsstatus
3325 3320
3326 3321 def clearpostdsstatus(self):
3327 3322 """Used by workingctx to clear post-dirstate-status hooks."""
3328 3323 del self._postdsstatus[:]
3329 3324
3330 3325 def heads(self, start=None):
3331 3326 if start is None:
3332 3327 cl = self.changelog
3333 3328 headrevs = reversed(cl.headrevs())
3334 3329 return [cl.node(rev) for rev in headrevs]
3335 3330
3336 3331 heads = self.changelog.heads(start)
3337 3332 # sort the output in rev descending order
3338 3333 return sorted(heads, key=self.changelog.rev, reverse=True)
3339 3334
3340 3335 def branchheads(self, branch=None, start=None, closed=False):
3341 3336 """return a (possibly filtered) list of heads for the given branch
3342 3337
3343 3338 Heads are returned in topological order, from newest to oldest.
3344 3339 If branch is None, use the dirstate branch.
3345 3340 If start is not None, return only heads reachable from start.
3346 3341 If closed is True, return heads that are marked as closed as well.
3347 3342 """
3348 3343 if branch is None:
3349 3344 branch = self[None].branch()
3350 3345 branches = self.branchmap()
3351 3346 if not branches.hasbranch(branch):
3352 3347 return []
3353 3348 # the cache returns heads ordered lowest to highest
3354 3349 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3355 3350 if start is not None:
3356 3351 # filter out the heads that cannot be reached from startrev
3357 3352 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3358 3353 bheads = [h for h in bheads if h in fbheads]
3359 3354 return bheads
3360 3355
3361 3356 def branches(self, nodes):
3362 3357 if not nodes:
3363 3358 nodes = [self.changelog.tip()]
3364 3359 b = []
3365 3360 for n in nodes:
3366 3361 t = n
3367 3362 while True:
3368 3363 p = self.changelog.parents(n)
3369 3364 if p[1] != self.nullid or p[0] == self.nullid:
3370 3365 b.append((t, n, p[0], p[1]))
3371 3366 break
3372 3367 n = p[0]
3373 3368 return b
3374 3369
3375 3370 def between(self, pairs):
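        # For each (top, bottom) pair, walk first parents from top toward
        # bottom and record the nodes seen at exponentially growing steps
        # (1, 2, 4, 8, ...), yielding a sparse sample of each chain.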
3376 3371 r = []
3377 3372
3378 3373 for top, bottom in pairs:
3379 3374 n, l, i = top, [], 0
3380 3375 f = 1
3381 3376
3382 3377 while n != bottom and n != self.nullid:
3383 3378 p = self.changelog.parents(n)[0]
3384 3379 if i == f:
3385 3380 l.append(n)
3386 3381 f = f * 2
3387 3382 n = p
3388 3383 i += 1
3389 3384
3390 3385 r.append(l)
3391 3386
3392 3387 return r
3393 3388
3394 3389 def checkpush(self, pushop):
3395 3390 """Extensions can override this function if additional checks have
3396 3391 to be performed before pushing, or call it if they override push
3397 3392 command.
3398 3393 """
3399 3394
3400 3395 @unfilteredpropertycache
3401 3396 def prepushoutgoinghooks(self):
3402 3397         """Return a util.hooks object whose hooks receive a pushop (with repo,
3403 3398         remote and outgoing attributes) and are called before pushing changesets.
3404 3399 """
3405 3400 return util.hooks()
3406 3401
3407 3402 def pushkey(self, namespace, key, old, new):
3408 3403 try:
3409 3404 tr = self.currenttransaction()
3410 3405 hookargs = {}
3411 3406 if tr is not None:
3412 3407 hookargs.update(tr.hookargs)
3413 3408 hookargs = pycompat.strkwargs(hookargs)
3414 3409 hookargs['namespace'] = namespace
3415 3410 hookargs['key'] = key
3416 3411 hookargs['old'] = old
3417 3412 hookargs['new'] = new
3418 3413 self.hook(b'prepushkey', throw=True, **hookargs)
3419 3414 except error.HookAbort as exc:
3420 3415 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3421 3416 if exc.hint:
3422 3417 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3423 3418 return False
3424 3419 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3425 3420 ret = pushkey.push(self, namespace, key, old, new)
3426 3421
3427 3422 def runhook(unused_success):
3428 3423 self.hook(
3429 3424 b'pushkey',
3430 3425 namespace=namespace,
3431 3426 key=key,
3432 3427 old=old,
3433 3428 new=new,
3434 3429 ret=ret,
3435 3430 )
3436 3431
3437 3432 self._afterlock(runhook)
3438 3433 return ret
3439 3434
3440 3435 def listkeys(self, namespace):
3441 3436 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3442 3437 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3443 3438 values = pushkey.list(self, namespace)
3444 3439 self.hook(b'listkeys', namespace=namespace, values=values)
3445 3440 return values
3446 3441
3447 3442 def debugwireargs(self, one, two, three=None, four=None, five=None):
3448 3443 '''used to test argument passing over the wire'''
3449 3444 return b"%s %s %s %s %s" % (
3450 3445 one,
3451 3446 two,
3452 3447 pycompat.bytestr(three),
3453 3448 pycompat.bytestr(four),
3454 3449 pycompat.bytestr(five),
3455 3450 )
3456 3451
3457 3452 def savecommitmessage(self, text):
3458 3453 fp = self.vfs(b'last-message.txt', b'wb')
3459 3454 try:
3460 3455 fp.write(text)
3461 3456 finally:
3462 3457 fp.close()
3463 3458 return self.pathto(fp.name[len(self.root) + 1 :])
3464 3459
3465 3460 def register_wanted_sidedata(self, category):
3466 3461 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3467 3462 # Only revlogv2 repos can want sidedata.
3468 3463 return
3469 3464 self._wanted_sidedata.add(pycompat.bytestr(category))
3470 3465
3471 3466 def register_sidedata_computer(
3472 3467 self, kind, category, keys, computer, flags, replace=False
3473 3468 ):
3474 3469 if kind not in revlogconst.ALL_KINDS:
3475 3470 msg = _(b"unexpected revlog kind '%s'.")
3476 3471 raise error.ProgrammingError(msg % kind)
3477 3472 category = pycompat.bytestr(category)
3478 3473 already_registered = category in self._sidedata_computers.get(kind, [])
3479 3474 if already_registered and not replace:
3480 3475 msg = _(
3481 3476 b"cannot register a sidedata computer twice for category '%s'."
3482 3477 )
3483 3478 raise error.ProgrammingError(msg % category)
3484 3479 if replace and not already_registered:
3485 3480 msg = _(
3486 3481 b"cannot replace a sidedata computer that isn't registered "
3487 3482 b"for category '%s'."
3488 3483 )
3489 3484 raise error.ProgrammingError(msg % category)
3490 3485 self._sidedata_computers.setdefault(kind, {})
3491 3486 self._sidedata_computers[kind][category] = (keys, computer, flags)
3492 3487
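    # Example (hypothetical): an extension registering a sidedata computer
    # for the changelog; the category, keys and computer are illustrative:
    #
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,
    #         b'exp-my-category',
    #         keys=(b'exp-my-key',),
    #         computer=my_computer,
    #         flags=0,
    #     )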
3493 3488
3494 3489 # used to avoid circular references so destructors work
3495 3490 def aftertrans(files):
3496 3491 renamefiles = [tuple(t) for t in files]
3497 3492
3498 3493 def a():
3499 3494 for vfs, src, dest in renamefiles:
3500 3495             # if src and dest refer to the same file, vfs.rename is a no-op,
3501 3496             # leaving both src and dest on disk. delete dest to make sure
3502 3497             # the rename cannot be such a no-op.
3503 3498 vfs.tryunlink(dest)
3504 3499 try:
3505 3500 vfs.rename(src, dest)
3506 3501 except OSError as exc: # journal file does not yet exist
3507 3502 if exc.errno != errno.ENOENT:
3508 3503 raise
3509 3504
3510 3505 return a
3511 3506
3512 3507
3513 3508 def undoname(fn):
3514 3509 base, name = os.path.split(fn)
3515 3510 assert name.startswith(b'journal')
3516 3511 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3517 3512
3518 3513
3519 3514 def instance(ui, path, create, intents=None, createopts=None):
3520 3515 localpath = urlutil.urllocalpath(path)
3521 3516 if create:
3522 3517 createrepository(ui, localpath, createopts=createopts)
3523 3518
3524 3519 return makelocalrepository(ui, localpath, intents=intents)
3525 3520
3526 3521
3527 3522 def islocal(path):
3528 3523 return True
3529 3524
3530 3525
3531 3526 def defaultcreateopts(ui, createopts=None):
3532 3527 """Populate the default creation options for a repository.
3533 3528
3534 3529 A dictionary of explicitly requested creation options can be passed
3535 3530 in. Missing keys will be populated.
3536 3531 """
3537 3532 createopts = dict(createopts or {})
3538 3533
3539 3534 if b'backend' not in createopts:
3540 3535 # experimental config: storage.new-repo-backend
3541 3536 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3542 3537
3543 3538 return createopts
3544 3539
3545 3540
3546 3541 def clone_requirements(ui, createopts, srcrepo):
3547 3542 """clone the requirements of a local repo for a local clone
3548 3543
3549 3544 The store requirements are unchanged while the working copy requirements
3550 3545     depend on the configuration.
3551 3546 """
3552 3547 target_requirements = set()
3553 3548 createopts = defaultcreateopts(ui, createopts=createopts)
3554 3549 for r in newreporequirements(ui, createopts):
3555 3550 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3556 3551 target_requirements.add(r)
3557 3552
3558 3553 for r in srcrepo.requirements:
3559 3554 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3560 3555 target_requirements.add(r)
3561 3556 return target_requirements
3562 3557
3563 3558
3564 3559 def newreporequirements(ui, createopts):
3565 3560 """Determine the set of requirements for a new local repository.
3566 3561
3567 3562 Extensions can wrap this function to specify custom requirements for
3568 3563 new repositories.
3569 3564 """
3570 3565
3571 3566 if b'backend' not in createopts:
3572 3567 raise error.ProgrammingError(
3573 3568 b'backend key not present in createopts; '
3574 3569 b'was defaultcreateopts() called?'
3575 3570 )
3576 3571
3577 3572 if createopts[b'backend'] != b'revlogv1':
3578 3573 raise error.Abort(
3579 3574 _(
3580 3575 b'unable to determine repository requirements for '
3581 3576 b'storage backend: %s'
3582 3577 )
3583 3578 % createopts[b'backend']
3584 3579 )
3585 3580
3586 3581 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3587 3582 if ui.configbool(b'format', b'usestore'):
3588 3583 requirements.add(requirementsmod.STORE_REQUIREMENT)
3589 3584 if ui.configbool(b'format', b'usefncache'):
3590 3585 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3591 3586 if ui.configbool(b'format', b'dotencode'):
3592 3587 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3593 3588
3594 3589 compengines = ui.configlist(b'format', b'revlog-compression')
3595 3590 for compengine in compengines:
3596 3591 if compengine in util.compengines:
3597 3592 engine = util.compengines[compengine]
3598 3593 if engine.available() and engine.revlogheader():
3599 3594 break
3600 3595 else:
3601 3596 raise error.Abort(
3602 3597 _(
3603 3598 b'compression engines %s defined by '
3604 3599 b'format.revlog-compression not available'
3605 3600 )
3606 3601 % b', '.join(b'"%s"' % e for e in compengines),
3607 3602 hint=_(
3608 3603 b'run "hg debuginstall" to list available '
3609 3604 b'compression engines'
3610 3605 ),
3611 3606 )
3612 3607
3613 3608 # zlib is the historical default and doesn't need an explicit requirement.
3614 3609 if compengine == b'zstd':
3615 3610 requirements.add(b'revlog-compression-zstd')
3616 3611 elif compengine != b'zlib':
3617 3612 requirements.add(b'exp-compression-%s' % compengine)
3618 3613
3619 3614 if scmutil.gdinitconfig(ui):
3620 3615 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3621 3616 if ui.configbool(b'format', b'sparse-revlog'):
3622 3617 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3623 3618
3624 3619 # experimental config: format.exp-rc-dirstate-v2
3625 3620 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3626 3621 if ui.configbool(b'format', b'exp-rc-dirstate-v2'):
3627 3622 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3628 3623
3629 3624 # experimental config: format.exp-use-copies-side-data-changeset
3630 3625 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3631 3626 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3632 3627 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3633 3628 if ui.configbool(b'experimental', b'treemanifest'):
3634 3629 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3635 3630
3636 3631 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3637 3632 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3638 3633 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3639 3634
3640 3635 revlogv2 = ui.config(b'experimental', b'revlogv2')
3641 3636 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3642 3637 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3643 3638 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3644 3639 # experimental config: format.internal-phase
3645 3640 if ui.configbool(b'format', b'internal-phase'):
3646 3641 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3647 3642
3648 3643 if createopts.get(b'narrowfiles'):
3649 3644 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3650 3645
3651 3646 if createopts.get(b'lfs'):
3652 3647 requirements.add(b'lfs')
3653 3648
3654 3649 if ui.configbool(b'format', b'bookmarks-in-store'):
3655 3650 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3656 3651
3657 3652 if ui.configbool(b'format', b'use-persistent-nodemap'):
3658 3653 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3659 3654
3660 3655 # if share-safe is enabled, let's create the new repository with the new
3661 3656 # requirement
3662 3657 if ui.configbool(b'format', b'use-share-safe'):
3663 3658 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3664 3659
3665 3660     # if we are creating a share-repo¹ we have to handle requirements
3666 3661 # differently.
3667 3662 #
3668 3663 # [1] (i.e. reusing the store from another repository, just having a
3669 3664 # working copy)
3670 3665 if b'sharedrepo' in createopts:
3671 3666 source_requirements = set(createopts[b'sharedrepo'].requirements)
3672 3667
3673 3668 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3674 3669             # when sharing to an old-school repository, we have to copy the
3675 3670 # requirements and hope for the best.
3676 3671 requirements = source_requirements
3677 3672 else:
3678 3673             # We have control over the working copy only, so "copy" the
3679 3674             # non-working-copy part over, ignoring the previous logic.
3680 3675 to_drop = set()
3681 3676 for req in requirements:
3682 3677 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3683 3678 continue
3684 3679 if req in source_requirements:
3685 3680 continue
3686 3681 to_drop.add(req)
3687 3682 requirements -= to_drop
3688 3683 requirements |= source_requirements
3689 3684
3690 3685 if createopts.get(b'sharedrelative'):
3691 3686 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3692 3687 else:
3693 3688 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3694 3689
3695 3690 return requirements
3696 3691
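# Example (hypothetical extension code): adding a custom requirement by
# wrapping the function above; b'exp-myext' is an illustrative name:
#
#     def _wrapped(orig, ui, createopts):
#         requirements = orig(ui, createopts)
#         requirements.add(b'exp-myext')
#         return requirements
#
#     extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)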
3697 3692
3698 3693 def checkrequirementscompat(ui, requirements):
3699 3694 """Checks compatibility of repository requirements enabled and disabled.
3700 3695
3701 3696     Returns a set of requirements which need to be dropped because the
3702 3697     requirements they depend on are not enabled. Also warns users about it."""
3703 3698
3704 3699 dropped = set()
3705 3700
3706 3701 if requirementsmod.STORE_REQUIREMENT not in requirements:
3707 3702 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3708 3703 ui.warn(
3709 3704 _(
3710 3705 b'ignoring enabled \'format.bookmarks-in-store\' config '
3711 3706                     b'because it is incompatible with disabled '
3712 3707 b'\'format.usestore\' config\n'
3713 3708 )
3714 3709 )
3715 3710 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3716 3711
3717 3712 if (
3718 3713 requirementsmod.SHARED_REQUIREMENT in requirements
3719 3714 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3720 3715 ):
3721 3716 raise error.Abort(
3722 3717 _(
3723 3718 b"cannot create shared repository as source was created"
3724 3719 b" with 'format.usestore' config disabled"
3725 3720 )
3726 3721 )
3727 3722
3728 3723 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3729 3724 ui.warn(
3730 3725 _(
3731 3726 b"ignoring enabled 'format.use-share-safe' config because "
3732 3727 b"it is incompatible with disabled 'format.usestore'"
3733 3728 b" config\n"
3734 3729 )
3735 3730 )
3736 3731 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3737 3732
3738 3733 return dropped
3739 3734
3740 3735
3741 3736 def filterknowncreateopts(ui, createopts):
3742 3737 """Filters a dict of repo creation options against options that are known.
3743 3738
3744 3739 Receives a dict of repo creation options and returns a dict of those
3745 3740 options that we don't know how to handle.
3746 3741
3747 3742 This function is called as part of repository creation. If the
3748 3743 returned dict contains any items, repository creation will not
3749 3744 be allowed, as it means there was a request to create a repository
3750 3745 with options not recognized by loaded code.
3751 3746
3752 3747 Extensions can wrap this function to filter out creation options
3753 3748 they know how to handle.
3754 3749 """
3755 3750 known = {
3756 3751 b'backend',
3757 3752 b'lfs',
3758 3753 b'narrowfiles',
3759 3754 b'sharedrepo',
3760 3755 b'sharedrelative',
3761 3756 b'shareditems',
3762 3757 b'shallowfilestore',
3763 3758 }
3764 3759
3765 3760 return {k: v for k, v in createopts.items() if k not in known}
3766 3761
3767 3762
3768 3763 def createrepository(ui, path, createopts=None, requirements=None):
3769 3764 """Create a new repository in a vfs.
3770 3765
3771 3766 ``path`` path to the new repo's working directory.
3772 3767 ``createopts`` options for the new repository.
3773 3768     ``requirements`` predefined set of requirements.
3774 3769 (incompatible with ``createopts``)
3775 3770
3776 3771 The following keys for ``createopts`` are recognized:
3777 3772
3778 3773 backend
3779 3774 The storage backend to use.
3780 3775 lfs
3781 3776 Repository will be created with ``lfs`` requirement. The lfs extension
3782 3777 will automatically be loaded when the repository is accessed.
3783 3778 narrowfiles
3784 3779 Set up repository to support narrow file storage.
3785 3780 sharedrepo
3786 3781 Repository object from which storage should be shared.
3787 3782 sharedrelative
3788 3783 Boolean indicating if the path to the shared repo should be
3789 3784 stored as relative. By default, the pointer to the "parent" repo
3790 3785 is stored as an absolute path.
3791 3786 shareditems
3792 3787 Set of items to share to the new repository (in addition to storage).
3793 3788 shallowfilestore
3794 3789 Indicates that storage for files should be shallow (not all ancestor
3795 3790 revisions are known).
3796 3791 """
3797 3792
3798 3793 if requirements is not None:
3799 3794 if createopts is not None:
3800 3795 msg = b'cannot specify both createopts and requirements'
3801 3796 raise error.ProgrammingError(msg)
3802 3797 createopts = {}
3803 3798 else:
3804 3799 createopts = defaultcreateopts(ui, createopts=createopts)
3805 3800
3806 3801 unknownopts = filterknowncreateopts(ui, createopts)
3807 3802
3808 3803 if not isinstance(unknownopts, dict):
3809 3804 raise error.ProgrammingError(
3810 3805 b'filterknowncreateopts() did not return a dict'
3811 3806 )
3812 3807
3813 3808 if unknownopts:
3814 3809 raise error.Abort(
3815 3810 _(
3816 3811 b'unable to create repository because of unknown '
3817 3812 b'creation option: %s'
3818 3813 )
3819 3814 % b', '.join(sorted(unknownopts)),
3820 3815 hint=_(b'is a required extension not loaded?'),
3821 3816 )
3822 3817
3823 3818 requirements = newreporequirements(ui, createopts=createopts)
3824 3819 requirements -= checkrequirementscompat(ui, requirements)
3825 3820
3826 3821 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3827 3822
3828 3823 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3829 3824 if hgvfs.exists():
3830 3825 raise error.RepoError(_(b'repository %s already exists') % path)
3831 3826
3832 3827 if b'sharedrepo' in createopts:
3833 3828 sharedpath = createopts[b'sharedrepo'].sharedpath
3834 3829
3835 3830 if createopts.get(b'sharedrelative'):
3836 3831 try:
3837 3832 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3838 3833 sharedpath = util.pconvert(sharedpath)
3839 3834 except (IOError, ValueError) as e:
3840 3835 # ValueError is raised on Windows if the drive letters differ
3841 3836 # on each path.
3842 3837 raise error.Abort(
3843 3838 _(b'cannot calculate relative path'),
3844 3839 hint=stringutil.forcebytestr(e),
3845 3840 )
3846 3841
3847 3842 if not wdirvfs.exists():
3848 3843 wdirvfs.makedirs()
3849 3844
3850 3845 hgvfs.makedir(notindexed=True)
3851 3846 if b'sharedrepo' not in createopts:
3852 3847 hgvfs.mkdir(b'cache')
3853 3848 hgvfs.mkdir(b'wcache')
3854 3849
3855 3850 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3856 3851 if has_store and b'sharedrepo' not in createopts:
3857 3852 hgvfs.mkdir(b'store')
3858 3853
3859 3854 # We create an invalid changelog outside the store so very old
3860 3855 # Mercurial versions (which didn't know about the requirements
3861 3856 # file) encounter an error on reading the changelog. This
3862 3857 # effectively locks out old clients and prevents them from
3863 3858 # mucking with a repo in an unknown format.
3864 3859 #
3865 3860 # The revlog header has version 65535, which won't be recognized by
3866 3861 # such old clients.
3867 3862 hgvfs.append(
3868 3863 b'00changelog.i',
3869 3864 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3870 3865 b'layout',
3871 3866 )
3872 3867
3873 3868 # Filter the requirements into working copy and store ones
3874 3869 wcreq, storereq = scmutil.filterrequirements(requirements)
3875 3870 # write working copy ones
3876 3871 scmutil.writerequires(hgvfs, wcreq)
3877 3872     # If there are store requirements and the current repository
3878 3873     # is not a shared one, write the store requirements.
3879 3874     # For a new shared repository, we don't need to write the store
3880 3875     # requirements as they are already present in the source's store requires.
3881 3876 if storereq and b'sharedrepo' not in createopts:
3882 3877 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3883 3878 scmutil.writerequires(storevfs, storereq)
3884 3879
3885 3880 # Write out file telling readers where to find the shared store.
3886 3881 if b'sharedrepo' in createopts:
3887 3882 hgvfs.write(b'sharedpath', sharedpath)
3888 3883
3889 3884 if createopts.get(b'shareditems'):
3890 3885 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3891 3886 hgvfs.write(b'shared', shared)
3892 3887
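# Example (sketch): creating a new repository with the lfs requirement from
# code; ui is assumed to be an initialized ui instance and the path is
# illustrative:
#
#     createrepository(ui, b'/path/to/new-repo', createopts={b'lfs': True})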
3893 3888
3894 3889 def poisonrepository(repo):
3895 3890 """Poison a repository instance so it can no longer be used."""
3896 3891 # Perform any cleanup on the instance.
3897 3892 repo.close()
3898 3893
3899 3894 # Our strategy is to replace the type of the object with one that
3900 3895 # has all attribute lookups result in error.
3901 3896 #
3902 3897 # But we have to allow the close() method because some constructors
3903 3898 # of repos call close() on repo references.
3904 3899 class poisonedrepository(object):
3905 3900 def __getattribute__(self, item):
3906 3901 if item == 'close':
3907 3902 return object.__getattribute__(self, item)
3908 3903
3909 3904 raise error.ProgrammingError(
3910 3905 b'repo instances should not be used after unshare'
3911 3906 )
3912 3907
3913 3908 def close(self):
3914 3909 pass
3915 3910
3916 3911 # We may have a repoview, which intercepts __setattr__. So be sure
3917 3912 # we operate at the lowest level possible.
3918 3913 object.__setattr__(repo, '__class__', poisonedrepository)
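# Example (sketch): once poisoned, only close() keeps working; any other
# attribute access raises a ProgrammingError:
#
#     poisonrepository(repo)
#     repo.close()      # still allowed
#     repo.changelog    # raises error.ProgrammingError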
@@ -1,111 +1,117 b''
1 1 # requirements.py - objects and functions related to repository requirements
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 # obsolete experimental requirements:
11 # - manifestv2: An experimental new manifest format that allowed
12 # for stem compression of long paths. Experiment ended up not
13 # being successful (repository sizes went up due to worse delta
14 # chains), and the code was deleted in 4.6.
15
10 16 GENERALDELTA_REQUIREMENT = b'generaldelta'
11 17 DOTENCODE_REQUIREMENT = b'dotencode'
12 18 STORE_REQUIREMENT = b'store'
13 19 FNCACHE_REQUIREMENT = b'fncache'
14 20
15 21 DIRSTATE_V2_REQUIREMENT = b'dirstate-v2'
16 22
17 23 # When narrowing is finalized and no longer subject to format changes,
18 24 # we should move this to just "narrow" or similar.
19 25 NARROW_REQUIREMENT = b'narrowhg-experimental'
20 26
21 27 # Enables sparse working directory usage
22 28 SPARSE_REQUIREMENT = b'exp-sparse'
23 29
24 30 # Enables the internal phase which is used to hide changesets instead
25 31 # of stripping them
26 32 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
27 33
28 34 # Stores manifest in Tree structure
29 35 TREEMANIFEST_REQUIREMENT = b'treemanifest'
30 36
31 37 REVLOGV1_REQUIREMENT = b'revlogv1'
32 38
33 39 # Increment the sub-version when the revlog v2 format changes to lock out old
34 40 # clients.
35 41 CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
36 42
37 43 # Increment the sub-version when the revlog v2 format changes to lock out old
38 44 # clients.
39 45 REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
40 46
41 47 # A repository with the sparserevlog feature will have delta chains that
42 48 # can spread over a larger span. Sparse reading cuts these large spans into
43 49 # pieces, so that each piece isn't too big.
44 50 # Without the sparserevlog capability, reading from the repository could use
45 51 # huge amounts of memory, because the whole span would be read at once,
46 52 # including all the intermediate revisions that aren't pertinent for the chain.
47 53 # This is why once a repository has enabled sparse-read, it becomes required.
48 54 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
49 55
50 56 # A repository with the copies-sidedata-changeset requirement will store
51 57 # copy-related information in the changesets' sidedata.
52 58 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
53 59
54 60 # The repository uses a persistent nodemap for the changelog and the manifest.
55 61 NODEMAP_REQUIREMENT = b'persistent-nodemap'
56 62
57 63 # Denotes that the current repository is a share
58 64 SHARED_REQUIREMENT = b'shared'
59 65
60 66 # Denotes that current repository is a share and the shared source path is
61 67 # relative to the current repository root path
62 68 RELATIVE_SHARED_REQUIREMENT = b'relshared'
63 69
64 70 # A repository with share implemented safely. The repository has different
65 71 # store and working copy requirements i.e. both `.hg/requires` and
66 72 # `.hg/store/requires` are present.
67 73 SHARESAFE_REQUIREMENT = b'share-safe'
68 74
69 75 # Bookmarks must be stored in the `store` part of the repository and will be
70 76 # shared across shares
71 77 BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
72 78
73 79 # List of requirements which are working directory specific
74 80 # These requirements cannot be shared between repositories if they
75 81 # share the same store
76 82 # * sparse is a working directory specific functionality and hence a working
77 83 # directory specific requirement
78 84 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
79 85 # represent that the current working copy/repository shares the store of
80 86 # another repo. Hence both of them should be stored in the working copy
81 87 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that
82 88 # the rest of the requirements are stored in the store's requires
83 89 # * DIRSTATE_V2_REQUIREMENT affects .hg/dirstate, of which there is one per
84 90 # working directory.
85 91 WORKING_DIR_REQUIREMENTS = {
86 92 SPARSE_REQUIREMENT,
87 93 SHARED_REQUIREMENT,
88 94 RELATIVE_SHARED_REQUIREMENT,
89 95 SHARESAFE_REQUIREMENT,
90 96 DIRSTATE_V2_REQUIREMENT,
91 97 }
92 98
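# Example (sketch): localrepo.createrepository() uses this set (through
# scmutil.filterrequirements) to split a requirement set between
# `.hg/requires` and `.hg/store/requires`:
#
#     wcreq, storereq = scmutil.filterrequirements(requirements)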
93 99 # List of requirement that impact "stream-clone" (and hardlink clone) and
94 100 # cannot be changed in such cases.
95 101 #
96 102 # requirements not in this list are safe to be altered during stream-clone.
97 103 #
98 104 # note: the list is currently inherited from previous code and misses some relevant requirements while containing some irrelevant ones.
99 105 STREAM_FIXED_REQUIREMENTS = {
100 106 BOOKMARKS_IN_STORE_REQUIREMENT,
101 107 CHANGELOGV2_REQUIREMENT,
102 108 COPIESSDC_REQUIREMENT,
103 109 DIRSTATE_V2_REQUIREMENT,
104 110 GENERALDELTA_REQUIREMENT,
105 111 NODEMAP_REQUIREMENT,
106 112 REVLOGV1_REQUIREMENT,
107 113 REVLOGV2_REQUIREMENT,
108 114 SHARESAFE_REQUIREMENT,
109 115 SPARSEREVLOG_REQUIREMENT,
110 116 TREEMANIFEST_REQUIREMENT,
111 117 }