manifestlog: also monitor `00manifest.n` when applicable...
Author: marmoute
Changeset: r48854:7970895a (branch: stable)
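The hunk below adds a `manifestlogcache` filecache class, mirroring the existing `changelogcache`: it tracks `00manifest.i` and, when the persistent nodemap is enabled, `00manifest.n` as well. The decorator is then presumably applied to the `manifestlog` property in place of the plain `@storecache(b'00manifest.i')`; that hunk is not part of this excerpt, so the following is only a minimal sketch of the assumed usage:

    # Sketch of assumed usage (not shown in the hunk below): applying the new
    # cache class so the cached manifestlog is invalidated whenever
    # 00manifest.i or 00manifest.n changes on disk.
    @manifestlogcache()
    def manifestlog(self):
        # property body as it appears elsewhere in localrepository,
        # reproduced here for illustration only
        return self.store.manifestlog(self, self._storenarrowmatch)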
@@ -1,3866 +1,3881 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 wireprototypes,
75 75 )
76 76
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81
82 82 from .utils import (
83 83 hashutil,
84 84 procutil,
85 85 stringutil,
86 86 urlutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 concurrency_checker as revlogchecker,
91 91 constants as revlogconst,
92 92 sidedata as sidedatamod,
93 93 )
94 94
95 95 release = lockmod.release
96 96 urlerr = util.urlerr
97 97 urlreq = util.urlreq
98 98
99 99 # set of (path, vfs-location) tuples. vfs-location is:
100 100 # - 'plain' for vfs relative paths
101 101 # - '' for svfs relative paths
102 102 _cachedfiles = set()
103 103
104 104
105 105 class _basefilecache(scmutil.filecache):
106 106 """All filecache usage on repo are done for logic that should be unfiltered"""
107 107
108 108 def __get__(self, repo, type=None):
109 109 if repo is None:
110 110 return self
111 111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 112 unfi = repo.unfiltered()
113 113 try:
114 114 return unfi.__dict__[self.sname]
115 115 except KeyError:
116 116 pass
117 117 return super(_basefilecache, self).__get__(unfi, type)
118 118
119 119 def set(self, repo, value):
120 120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 121
122 122
123 123 class repofilecache(_basefilecache):
124 124 """filecache for files in .hg but outside of .hg/store"""
125 125
126 126 def __init__(self, *paths):
127 127 super(repofilecache, self).__init__(*paths)
128 128 for path in paths:
129 129 _cachedfiles.add((path, b'plain'))
130 130
131 131 def join(self, obj, fname):
132 132 return obj.vfs.join(fname)
133 133
134 134
135 135 class storecache(_basefilecache):
136 136 """filecache for files in the store"""
137 137
138 138 def __init__(self, *paths):
139 139 super(storecache, self).__init__(*paths)
140 140 for path in paths:
141 141 _cachedfiles.add((path, b''))
142 142
143 143 def join(self, obj, fname):
144 144 return obj.sjoin(fname)
145 145
146 146
147 147 class changelogcache(storecache):
148 148 """filecache for the changelog"""
149 149
150 150 def __init__(self):
151 151 super(changelogcache, self).__init__()
152 152 _cachedfiles.add((b'00changelog.i', b''))
153 153 _cachedfiles.add((b'00changelog.n', b''))
154 154
155 155 def tracked_paths(self, obj):
156 156 paths = [self.join(obj, b'00changelog.i')]
157 157 if obj.store.opener.options.get(b'persistent-nodemap', False):
158 158 paths.append(self.join(obj, b'00changelog.n'))
159 159 return paths
160 160
161 161
162 class manifestlogcache(storecache):
163 """filecache for the manifestlog"""
164
165 def __init__(self):
166 super(manifestlogcache, self).__init__()
167 _cachedfiles.add((b'00manifest.i', b''))
168 _cachedfiles.add((b'00manifest.n', b''))
169
170 def tracked_paths(self, obj):
171 paths = [self.join(obj, b'00manifest.i')]
172 if obj.store.opener.options.get(b'persistent-nodemap', False):
173 paths.append(self.join(obj, b'00manifest.n'))
174 return paths
175
176
162 177 class mixedrepostorecache(_basefilecache):
163 178 """filecache for a mix files in .hg/store and outside"""
164 179
165 180 def __init__(self, *pathsandlocations):
166 181 # scmutil.filecache only uses the path for passing back into our
167 182 # join(), so we can safely pass a list of paths and locations
168 183 super(mixedrepostorecache, self).__init__(*pathsandlocations)
169 184 _cachedfiles.update(pathsandlocations)
170 185
171 186 def join(self, obj, fnameandlocation):
172 187 fname, location = fnameandlocation
173 188 if location == b'plain':
174 189 return obj.vfs.join(fname)
175 190 else:
176 191 if location != b'':
177 192 raise error.ProgrammingError(
178 193 b'unexpected location: %s' % location
179 194 )
180 195 return obj.sjoin(fname)
181 196
182 197
183 198 def isfilecached(repo, name):
184 199 """check if a repo has already cached "name" filecache-ed property
185 200
186 201 This returns (cachedobj-or-None, iscached) tuple.
187 202 """
188 203 cacheentry = repo.unfiltered()._filecache.get(name, None)
189 204 if not cacheentry:
190 205 return None, False
191 206 return cacheentry.obj, True
192 207
193 208
194 209 class unfilteredpropertycache(util.propertycache):
195 210 """propertycache that apply to unfiltered repo only"""
196 211
197 212 def __get__(self, repo, type=None):
198 213 unfi = repo.unfiltered()
199 214 if unfi is repo:
200 215 return super(unfilteredpropertycache, self).__get__(unfi)
201 216 return getattr(unfi, self.name)
202 217
203 218
204 219 class filteredpropertycache(util.propertycache):
205 220 """propertycache that must take filtering in account"""
206 221
207 222 def cachevalue(self, obj, value):
208 223 object.__setattr__(obj, self.name, value)
209 224
210 225
211 226 def hasunfilteredcache(repo, name):
212 227 """check if a repo has an unfilteredpropertycache value for <name>"""
213 228 return name in vars(repo.unfiltered())
214 229
215 230
216 231 def unfilteredmethod(orig):
217 232 """decorate method that always need to be run on unfiltered version"""
218 233
219 234 @functools.wraps(orig)
220 235 def wrapper(repo, *args, **kwargs):
221 236 return orig(repo.unfiltered(), *args, **kwargs)
222 237
223 238 return wrapper
224 239
225 240
226 241 moderncaps = {
227 242 b'lookup',
228 243 b'branchmap',
229 244 b'pushkey',
230 245 b'known',
231 246 b'getbundle',
232 247 b'unbundle',
233 248 }
234 249 legacycaps = moderncaps.union({b'changegroupsubset'})
235 250
236 251
237 252 @interfaceutil.implementer(repository.ipeercommandexecutor)
238 253 class localcommandexecutor(object):
239 254 def __init__(self, peer):
240 255 self._peer = peer
241 256 self._sent = False
242 257 self._closed = False
243 258
244 259 def __enter__(self):
245 260 return self
246 261
247 262 def __exit__(self, exctype, excvalue, exctb):
248 263 self.close()
249 264
250 265 def callcommand(self, command, args):
251 266 if self._sent:
252 267 raise error.ProgrammingError(
253 268 b'callcommand() cannot be used after sendcommands()'
254 269 )
255 270
256 271 if self._closed:
257 272 raise error.ProgrammingError(
258 273 b'callcommand() cannot be used after close()'
259 274 )
260 275
261 276 # We don't need to support anything fancy. Just call the named
262 277 # method on the peer and return a resolved future.
263 278 fn = getattr(self._peer, pycompat.sysstr(command))
264 279
265 280 f = pycompat.futures.Future()
266 281
267 282 try:
268 283 result = fn(**pycompat.strkwargs(args))
269 284 except Exception:
270 285 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
271 286 else:
272 287 f.set_result(result)
273 288
274 289 return f
275 290
276 291 def sendcommands(self):
277 292 self._sent = True
278 293
279 294 def close(self):
280 295 self._closed = True
281 296
282 297
283 298 @interfaceutil.implementer(repository.ipeercommands)
284 299 class localpeer(repository.peer):
285 300 '''peer for a local repo; reflects only the most recent API'''
286 301
287 302 def __init__(self, repo, caps=None):
288 303 super(localpeer, self).__init__()
289 304
290 305 if caps is None:
291 306 caps = moderncaps.copy()
292 307 self._repo = repo.filtered(b'served')
293 308 self.ui = repo.ui
294 309
295 310 if repo._wanted_sidedata:
296 311 formatted = bundle2.format_remote_wanted_sidedata(repo)
297 312 caps.add(b'exp-wanted-sidedata=' + formatted)
298 313
299 314 self._caps = repo._restrictcapabilities(caps)
300 315
301 316 # Begin of _basepeer interface.
302 317
303 318 def url(self):
304 319 return self._repo.url()
305 320
306 321 def local(self):
307 322 return self._repo
308 323
309 324 def peer(self):
310 325 return self
311 326
312 327 def canpush(self):
313 328 return True
314 329
315 330 def close(self):
316 331 self._repo.close()
317 332
318 333 # End of _basepeer interface.
319 334
320 335 # Begin of _basewirecommands interface.
321 336
322 337 def branchmap(self):
323 338 return self._repo.branchmap()
324 339
325 340 def capabilities(self):
326 341 return self._caps
327 342
328 343 def clonebundles(self):
329 344 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
330 345
331 346 def debugwireargs(self, one, two, three=None, four=None, five=None):
332 347 """Used to test argument passing over the wire"""
333 348 return b"%s %s %s %s %s" % (
334 349 one,
335 350 two,
336 351 pycompat.bytestr(three),
337 352 pycompat.bytestr(four),
338 353 pycompat.bytestr(five),
339 354 )
340 355
341 356 def getbundle(
342 357 self,
343 358 source,
344 359 heads=None,
345 360 common=None,
346 361 bundlecaps=None,
347 362 remote_sidedata=None,
348 363 **kwargs
349 364 ):
350 365 chunks = exchange.getbundlechunks(
351 366 self._repo,
352 367 source,
353 368 heads=heads,
354 369 common=common,
355 370 bundlecaps=bundlecaps,
356 371 remote_sidedata=remote_sidedata,
357 372 **kwargs
358 373 )[1]
359 374 cb = util.chunkbuffer(chunks)
360 375
361 376 if exchange.bundle2requested(bundlecaps):
362 377 # When requesting a bundle2, getbundle returns a stream to make the
363 378 # wire level function happier. We need to build a proper object
364 379 # from it in local peer.
365 380 return bundle2.getunbundler(self.ui, cb)
366 381 else:
367 382 return changegroup.getunbundler(b'01', cb, None)
368 383
369 384 def heads(self):
370 385 return self._repo.heads()
371 386
372 387 def known(self, nodes):
373 388 return self._repo.known(nodes)
374 389
375 390 def listkeys(self, namespace):
376 391 return self._repo.listkeys(namespace)
377 392
378 393 def lookup(self, key):
379 394 return self._repo.lookup(key)
380 395
381 396 def pushkey(self, namespace, key, old, new):
382 397 return self._repo.pushkey(namespace, key, old, new)
383 398
384 399 def stream_out(self):
385 400 raise error.Abort(_(b'cannot perform stream clone against local peer'))
386 401
387 402 def unbundle(self, bundle, heads, url):
388 403 """apply a bundle on a repo
389 404
390 405 This function handles the repo locking itself."""
391 406 try:
392 407 try:
393 408 bundle = exchange.readbundle(self.ui, bundle, None)
394 409 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
395 410 if util.safehasattr(ret, b'getchunks'):
396 411 # This is a bundle20 object, turn it into an unbundler.
397 412 # This little dance should be dropped eventually when the
398 413 # API is finally improved.
399 414 stream = util.chunkbuffer(ret.getchunks())
400 415 ret = bundle2.getunbundler(self.ui, stream)
401 416 return ret
402 417 except Exception as exc:
403 418 # If the exception contains output salvaged from a bundle2
404 419 # reply, we need to make sure it is printed before continuing
405 420 # to fail. So we build a bundle2 with such output and consume
406 421 # it directly.
407 422 #
408 423 # This is not very elegant but allows a "simple" solution for
409 424 # issue4594
410 425 output = getattr(exc, '_bundle2salvagedoutput', ())
411 426 if output:
412 427 bundler = bundle2.bundle20(self._repo.ui)
413 428 for out in output:
414 429 bundler.addpart(out)
415 430 stream = util.chunkbuffer(bundler.getchunks())
416 431 b = bundle2.getunbundler(self.ui, stream)
417 432 bundle2.processbundle(self._repo, b)
418 433 raise
419 434 except error.PushRaced as exc:
420 435 raise error.ResponseError(
421 436 _(b'push failed:'), stringutil.forcebytestr(exc)
422 437 )
423 438
424 439 # End of _basewirecommands interface.
425 440
426 441 # Begin of peer interface.
427 442
428 443 def commandexecutor(self):
429 444 return localcommandexecutor(self)
430 445
431 446 # End of peer interface.
432 447
433 448
434 449 @interfaceutil.implementer(repository.ipeerlegacycommands)
435 450 class locallegacypeer(localpeer):
436 451 """peer extension which implements legacy methods too; used for tests with
437 452 restricted capabilities"""
438 453
439 454 def __init__(self, repo):
440 455 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
441 456
442 457 # Begin of baselegacywirecommands interface.
443 458
444 459 def between(self, pairs):
445 460 return self._repo.between(pairs)
446 461
447 462 def branches(self, nodes):
448 463 return self._repo.branches(nodes)
449 464
450 465 def changegroup(self, nodes, source):
451 466 outgoing = discovery.outgoing(
452 467 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
453 468 )
454 469 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
455 470
456 471 def changegroupsubset(self, bases, heads, source):
457 472 outgoing = discovery.outgoing(
458 473 self._repo, missingroots=bases, ancestorsof=heads
459 474 )
460 475 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
461 476
462 477 # End of baselegacywirecommands interface.
463 478
464 479
465 480 # Functions receiving (ui, features) that extensions can register to impact
466 481 # the ability to load repositories with custom requirements. Only
467 482 # functions defined in loaded extensions are called.
468 483 #
469 484 # The function receives a set of requirement strings that the repository
470 485 # is capable of opening. Functions will typically add elements to the
471 486 # set to reflect that the extension knows how to handle those requirements.
472 487 featuresetupfuncs = set()
473 488
474 489
475 490 def _getsharedvfs(hgvfs, requirements):
476 491 """returns the vfs object pointing to root of shared source
477 492 repo for a shared repository
478 493
479 494 hgvfs is vfs pointing at .hg/ of current repo (shared one)
480 495 requirements is a set of requirements of current repo (shared one)
481 496 """
482 497 # The ``shared`` or ``relshared`` requirements indicate the
483 498 # store lives in the path contained in the ``.hg/sharedpath`` file.
484 499 # This is an absolute path for ``shared`` and relative to
485 500 # ``.hg/`` for ``relshared``.
486 501 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
487 502 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
488 503 sharedpath = util.normpath(hgvfs.join(sharedpath))
489 504
490 505 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
491 506
492 507 if not sharedvfs.exists():
493 508 raise error.RepoError(
494 509 _(b'.hg/sharedpath points to nonexistent directory %s')
495 510 % sharedvfs.base
496 511 )
497 512 return sharedvfs
498 513
499 514
500 515 def _readrequires(vfs, allowmissing):
501 516 """reads the require file present at root of this vfs
502 517 and returns a set of requirements
503 518
504 519 If allowmissing is True, we suppress ENOENT if raised"""
505 520 # requires file contains a newline-delimited list of
506 521 # features/capabilities the opener (us) must have in order to use
507 522 # the repository. This file was introduced in Mercurial 0.9.2,
508 523 # which means very old repositories may not have one. We assume
509 524 # a missing file translates to no requirements.
510 525 try:
511 526 requirements = set(vfs.read(b'requires').splitlines())
512 527 except IOError as e:
513 528 if not (allowmissing and e.errno == errno.ENOENT):
514 529 raise
515 530 requirements = set()
516 531 return requirements
517 532
518 533
519 534 def makelocalrepository(baseui, path, intents=None):
520 535 """Create a local repository object.
521 536
522 537 Given arguments needed to construct a local repository, this function
523 538 performs various early repository loading functionality (such as
524 539 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
525 540 the repository can be opened, derives a type suitable for representing
526 541 that repository, and returns an instance of it.
527 542
528 543 The returned object conforms to the ``repository.completelocalrepository``
529 544 interface.
530 545
531 546 The repository type is derived by calling a series of factory functions
532 547 for each aspect/interface of the final repository. These are defined by
533 548 ``REPO_INTERFACES``.
534 549
535 550 Each factory function is called to produce a type implementing a specific
536 551 interface. The cumulative list of returned types will be combined into a
537 552 new type and that type will be instantiated to represent the local
538 553 repository.
539 554
540 555 The factory functions each receive various state that may be consulted
541 556 as part of deriving a type.
542 557
543 558 Extensions should wrap these factory functions to customize repository type
544 559 creation. Note that an extension's wrapped function may be called even if
545 560 that extension is not loaded for the repo being constructed. Extensions
546 561 should check if their ``__name__`` appears in the
547 562 ``extensionmodulenames`` set passed to the factory function and no-op if
548 563 not.
549 564 """
550 565 ui = baseui.copy()
551 566 # Prevent copying repo configuration.
552 567 ui.copy = baseui.copy
553 568
554 569 # Working directory VFS rooted at repository root.
555 570 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
556 571
557 572 # Main VFS for .hg/ directory.
558 573 hgpath = wdirvfs.join(b'.hg')
559 574 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
560 575 # Whether this repository is shared one or not
561 576 shared = False
562 577 # If this repository is shared, vfs pointing to shared repo
563 578 sharedvfs = None
564 579
565 580 # The .hg/ path should exist and should be a directory. All other
566 581 # cases are errors.
567 582 if not hgvfs.isdir():
568 583 try:
569 584 hgvfs.stat()
570 585 except OSError as e:
571 586 if e.errno != errno.ENOENT:
572 587 raise
573 588 except ValueError as e:
574 589 # Can be raised on Python 3.8 when path is invalid.
575 590 raise error.Abort(
576 591 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
577 592 )
578 593
579 594 raise error.RepoError(_(b'repository %s not found') % path)
580 595
581 596 requirements = _readrequires(hgvfs, True)
582 597 shared = (
583 598 requirementsmod.SHARED_REQUIREMENT in requirements
584 599 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
585 600 )
586 601 storevfs = None
587 602 if shared:
588 603 # This is a shared repo
589 604 sharedvfs = _getsharedvfs(hgvfs, requirements)
590 605 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
591 606 else:
592 607 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
593 608
594 609 # if .hg/requires contains the sharesafe requirement, it means
595 610 # there exists a `.hg/store/requires` too and we should read it
596 611 # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
597 612 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
598 613 # is not present, refer checkrequirementscompat() for that
599 614 #
600 615 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
601 616 # repository was shared the old way. We check the share source .hg/requires
602 617 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
603 618 # to be reshared
604 619 hint = _(b"see `hg help config.format.use-share-safe` for more information")
605 620 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
606 621
607 622 if (
608 623 shared
609 624 and requirementsmod.SHARESAFE_REQUIREMENT
610 625 not in _readrequires(sharedvfs, True)
611 626 ):
612 627 mismatch_warn = ui.configbool(
613 628 b'share', b'safe-mismatch.source-not-safe.warn'
614 629 )
615 630 mismatch_config = ui.config(
616 631 b'share', b'safe-mismatch.source-not-safe'
617 632 )
618 633 if mismatch_config in (
619 634 b'downgrade-allow',
620 635 b'allow',
621 636 b'downgrade-abort',
622 637 ):
623 638 # prevent cyclic import localrepo -> upgrade -> localrepo
624 639 from . import upgrade
625 640
626 641 upgrade.downgrade_share_to_non_safe(
627 642 ui,
628 643 hgvfs,
629 644 sharedvfs,
630 645 requirements,
631 646 mismatch_config,
632 647 mismatch_warn,
633 648 )
634 649 elif mismatch_config == b'abort':
635 650 raise error.Abort(
636 651 _(b"share source does not support share-safe requirement"),
637 652 hint=hint,
638 653 )
639 654 else:
640 655 raise error.Abort(
641 656 _(
642 657 b"share-safe mismatch with source.\nUnrecognized"
643 658 b" value '%s' of `share.safe-mismatch.source-not-safe`"
644 659 b" set."
645 660 )
646 661 % mismatch_config,
647 662 hint=hint,
648 663 )
649 664 else:
650 665 requirements |= _readrequires(storevfs, False)
651 666 elif shared:
652 667 sourcerequires = _readrequires(sharedvfs, False)
653 668 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
654 669 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
655 670 mismatch_warn = ui.configbool(
656 671 b'share', b'safe-mismatch.source-safe.warn'
657 672 )
658 673 if mismatch_config in (
659 674 b'upgrade-allow',
660 675 b'allow',
661 676 b'upgrade-abort',
662 677 ):
663 678 # prevent cyclic import localrepo -> upgrade -> localrepo
664 679 from . import upgrade
665 680
666 681 upgrade.upgrade_share_to_safe(
667 682 ui,
668 683 hgvfs,
669 684 storevfs,
670 685 requirements,
671 686 mismatch_config,
672 687 mismatch_warn,
673 688 )
674 689 elif mismatch_config == b'abort':
675 690 raise error.Abort(
676 691 _(
677 692 b'version mismatch: source uses share-safe'
678 693 b' functionality while the current share does not'
679 694 ),
680 695 hint=hint,
681 696 )
682 697 else:
683 698 raise error.Abort(
684 699 _(
685 700 b"share-safe mismatch with source.\nUnrecognized"
686 701 b" value '%s' of `share.safe-mismatch.source-safe` set."
687 702 )
688 703 % mismatch_config,
689 704 hint=hint,
690 705 )
691 706
692 707 # The .hg/hgrc file may load extensions or contain config options
693 708 # that influence repository construction. Attempt to load it and
694 709 # process any new extensions that it may have pulled in.
695 710 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
696 711 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
697 712 extensions.loadall(ui)
698 713 extensions.populateui(ui)
699 714
700 715 # Set of module names of extensions loaded for this repository.
701 716 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
702 717
703 718 supportedrequirements = gathersupportedrequirements(ui)
704 719
705 720 # We first validate the requirements are known.
706 721 ensurerequirementsrecognized(requirements, supportedrequirements)
707 722
708 723 # Then we validate that the known set is reasonable to use together.
709 724 ensurerequirementscompatible(ui, requirements)
710 725
711 726 # TODO there are unhandled edge cases related to opening repositories with
712 727 # shared storage. If storage is shared, we should also test for requirements
713 728 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
714 729 # that repo, as that repo may load extensions needed to open it. This is a
715 730 # bit complicated because we don't want the other hgrc to overwrite settings
716 731 # in this hgrc.
717 732 #
718 733 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
719 734 # file when sharing repos. But if a requirement is added after the share is
720 735 # performed, thereby introducing a new requirement for the opener, we may
721 736 # will not see that and could encounter a run-time error interacting with
722 737 # that shared store since it has an unknown-to-us requirement.
723 738
724 739 # At this point, we know we should be capable of opening the repository.
725 740 # Now get on with doing that.
726 741
727 742 features = set()
728 743
729 744 # The "store" part of the repository holds versioned data. How it is
730 745 # accessed is determined by various requirements. If `shared` or
731 746 # `relshared` requirements are present, this indicates current repository
732 747 # is a share and store exists in path mentioned in `.hg/sharedpath`
733 748 if shared:
734 749 storebasepath = sharedvfs.base
735 750 cachepath = sharedvfs.join(b'cache')
736 751 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
737 752 else:
738 753 storebasepath = hgvfs.base
739 754 cachepath = hgvfs.join(b'cache')
740 755 wcachepath = hgvfs.join(b'wcache')
741 756
742 757 # The store has changed over time and the exact layout is dictated by
743 758 # requirements. The store interface abstracts differences across all
744 759 # of them.
745 760 store = makestore(
746 761 requirements,
747 762 storebasepath,
748 763 lambda base: vfsmod.vfs(base, cacheaudited=True),
749 764 )
750 765 hgvfs.createmode = store.createmode
751 766
752 767 storevfs = store.vfs
753 768 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
754 769
755 770 if (
756 771 requirementsmod.REVLOGV2_REQUIREMENT in requirements
757 772 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
758 773 ):
759 774 features.add(repository.REPO_FEATURE_SIDE_DATA)
760 775 # the revlogv2 docket introduced race condition that we need to fix
761 776 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
762 777
763 778 # The cache vfs is used to manage cache files.
764 779 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
765 780 cachevfs.createmode = store.createmode
766 781 # The cache vfs is used to manage cache files related to the working copy
767 782 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
768 783 wcachevfs.createmode = store.createmode
769 784
770 785 # Now resolve the type for the repository object. We do this by repeatedly
771 786 # calling a factory function to produce types for specific aspects of the
772 787 # repo's operation. The aggregate returned types are used as base classes
773 788 # for a dynamically-derived type, which will represent our new repository.
774 789
775 790 bases = []
776 791 extrastate = {}
777 792
778 793 for iface, fn in REPO_INTERFACES:
779 794 # We pass all potentially useful state to give extensions tons of
780 795 # flexibility.
781 796 typ = fn()(
782 797 ui=ui,
783 798 intents=intents,
784 799 requirements=requirements,
785 800 features=features,
786 801 wdirvfs=wdirvfs,
787 802 hgvfs=hgvfs,
788 803 store=store,
789 804 storevfs=storevfs,
790 805 storeoptions=storevfs.options,
791 806 cachevfs=cachevfs,
792 807 wcachevfs=wcachevfs,
793 808 extensionmodulenames=extensionmodulenames,
794 809 extrastate=extrastate,
795 810 baseclasses=bases,
796 811 )
797 812
798 813 if not isinstance(typ, type):
799 814 raise error.ProgrammingError(
800 815 b'unable to construct type for %s' % iface
801 816 )
802 817
803 818 bases.append(typ)
804 819
805 820 # type() allows you to use characters in type names that wouldn't be
806 821 # recognized as Python symbols in source code. We abuse that to add
807 822 # rich information about our constructed repo.
808 823 name = pycompat.sysstr(
809 824 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
810 825 )
811 826
812 827 cls = type(name, tuple(bases), {})
813 828
814 829 return cls(
815 830 baseui=baseui,
816 831 ui=ui,
817 832 origroot=path,
818 833 wdirvfs=wdirvfs,
819 834 hgvfs=hgvfs,
820 835 requirements=requirements,
821 836 supportedrequirements=supportedrequirements,
822 837 sharedpath=storebasepath,
823 838 store=store,
824 839 cachevfs=cachevfs,
825 840 wcachevfs=wcachevfs,
826 841 features=features,
827 842 intents=intents,
828 843 )
829 844
830 845
831 846 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
832 847 """Load hgrc files/content into a ui instance.
833 848
834 849 This is called during repository opening to load any additional
835 850 config files or settings relevant to the current repository.
836 851
837 852 Returns a bool indicating whether any additional configs were loaded.
838 853
839 854 Extensions should monkeypatch this function to modify how per-repo
840 855 configs are loaded. For example, an extension may wish to pull in
841 856 configs from alternate files or sources.
842 857
843 858 sharedvfs is vfs object pointing to source repo if the current one is a
844 859 shared one
845 860 """
846 861 if not rcutil.use_repo_hgrc():
847 862 return False
848 863
849 864 ret = False
850 865 # first load config from shared source if we have to
851 866 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
852 867 try:
853 868 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
854 869 ret = True
855 870 except IOError:
856 871 pass
857 872
858 873 try:
859 874 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
860 875 ret = True
861 876 except IOError:
862 877 pass
863 878
864 879 try:
865 880 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
866 881 ret = True
867 882 except IOError:
868 883 pass
869 884
870 885 return ret
871 886
872 887
873 888 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
874 889 """Perform additional actions after .hg/hgrc is loaded.
875 890
876 891 This function is called during repository loading immediately after
877 892 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
878 893
879 894 The function can be used to validate configs, automatically add
880 895 options (including extensions) based on requirements, etc.
881 896 """
882 897
883 898 # Map of requirements to list of extensions to load automatically when
884 899 # requirement is present.
885 900 autoextensions = {
886 901 b'git': [b'git'],
887 902 b'largefiles': [b'largefiles'],
888 903 b'lfs': [b'lfs'],
889 904 }
890 905
891 906 for requirement, names in sorted(autoextensions.items()):
892 907 if requirement not in requirements:
893 908 continue
894 909
895 910 for name in names:
896 911 if not ui.hasconfig(b'extensions', name):
897 912 ui.setconfig(b'extensions', name, b'', source=b'autoload')
898 913
899 914
900 915 def gathersupportedrequirements(ui):
901 916 """Determine the complete set of recognized requirements."""
902 917 # Start with all requirements supported by this file.
903 918 supported = set(localrepository._basesupported)
904 919
905 920 if dirstate.SUPPORTS_DIRSTATE_V2:
906 921 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
907 922
908 923 # Execute ``featuresetupfuncs`` entries if they belong to an extension
909 924 # relevant to this ui instance.
910 925 modules = {m.__name__ for n, m in extensions.extensions(ui)}
911 926
912 927 for fn in featuresetupfuncs:
913 928 if fn.__module__ in modules:
914 929 fn(ui, supported)
915 930
916 931 # Add derived requirements from registered compression engines.
917 932 for name in util.compengines:
918 933 engine = util.compengines[name]
919 934 if engine.available() and engine.revlogheader():
920 935 supported.add(b'exp-compression-%s' % name)
921 936 if engine.name() == b'zstd':
922 937 supported.add(b'revlog-compression-zstd')
923 938
924 939 return supported
925 940
926 941
927 942 def ensurerequirementsrecognized(requirements, supported):
928 943 """Validate that a set of local requirements is recognized.
929 944
930 945 Receives a set of requirements. Raises an ``error.RepoError`` if there
931 946 exists any requirement in that set that currently loaded code doesn't
932 947 recognize.
933 948
934 949 Returns a set of supported requirements.
935 950 """
936 951 missing = set()
937 952
938 953 for requirement in requirements:
939 954 if requirement in supported:
940 955 continue
941 956
942 957 if not requirement or not requirement[0:1].isalnum():
943 958 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
944 959
945 960 missing.add(requirement)
946 961
947 962 if missing:
948 963 raise error.RequirementError(
949 964 _(b'repository requires features unknown to this Mercurial: %s')
950 965 % b' '.join(sorted(missing)),
951 966 hint=_(
952 967 b'see https://mercurial-scm.org/wiki/MissingRequirement '
953 968 b'for more information'
954 969 ),
955 970 )
956 971
957 972
958 973 def ensurerequirementscompatible(ui, requirements):
959 974 """Validates that a set of recognized requirements is mutually compatible.
960 975
961 976 Some requirements may not be compatible with others or require
962 977 config options that aren't enabled. This function is called during
963 978 repository opening to ensure that the set of requirements needed
964 979 to open a repository is sane and compatible with config options.
965 980
966 981 Extensions can monkeypatch this function to perform additional
967 982 checking.
968 983
969 984 ``error.RepoError`` should be raised on failure.
970 985 """
971 986 if (
972 987 requirementsmod.SPARSE_REQUIREMENT in requirements
973 988 and not sparse.enabled
974 989 ):
975 990 raise error.RepoError(
976 991 _(
977 992 b'repository is using sparse feature but '
978 993 b'sparse is not enabled; enable the '
979 994 b'"sparse" extensions to access'
980 995 )
981 996 )
982 997
983 998
984 999 def makestore(requirements, path, vfstype):
985 1000 """Construct a storage object for a repository."""
986 1001 if requirementsmod.STORE_REQUIREMENT in requirements:
987 1002 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
988 1003 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
989 1004 return storemod.fncachestore(path, vfstype, dotencode)
990 1005
991 1006 return storemod.encodedstore(path, vfstype)
992 1007
993 1008 return storemod.basicstore(path, vfstype)
994 1009
995 1010
996 1011 def resolvestorevfsoptions(ui, requirements, features):
997 1012 """Resolve the options to pass to the store vfs opener.
998 1013
999 1014 The returned dict is used to influence behavior of the storage layer.
1000 1015 """
1001 1016 options = {}
1002 1017
1003 1018 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1004 1019 options[b'treemanifest'] = True
1005 1020
1006 1021 # experimental config: format.manifestcachesize
1007 1022 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1008 1023 if manifestcachesize is not None:
1009 1024 options[b'manifestcachesize'] = manifestcachesize
1010 1025
1011 1026 # In the absence of another requirement superseding a revlog-related
1012 1027 # requirement, we have to assume the repo is using revlog version 0.
1013 1028 # This revlog format is super old and we don't bother trying to parse
1014 1029 # opener options for it because those options wouldn't do anything
1015 1030 # meaningful on such old repos.
1016 1031 if (
1017 1032 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1018 1033 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1019 1034 ):
1020 1035 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1021 1036 else: # explicitly mark repo as using revlogv0
1022 1037 options[b'revlogv0'] = True
1023 1038
1024 1039 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1025 1040 options[b'copies-storage'] = b'changeset-sidedata'
1026 1041 else:
1027 1042 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1028 1043 copiesextramode = (b'changeset-only', b'compatibility')
1029 1044 if writecopiesto in copiesextramode:
1030 1045 options[b'copies-storage'] = b'extra'
1031 1046
1032 1047 return options
1033 1048
1034 1049
1035 1050 def resolverevlogstorevfsoptions(ui, requirements, features):
1036 1051 """Resolve opener options specific to revlogs."""
1037 1052
1038 1053 options = {}
1039 1054 options[b'flagprocessors'] = {}
1040 1055
1041 1056 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1042 1057 options[b'revlogv1'] = True
1043 1058 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1044 1059 options[b'revlogv2'] = True
1045 1060 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1046 1061 options[b'changelogv2'] = True
1047 1062
1048 1063 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1049 1064 options[b'generaldelta'] = True
1050 1065
1051 1066 # experimental config: format.chunkcachesize
1052 1067 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1053 1068 if chunkcachesize is not None:
1054 1069 options[b'chunkcachesize'] = chunkcachesize
1055 1070
1056 1071 deltabothparents = ui.configbool(
1057 1072 b'storage', b'revlog.optimize-delta-parent-choice'
1058 1073 )
1059 1074 options[b'deltabothparents'] = deltabothparents
1060 1075
1061 1076 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1062 1077 options[b'issue6528.fix-incoming'] = issue6528
1063 1078
1064 1079 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1065 1080 lazydeltabase = False
1066 1081 if lazydelta:
1067 1082 lazydeltabase = ui.configbool(
1068 1083 b'storage', b'revlog.reuse-external-delta-parent'
1069 1084 )
1070 1085 if lazydeltabase is None:
1071 1086 lazydeltabase = not scmutil.gddeltaconfig(ui)
1072 1087 options[b'lazydelta'] = lazydelta
1073 1088 options[b'lazydeltabase'] = lazydeltabase
1074 1089
1075 1090 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1076 1091 if 0 <= chainspan:
1077 1092 options[b'maxdeltachainspan'] = chainspan
1078 1093
1079 1094 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1080 1095 if mmapindexthreshold is not None:
1081 1096 options[b'mmapindexthreshold'] = mmapindexthreshold
1082 1097
1083 1098 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1084 1099 srdensitythres = float(
1085 1100 ui.config(b'experimental', b'sparse-read.density-threshold')
1086 1101 )
1087 1102 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1088 1103 options[b'with-sparse-read'] = withsparseread
1089 1104 options[b'sparse-read-density-threshold'] = srdensitythres
1090 1105 options[b'sparse-read-min-gap-size'] = srmingapsize
1091 1106
1092 1107 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1093 1108 options[b'sparse-revlog'] = sparserevlog
1094 1109 if sparserevlog:
1095 1110 options[b'generaldelta'] = True
1096 1111
1097 1112 maxchainlen = None
1098 1113 if sparserevlog:
1099 1114 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1100 1115 # experimental config: format.maxchainlen
1101 1116 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1102 1117 if maxchainlen is not None:
1103 1118 options[b'maxchainlen'] = maxchainlen
1104 1119
1105 1120 for r in requirements:
1106 1121 # we allow multiple compression engine requirements to co-exist because
1107 1122 # strictly speaking, revlog seems to support mixed compression styles.
1108 1123 #
1109 1124 # The compression used for new entries will be "the last one"
1110 1125 prefix = r.startswith
1111 1126 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1112 1127 options[b'compengine'] = r.split(b'-', 2)[2]
1113 1128
1114 1129 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1115 1130 if options[b'zlib.level'] is not None:
1116 1131 if not (0 <= options[b'zlib.level'] <= 9):
1117 1132 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1118 1133 raise error.Abort(msg % options[b'zlib.level'])
1119 1134 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1120 1135 if options[b'zstd.level'] is not None:
1121 1136 if not (0 <= options[b'zstd.level'] <= 22):
1122 1137 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1123 1138 raise error.Abort(msg % options[b'zstd.level'])
1124 1139
1125 1140 if requirementsmod.NARROW_REQUIREMENT in requirements:
1126 1141 options[b'enableellipsis'] = True
1127 1142
1128 1143 if ui.configbool(b'experimental', b'rust.index'):
1129 1144 options[b'rust.index'] = True
1130 1145 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1131 1146 slow_path = ui.config(
1132 1147 b'storage', b'revlog.persistent-nodemap.slow-path'
1133 1148 )
1134 1149 if slow_path not in (b'allow', b'warn', b'abort'):
1135 1150 default = ui.config_default(
1136 1151 b'storage', b'revlog.persistent-nodemap.slow-path'
1137 1152 )
1138 1153 msg = _(
1139 1154 b'unknown value for config '
1140 1155 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1141 1156 )
1142 1157 ui.warn(msg % slow_path)
1143 1158 if not ui.quiet:
1144 1159 ui.warn(_(b'falling back to default value: %s\n') % default)
1145 1160 slow_path = default
1146 1161
1147 1162 msg = _(
1148 1163 b"accessing `persistent-nodemap` repository without associated "
1149 1164 b"fast implementation."
1150 1165 )
1151 1166 hint = _(
1152 1167 b"check `hg help config.format.use-persistent-nodemap` "
1153 1168 b"for details"
1154 1169 )
1155 1170 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1156 1171 if slow_path == b'warn':
1157 1172 msg = b"warning: " + msg + b'\n'
1158 1173 ui.warn(msg)
1159 1174 if not ui.quiet:
1160 1175 hint = b'(' + hint + b')\n'
1161 1176 ui.warn(hint)
1162 1177 if slow_path == b'abort':
1163 1178 raise error.Abort(msg, hint=hint)
1164 1179 options[b'persistent-nodemap'] = True
1165 1180 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1166 1181 options[b'persistent-nodemap.mmap'] = True
1167 1182 if ui.configbool(b'devel', b'persistent-nodemap'):
1168 1183 options[b'devel-force-nodemap'] = True
1169 1184
1170 1185 return options
1171 1186
1172 1187
1173 1188 def makemain(**kwargs):
1174 1189 """Produce a type conforming to ``ilocalrepositorymain``."""
1175 1190 return localrepository
1176 1191
1177 1192
1178 1193 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1179 1194 class revlogfilestorage(object):
1180 1195 """File storage when using revlogs."""
1181 1196
1182 1197 def file(self, path):
1183 1198 if path.startswith(b'/'):
1184 1199 path = path[1:]
1185 1200
1186 1201 return filelog.filelog(self.svfs, path)
1187 1202
1188 1203
1189 1204 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1190 1205 class revlognarrowfilestorage(object):
1191 1206 """File storage when using revlogs and narrow files."""
1192 1207
1193 1208 def file(self, path):
1194 1209 if path.startswith(b'/'):
1195 1210 path = path[1:]
1196 1211
1197 1212 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1198 1213
1199 1214
1200 1215 def makefilestorage(requirements, features, **kwargs):
1201 1216 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1202 1217 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1203 1218 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1204 1219
1205 1220 if requirementsmod.NARROW_REQUIREMENT in requirements:
1206 1221 return revlognarrowfilestorage
1207 1222 else:
1208 1223 return revlogfilestorage
1209 1224
1210 1225
1211 1226 # List of repository interfaces and factory functions for them. Each
1212 1227 # will be called in order during ``makelocalrepository()`` to iteratively
1213 1228 # derive the final type for a local repository instance. We capture the
1214 1229 # function as a lambda so we don't hold a reference and the module-level
1215 1230 # functions can be wrapped.
1216 1231 REPO_INTERFACES = [
1217 1232 (repository.ilocalrepositorymain, lambda: makemain),
1218 1233 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1219 1234 ]
1220 1235
1221 1236
1222 1237 @interfaceutil.implementer(repository.ilocalrepositorymain)
1223 1238 class localrepository(object):
1224 1239 """Main class for representing local repositories.
1225 1240
1226 1241 All local repositories are instances of this class.
1227 1242
1228 1243 Constructed on its own, instances of this class are not usable as
1229 1244 repository objects. To obtain a usable repository object, call
1230 1245 ``hg.repository()``, ``localrepo.instance()``, or
1231 1246 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1232 1247 ``instance()`` adds support for creating new repositories.
1233 1248 ``hg.repository()`` adds more extension integration, including calling
1234 1249 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1235 1250 used.
1236 1251 """
1237 1252
1238 1253 # obsolete experimental requirements:
1239 1254 # - manifestv2: An experimental new manifest format that allowed
1240 1255 # for stem compression of long paths. Experiment ended up not
1241 1256 # being successful (repository sizes went up due to worse delta
1242 1257 # chains), and the code was deleted in 4.6.
1243 1258 supportedformats = {
1244 1259 requirementsmod.REVLOGV1_REQUIREMENT,
1245 1260 requirementsmod.GENERALDELTA_REQUIREMENT,
1246 1261 requirementsmod.TREEMANIFEST_REQUIREMENT,
1247 1262 requirementsmod.COPIESSDC_REQUIREMENT,
1248 1263 requirementsmod.REVLOGV2_REQUIREMENT,
1249 1264 requirementsmod.CHANGELOGV2_REQUIREMENT,
1250 1265 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1251 1266 requirementsmod.NODEMAP_REQUIREMENT,
1252 1267 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1253 1268 requirementsmod.SHARESAFE_REQUIREMENT,
1254 1269 }
1255 1270 _basesupported = supportedformats | {
1256 1271 requirementsmod.STORE_REQUIREMENT,
1257 1272 requirementsmod.FNCACHE_REQUIREMENT,
1258 1273 requirementsmod.SHARED_REQUIREMENT,
1259 1274 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1260 1275 requirementsmod.DOTENCODE_REQUIREMENT,
1261 1276 requirementsmod.SPARSE_REQUIREMENT,
1262 1277 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1263 1278 }
1264 1279
1265 1280 # list of prefix for file which can be written without 'wlock'
1266 1281 # Extensions should extend this list when needed
1267 1282 _wlockfreeprefix = {
1268 1283 # We might consider requiring 'wlock' for the next
1269 1284 # two, but pretty much all the existing code assumes
1270 1285 # wlock is not needed so we keep them excluded for
1271 1286 # now.
1272 1287 b'hgrc',
1273 1288 b'requires',
1274 1289 # XXX cache is a complicated business; someone
1275 1290 # should investigate this in depth at some point
1276 1291 b'cache/',
1277 1292 # XXX shouldn't dirstate be covered by the wlock?
1278 1293 b'dirstate',
1279 1294 # XXX bisect was still a bit too messy at the time
1280 1295 # this changeset was introduced. Someone should fix
1281 1296 # the remaining bit and drop this line
1282 1297 b'bisect.state',
1283 1298 }
1284 1299
1285 1300 def __init__(
1286 1301 self,
1287 1302 baseui,
1288 1303 ui,
1289 1304 origroot,
1290 1305 wdirvfs,
1291 1306 hgvfs,
1292 1307 requirements,
1293 1308 supportedrequirements,
1294 1309 sharedpath,
1295 1310 store,
1296 1311 cachevfs,
1297 1312 wcachevfs,
1298 1313 features,
1299 1314 intents=None,
1300 1315 ):
1301 1316 """Create a new local repository instance.
1302 1317
1303 1318 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1304 1319 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1305 1320 object.
1306 1321
1307 1322 Arguments:
1308 1323
1309 1324 baseui
1310 1325 ``ui.ui`` instance that ``ui`` argument was based off of.
1311 1326
1312 1327 ui
1313 1328 ``ui.ui`` instance for use by the repository.
1314 1329
1315 1330 origroot
1316 1331 ``bytes`` path to working directory root of this repository.
1317 1332
1318 1333 wdirvfs
1319 1334 ``vfs.vfs`` rooted at the working directory.
1320 1335
1321 1336 hgvfs
1322 1337 ``vfs.vfs`` rooted at .hg/
1323 1338
1324 1339 requirements
1325 1340 ``set`` of bytestrings representing repository opening requirements.
1326 1341
1327 1342 supportedrequirements
1328 1343 ``set`` of bytestrings representing repository requirements that we
1329 1344 know how to open. May be a superset of ``requirements``.
1330 1345
1331 1346 sharedpath
1332 1347 ``bytes`` Defining path to storage base directory. Points to a
1333 1348 ``.hg/`` directory somewhere.
1334 1349
1335 1350 store
1336 1351 ``store.basicstore`` (or derived) instance providing access to
1337 1352 versioned storage.
1338 1353
1339 1354 cachevfs
1340 1355 ``vfs.vfs`` used for cache files.
1341 1356
1342 1357 wcachevfs
1343 1358 ``vfs.vfs`` used for cache files related to the working copy.
1344 1359
1345 1360 features
1346 1361 ``set`` of bytestrings defining features/capabilities of this
1347 1362 instance.
1348 1363
1349 1364 intents
1350 1365 ``set`` of system strings indicating what this repo will be used
1351 1366 for.
1352 1367 """
1353 1368 self.baseui = baseui
1354 1369 self.ui = ui
1355 1370 self.origroot = origroot
1356 1371 # vfs rooted at working directory.
1357 1372 self.wvfs = wdirvfs
1358 1373 self.root = wdirvfs.base
1359 1374 # vfs rooted at .hg/. Used to access most non-store paths.
1360 1375 self.vfs = hgvfs
1361 1376 self.path = hgvfs.base
1362 1377 self.requirements = requirements
1363 1378 self.nodeconstants = sha1nodeconstants
1364 1379 self.nullid = self.nodeconstants.nullid
1365 1380 self.supported = supportedrequirements
1366 1381 self.sharedpath = sharedpath
1367 1382 self.store = store
1368 1383 self.cachevfs = cachevfs
1369 1384 self.wcachevfs = wcachevfs
1370 1385 self.features = features
1371 1386
1372 1387 self.filtername = None
1373 1388
1374 1389 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1375 1390 b'devel', b'check-locks'
1376 1391 ):
1377 1392 self.vfs.audit = self._getvfsward(self.vfs.audit)
1378 1393 # A list of callbacks to shape the phase if no data were found.
1379 1394 # Callbacks are in the form: func(repo, roots) --> processed root.
1380 1395 # This list is to be filled by extensions during repo setup
1381 1396 self._phasedefaults = []
1382 1397
1383 1398 color.setup(self.ui)
1384 1399
1385 1400 self.spath = self.store.path
1386 1401 self.svfs = self.store.vfs
1387 1402 self.sjoin = self.store.join
1388 1403 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1389 1404 b'devel', b'check-locks'
1390 1405 ):
1391 1406 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1392 1407 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1393 1408 else: # standard vfs
1394 1409 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1395 1410
1396 1411 self._dirstatevalidatewarned = False
1397 1412
1398 1413 self._branchcaches = branchmap.BranchMapCache()
1399 1414 self._revbranchcache = None
1400 1415 self._filterpats = {}
1401 1416 self._datafilters = {}
1402 1417 self._transref = self._lockref = self._wlockref = None
1403 1418
1404 1419 # A cache for various files under .hg/ that tracks file changes,
1405 1420 # (used by the filecache decorator)
1406 1421 #
1407 1422 # Maps a property name to its util.filecacheentry
1408 1423 self._filecache = {}
1409 1424
1410 1425 # holds sets of revisions to be filtered
1411 1426 # should be cleared when something might have changed the filter value:
1412 1427 # - new changesets,
1413 1428 # - phase change,
1414 1429 # - new obsolescence marker,
1415 1430 # - working directory parent change,
1416 1431 # - bookmark changes
1417 1432 self.filteredrevcache = {}
1418 1433
1419 1434 # post-dirstate-status hooks
1420 1435 self._postdsstatus = []
1421 1436
1422 1437 # generic mapping between names and nodes
1423 1438 self.names = namespaces.namespaces()
1424 1439
1425 1440 # Key to signature value.
1426 1441 self._sparsesignaturecache = {}
1427 1442 # Signature to cached matcher instance.
1428 1443 self._sparsematchercache = {}
1429 1444
1430 1445 self._extrafilterid = repoview.extrafilter(ui)
1431 1446
1432 1447 self.filecopiesmode = None
1433 1448 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1434 1449 self.filecopiesmode = b'changeset-sidedata'
1435 1450
1436 1451 self._wanted_sidedata = set()
1437 1452 self._sidedata_computers = {}
1438 1453 sidedatamod.set_sidedata_spec_for_repo(self)
1439 1454
1440 1455 def _getvfsward(self, origfunc):
1441 1456 """build a ward for self.vfs"""
1442 1457 rref = weakref.ref(self)
1443 1458
1444 1459 def checkvfs(path, mode=None):
1445 1460 ret = origfunc(path, mode=mode)
1446 1461 repo = rref()
1447 1462 if (
1448 1463 repo is None
1449 1464 or not util.safehasattr(repo, b'_wlockref')
1450 1465 or not util.safehasattr(repo, b'_lockref')
1451 1466 ):
1452 1467 return
1453 1468 if mode in (None, b'r', b'rb'):
1454 1469 return
1455 1470 if path.startswith(repo.path):
1456 1471 # truncate name relative to the repository (.hg)
1457 1472 path = path[len(repo.path) + 1 :]
1458 1473 if path.startswith(b'cache/'):
1459 1474 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1460 1475 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1461 1476 # path prefixes covered by 'lock'
1462 1477 vfs_path_prefixes = (
1463 1478 b'journal.',
1464 1479 b'undo.',
1465 1480 b'strip-backup/',
1466 1481 b'cache/',
1467 1482 )
1468 1483 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1469 1484 if repo._currentlock(repo._lockref) is None:
1470 1485 repo.ui.develwarn(
1471 1486 b'write with no lock: "%s"' % path,
1472 1487 stacklevel=3,
1473 1488 config=b'check-locks',
1474 1489 )
1475 1490 elif repo._currentlock(repo._wlockref) is None:
1476 1491 # rest of vfs files are covered by 'wlock'
1477 1492 #
1478 1493 # exclude special files
1479 1494 for prefix in self._wlockfreeprefix:
1480 1495 if path.startswith(prefix):
1481 1496 return
1482 1497 repo.ui.develwarn(
1483 1498 b'write with no wlock: "%s"' % path,
1484 1499 stacklevel=3,
1485 1500 config=b'check-locks',
1486 1501 )
1487 1502 return ret
1488 1503
1489 1504 return checkvfs
1490 1505
1491 1506 def _getsvfsward(self, origfunc):
1492 1507 """build a ward for self.svfs"""
1493 1508 rref = weakref.ref(self)
1494 1509
1495 1510 def checksvfs(path, mode=None):
1496 1511 ret = origfunc(path, mode=mode)
1497 1512 repo = rref()
1498 1513 if repo is None or not util.safehasattr(repo, b'_lockref'):
1499 1514 return
1500 1515 if mode in (None, b'r', b'rb'):
1501 1516 return
1502 1517 if path.startswith(repo.sharedpath):
1503 1518 # truncate name relative to the repository (.hg)
1504 1519 path = path[len(repo.sharedpath) + 1 :]
1505 1520 if repo._currentlock(repo._lockref) is None:
1506 1521 repo.ui.develwarn(
1507 1522 b'write with no lock: "%s"' % path, stacklevel=4
1508 1523 )
1509 1524 return ret
1510 1525
1511 1526 return checksvfs
1512 1527
1513 1528 def close(self):
1514 1529 self._writecaches()
1515 1530
1516 1531 def _writecaches(self):
1517 1532 if self._revbranchcache:
1518 1533 self._revbranchcache.write()
1519 1534
1520 1535 def _restrictcapabilities(self, caps):
1521 1536 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1522 1537 caps = set(caps)
1523 1538 capsblob = bundle2.encodecaps(
1524 1539 bundle2.getrepocaps(self, role=b'client')
1525 1540 )
1526 1541 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1527 1542 if self.ui.configbool(b'experimental', b'narrow'):
1528 1543 caps.add(wireprototypes.NARROWCAP)
1529 1544 return caps
1530 1545
1531 1546 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1532 1547 # self -> auditor -> self._checknested -> self
1533 1548
1534 1549 @property
1535 1550 def auditor(self):
1536 1551 # This is only used by context.workingctx.match in order to
1537 1552 # detect files in subrepos.
1538 1553 return pathutil.pathauditor(self.root, callback=self._checknested)
1539 1554
1540 1555 @property
1541 1556 def nofsauditor(self):
1542 1557 # This is only used by context.basectx.match in order to detect
1543 1558 # files in subrepos.
1544 1559 return pathutil.pathauditor(
1545 1560 self.root, callback=self._checknested, realfs=False, cached=True
1546 1561 )
1547 1562
1548 1563 def _checknested(self, path):
1549 1564 """Determine if path is a legal nested repository."""
1550 1565 if not path.startswith(self.root):
1551 1566 return False
1552 1567 subpath = path[len(self.root) + 1 :]
1553 1568 normsubpath = util.pconvert(subpath)
1554 1569
1555 1570 # XXX: Checking against the current working copy is wrong in
1556 1571 # the sense that it can reject things like
1557 1572 #
1558 1573 # $ hg cat -r 10 sub/x.txt
1559 1574 #
1560 1575 # if sub/ is no longer a subrepository in the working copy
1561 1576 # parent revision.
1562 1577 #
1563 1578 # However, it can of course also allow things that would have
1564 1579 # been rejected before, such as the above cat command if sub/
1565 1580 # is a subrepository now, but was a normal directory before.
1566 1581 # The old path auditor would have rejected by mistake since it
1567 1582 # panics when it sees sub/.hg/.
1568 1583 #
1569 1584 # All in all, checking against the working copy seems sensible
1570 1585 # since we want to prevent access to nested repositories on
1571 1586 # the filesystem *now*.
1572 1587 ctx = self[None]
1573 1588 parts = util.splitpath(subpath)
1574 1589 while parts:
1575 1590 prefix = b'/'.join(parts)
1576 1591 if prefix in ctx.substate:
1577 1592 if prefix == normsubpath:
1578 1593 return True
1579 1594 else:
1580 1595 sub = ctx.sub(prefix)
1581 1596 return sub.checknested(subpath[len(prefix) + 1 :])
1582 1597 else:
1583 1598 parts.pop()
1584 1599 return False
1585 1600
1586 1601 def peer(self):
1587 1602 return localpeer(self) # not cached to avoid reference cycle
1588 1603
1589 1604 def unfiltered(self):
1590 1605 """Return unfiltered version of the repository
1591 1606
1592 1607 Intended to be overwritten by filtered repo."""
1593 1608 return self
1594 1609
1595 1610 def filtered(self, name, visibilityexceptions=None):
1596 1611 """Return a filtered version of a repository
1597 1612
1598 1613 The `name` parameter is the identifier of the requested view. This
1599 1614 will return a repoview object set "exactly" to the specified view.
1600 1615
1601 1616 This function does not apply recursive filtering to a repository. For
1602 1617 example calling `repo.filtered("served")` will return a repoview using
1603 1618 the "served" view, regardless of the initial view used by `repo`.
1604 1619
1605 1620 In other words, there is always only one level of `repoview` "filtering".
1606 1621 """
1607 1622 if self._extrafilterid is not None and b'%' not in name:
1608 1623 name = name + b'%' + self._extrafilterid
1609 1624
1610 1625 cls = repoview.newtype(self.unfiltered().__class__)
1611 1626 return cls(self, name, visibilityexceptions)
1612 1627
1613 1628 @mixedrepostorecache(
1614 1629 (b'bookmarks', b'plain'),
1615 1630 (b'bookmarks.current', b'plain'),
1616 1631 (b'bookmarks', b''),
1617 1632 (b'00changelog.i', b''),
1618 1633 )
1619 1634 def _bookmarks(self):
1620 1635 # Since the multiple files involved in the transaction cannot be
1621 1636 # written atomically (with current repository format), there is a race
1622 1637 # condition here.
1623 1638 #
1624 1639 # 1) changelog content A is read
1625 1640 # 2) outside transaction update changelog to content B
1626 1641 # 3) outside transaction update bookmark file referring to content B
1627 1642 # 4) bookmarks file content is read and filtered against changelog-A
1628 1643 #
1629 1644 # When this happens, bookmarks against nodes missing from A are dropped.
1630 1645 #
1631 1646 # Having this happen during a read is not great, but it becomes worse
1632 1647 # when it happens during a write, because the bookmarks on the "unknown"
1633 1648 # nodes will be dropped for good. However, writes happen within locks.
1634 1649 # This locking makes it possible to have a race-free consistent read.
1635 1650 # For this purpose, data read from disk before locking is
1636 1651 # "invalidated" right after the locks are taken. These invalidations are
1637 1652 # "light": the `filecache` mechanism keeps the data in memory and will
1638 1653 # reuse it if the underlying files did not change. Not parsing the
1639 1654 # same data multiple times helps performance.
1640 1655 #
1641 1656 # Unfortunately, in the case described above, the files tracked by the
1642 1657 # bookmarks file cache might not have changed, but the in-memory
1643 1658 # content is still "wrong" because we used an older changelog content
1644 1659 # to process the on-disk data. So after locking, the changelog would be
1645 1660 # refreshed but `_bookmarks` would be preserved.
1646 1661 # Adding `00changelog.i` to the list of tracked files is not
1647 1662 # enough, because at the time we build the content for `_bookmarks` in
1648 1663 # (4), the changelog file has already diverged from the content used
1649 1664 # for loading `changelog` in (1)
1650 1665 #
1651 1666 # To prevent the issue, we force the changelog to be explicitly
1652 1667 # reloaded while computing `_bookmarks`. The data race can still happen
1653 1668 # without the lock (with a narrower window), but it would no longer go
1654 1669 # undetected during the lock time refresh.
1655 1670 #
1656 1671 # The new schedule is as follows:
1657 1672 #
1658 1673 # 1) filecache logic detects that `_bookmarks` needs to be computed
1659 1674 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1660 1675 # 3) We force `changelog` filecache to be tested
1661 1676 # 4) cachestat for `changelog` are captured (for changelog)
1662 1677 # 5) `_bookmarks` is computed and cached
1663 1678 #
1664 1679 # The step in (3) ensures we have a changelog at least as recent as the
1665 1680 # cache stat computed in (1). As a result at locking time:
1666 1681 # * if the changelog did not change since (1) -> we can reuse the data
1667 1682 # * otherwise -> the bookmarks get refreshed.
1668 1683 self._refreshchangelog()
1669 1684 return bookmarks.bmstore(self)
1670 1685
1671 1686 def _refreshchangelog(self):
1672 1687 """make sure the in memory changelog match the on-disk one"""
1673 1688 if 'changelog' in vars(self) and self.currenttransaction() is None:
1674 1689 del self.changelog
1675 1690
1676 1691 @property
1677 1692 def _activebookmark(self):
1678 1693 return self._bookmarks.active
1679 1694
1680 1695 # _phasesets depend on changelog. what we need is to call
1681 1696 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1682 1697 # can't be easily expressed in filecache mechanism.
1683 1698 @storecache(b'phaseroots', b'00changelog.i')
1684 1699 def _phasecache(self):
1685 1700 return phases.phasecache(self, self._phasedefaults)
1686 1701
1687 1702 @storecache(b'obsstore')
1688 1703 def obsstore(self):
1689 1704 return obsolete.makestore(self.ui, self)
1690 1705
1691 1706 @changelogcache()
1692 1707 def changelog(repo):
1693 1708 # load dirstate before changelog to avoid race see issue6303
1694 1709 repo.dirstate.prefetch_parents()
1695 1710 return repo.store.changelog(
1696 1711 txnutil.mayhavepending(repo.root),
1697 1712 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1698 1713 )
1699 1714
1700 @storecache(b'00manifest.i')
1715 @manifestlogcache()
1701 1716 def manifestlog(self):
1702 1717 return self.store.manifestlog(self, self._storenarrowmatch)
1703 1718
1704 1719 @repofilecache(b'dirstate')
1705 1720 def dirstate(self):
1706 1721 return self._makedirstate()
1707 1722
1708 1723 def _makedirstate(self):
1709 1724 """Extension point for wrapping the dirstate per-repo."""
1710 1725 sparsematchfn = lambda: sparse.matcher(self)
1711 1726 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1712 1727 use_dirstate_v2 = v2_req in self.requirements
1713 1728
1714 1729 return dirstate.dirstate(
1715 1730 self.vfs,
1716 1731 self.ui,
1717 1732 self.root,
1718 1733 self._dirstatevalidate,
1719 1734 sparsematchfn,
1720 1735 self.nodeconstants,
1721 1736 use_dirstate_v2,
1722 1737 )
1723 1738
1724 1739 def _dirstatevalidate(self, node):
1725 1740 try:
1726 1741 self.changelog.rev(node)
1727 1742 return node
1728 1743 except error.LookupError:
1729 1744 if not self._dirstatevalidatewarned:
1730 1745 self._dirstatevalidatewarned = True
1731 1746 self.ui.warn(
1732 1747 _(b"warning: ignoring unknown working parent %s!\n")
1733 1748 % short(node)
1734 1749 )
1735 1750 return self.nullid
1736 1751
1737 1752 @storecache(narrowspec.FILENAME)
1738 1753 def narrowpats(self):
1739 1754 """matcher patterns for this repository's narrowspec
1740 1755
1741 1756 A tuple of (includes, excludes).
1742 1757 """
1743 1758 return narrowspec.load(self)
1744 1759
1745 1760 @storecache(narrowspec.FILENAME)
1746 1761 def _storenarrowmatch(self):
1747 1762 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1748 1763 return matchmod.always()
1749 1764 include, exclude = self.narrowpats
1750 1765 return narrowspec.match(self.root, include=include, exclude=exclude)
1751 1766
1752 1767 @storecache(narrowspec.FILENAME)
1753 1768 def _narrowmatch(self):
1754 1769 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1755 1770 return matchmod.always()
1756 1771 narrowspec.checkworkingcopynarrowspec(self)
1757 1772 include, exclude = self.narrowpats
1758 1773 return narrowspec.match(self.root, include=include, exclude=exclude)
1759 1774
1760 1775 def narrowmatch(self, match=None, includeexact=False):
1761 1776 """matcher corresponding the the repo's narrowspec
1762 1777
1763 1778 If `match` is given, then that will be intersected with the narrow
1764 1779 matcher.
1765 1780
1766 1781 If `includeexact` is True, then any exact matches from `match` will
1767 1782 be included even if they're outside the narrowspec.
1768 1783 """
1769 1784 if match:
1770 1785 if includeexact and not self._narrowmatch.always():
1771 1786 # do not exclude explicitly-specified paths so that they can
1772 1787 # be warned later on
1773 1788 em = matchmod.exact(match.files())
1774 1789 nm = matchmod.unionmatcher([self._narrowmatch, em])
1775 1790 return matchmod.intersectmatchers(match, nm)
1776 1791 return matchmod.intersectmatchers(match, self._narrowmatch)
1777 1792 return self._narrowmatch
1778 1793
1779 1794 def setnarrowpats(self, newincludes, newexcludes):
1780 1795 narrowspec.save(self, newincludes, newexcludes)
1781 1796 self.invalidate(clearfilecache=True)
1782 1797
1783 1798 @unfilteredpropertycache
1784 1799 def _quick_access_changeid_null(self):
1785 1800 return {
1786 1801 b'null': (nullrev, self.nodeconstants.nullid),
1787 1802 nullrev: (nullrev, self.nodeconstants.nullid),
1788 1803 self.nullid: (nullrev, self.nullid),
1789 1804 }
1790 1805
1791 1806 @unfilteredpropertycache
1792 1807 def _quick_access_changeid_wc(self):
1793 1808 # also fast path access to the working copy parents
1794 1809 # however, only do it for filter that ensure wc is visible.
1795 1810 quick = self._quick_access_changeid_null.copy()
1796 1811 cl = self.unfiltered().changelog
1797 1812 for node in self.dirstate.parents():
1798 1813 if node == self.nullid:
1799 1814 continue
1800 1815 rev = cl.index.get_rev(node)
1801 1816 if rev is None:
1802 1817 # unknown working copy parent case:
1803 1818 #
1804 1819 # skip the fast path and let higher code deal with it
1805 1820 continue
1806 1821 pair = (rev, node)
1807 1822 quick[rev] = pair
1808 1823 quick[node] = pair
1809 1824 # also add the parents of the parents
1810 1825 for r in cl.parentrevs(rev):
1811 1826 if r == nullrev:
1812 1827 continue
1813 1828 n = cl.node(r)
1814 1829 pair = (r, n)
1815 1830 quick[r] = pair
1816 1831 quick[n] = pair
1817 1832 p1node = self.dirstate.p1()
1818 1833 if p1node != self.nullid:
1819 1834 quick[b'.'] = quick[p1node]
1820 1835 return quick
1821 1836
1822 1837 @unfilteredmethod
1823 1838 def _quick_access_changeid_invalidate(self):
1824 1839 if '_quick_access_changeid_wc' in vars(self):
1825 1840 del self.__dict__['_quick_access_changeid_wc']
1826 1841
1827 1842 @property
1828 1843 def _quick_access_changeid(self):
1829 1844 """an helper dictionnary for __getitem__ calls
1830 1845
1831 1846 This contains a list of symbols we can recognise right away without
1832 1847 further processing.
1833 1848 """
1834 1849 if self.filtername in repoview.filter_has_wc:
1835 1850 return self._quick_access_changeid_wc
1836 1851 return self._quick_access_changeid_null
1837 1852
1838 1853 def __getitem__(self, changeid):
1839 1854 # dealing with special cases
1840 1855 if changeid is None:
1841 1856 return context.workingctx(self)
1842 1857 if isinstance(changeid, context.basectx):
1843 1858 return changeid
1844 1859
1845 1860 # dealing with multiple revisions
1846 1861 if isinstance(changeid, slice):
1847 1862 # wdirrev isn't contiguous so the slice shouldn't include it
1848 1863 return [
1849 1864 self[i]
1850 1865 for i in pycompat.xrange(*changeid.indices(len(self)))
1851 1866 if i not in self.changelog.filteredrevs
1852 1867 ]
1853 1868
1854 1869 # dealing with some special values
1855 1870 quick_access = self._quick_access_changeid.get(changeid)
1856 1871 if quick_access is not None:
1857 1872 rev, node = quick_access
1858 1873 return context.changectx(self, rev, node, maybe_filtered=False)
1859 1874 if changeid == b'tip':
1860 1875 node = self.changelog.tip()
1861 1876 rev = self.changelog.rev(node)
1862 1877 return context.changectx(self, rev, node)
1863 1878
1864 1879 # dealing with arbitrary values
1865 1880 try:
1866 1881 if isinstance(changeid, int):
1867 1882 node = self.changelog.node(changeid)
1868 1883 rev = changeid
1869 1884 elif changeid == b'.':
1870 1885 # this is a hack to delay/avoid loading obsmarkers
1871 1886 # when we know that '.' won't be hidden
1872 1887 node = self.dirstate.p1()
1873 1888 rev = self.unfiltered().changelog.rev(node)
1874 1889 elif len(changeid) == self.nodeconstants.nodelen:
1875 1890 try:
1876 1891 node = changeid
1877 1892 rev = self.changelog.rev(changeid)
1878 1893 except error.FilteredLookupError:
1879 1894 changeid = hex(changeid) # for the error message
1880 1895 raise
1881 1896 except LookupError:
1882 1897 # check if it might have come from damaged dirstate
1883 1898 #
1884 1899 # XXX we could avoid the unfiltered if we had a recognizable
1885 1900 # exception for filtered changeset access
1886 1901 if (
1887 1902 self.local()
1888 1903 and changeid in self.unfiltered().dirstate.parents()
1889 1904 ):
1890 1905 msg = _(b"working directory has unknown parent '%s'!")
1891 1906 raise error.Abort(msg % short(changeid))
1892 1907 changeid = hex(changeid) # for the error message
1893 1908 raise
1894 1909
1895 1910 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1896 1911 node = bin(changeid)
1897 1912 rev = self.changelog.rev(node)
1898 1913 else:
1899 1914 raise error.ProgrammingError(
1900 1915 b"unsupported changeid '%s' of type %s"
1901 1916 % (changeid, pycompat.bytestr(type(changeid)))
1902 1917 )
1903 1918
1904 1919 return context.changectx(self, rev, node)
1905 1920
1906 1921 except (error.FilteredIndexError, error.FilteredLookupError):
1907 1922 raise error.FilteredRepoLookupError(
1908 1923 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1909 1924 )
1910 1925 except (IndexError, LookupError):
1911 1926 raise error.RepoLookupError(
1912 1927 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1913 1928 )
1914 1929 except error.WdirUnsupported:
1915 1930 return context.workingctx(self)
1916 1931
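# A minimal usage sketch of the changeid forms accepted by __getitem__
# above. The repository path is hypothetical, the snippet assumes the
# repository has at least one revision, and `hg.repository` is only one
# way to obtain a repo object.

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
wctx = repo[None]                 # working directory context
tipctx = repo[b'tip']             # symbolic name
firstctx = repo[0]                # integer revision
samectx = repo[firstctx.node()]   # 20-byte binary node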
1917 1932 def __contains__(self, changeid):
1918 1933 """True if the given changeid exists"""
1919 1934 try:
1920 1935 self[changeid]
1921 1936 return True
1922 1937 except error.RepoLookupError:
1923 1938 return False
1924 1939
1925 1940 def __nonzero__(self):
1926 1941 return True
1927 1942
1928 1943 __bool__ = __nonzero__
1929 1944
1930 1945 def __len__(self):
1931 1946 # no need to pay the cost of repoview.changelog
1932 1947 unfi = self.unfiltered()
1933 1948 return len(unfi.changelog)
1934 1949
1935 1950 def __iter__(self):
1936 1951 return iter(self.changelog)
1937 1952
1938 1953 def revs(self, expr, *args):
1939 1954 """Find revisions matching a revset.
1940 1955
1941 1956 The revset is specified as a string ``expr`` that may contain
1942 1957 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1943 1958
1944 1959 Revset aliases from the configuration are not expanded. To expand
1945 1960 user aliases, consider calling ``scmutil.revrange()`` or
1946 1961 ``repo.anyrevs([expr], user=True)``.
1947 1962
1948 1963 Returns a smartset.abstractsmartset, which is a list-like interface
1949 1964 that contains integer revisions.
1950 1965 """
1951 1966 tree = revsetlang.spectree(expr, *args)
1952 1967 return revset.makematcher(tree)(self)
1953 1968
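# Illustrative revs() calls using the %-formatting described in the
# docstring above; the revision number and branch name are examples only,
# and `repo` is assumed to be a local repository object as in the earlier
# sketch.

ancestor_revs = repo.revs(b'ancestors(%d)', 5)       # %d escapes an integer
branch_revs = repo.revs(b'branch(%s)', b'default')   # %s escapes a string
for r in branch_revs:
    print(r)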
1954 1969 def set(self, expr, *args):
1955 1970 """Find revisions matching a revset and emit changectx instances.
1956 1971
1957 1972 This is a convenience wrapper around ``revs()`` that iterates the
1958 1973 result and is a generator of changectx instances.
1959 1974
1960 1975 Revset aliases from the configuration are not expanded. To expand
1961 1976 user aliases, consider calling ``scmutil.revrange()``.
1962 1977 """
1963 1978 for r in self.revs(expr, *args):
1964 1979 yield self[r]
1965 1980
1966 1981 def anyrevs(self, specs, user=False, localalias=None):
1967 1982 """Find revisions matching one of the given revsets.
1968 1983
1969 1984 Revset aliases from the configuration are not expanded by default. To
1970 1985 expand user aliases, specify ``user=True``. To provide some local
1971 1986 definitions overriding user aliases, set ``localalias`` to
1972 1987 ``{name: definitionstring}``.
1973 1988 """
1974 1989 if specs == [b'null']:
1975 1990 return revset.baseset([nullrev])
1976 1991 if specs == [b'.']:
1977 1992 quick_data = self._quick_access_changeid.get(b'.')
1978 1993 if quick_data is not None:
1979 1994 return revset.baseset([quick_data[0]])
1980 1995 if user:
1981 1996 m = revset.matchany(
1982 1997 self.ui,
1983 1998 specs,
1984 1999 lookup=revset.lookupfn(self),
1985 2000 localalias=localalias,
1986 2001 )
1987 2002 else:
1988 2003 m = revset.matchany(None, specs, localalias=localalias)
1989 2004 return m(self)
1990 2005
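# A small sketch of anyrevs() with a local alias overriding any user-level
# definition; the alias name and revset are invented for illustration.

revs = repo.anyrevs(
    [b'mybase::.'], user=True, localalias={b'mybase': b'tip~2'}
)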
1991 2006 def url(self):
1992 2007 return b'file:' + self.root
1993 2008
1994 2009 def hook(self, name, throw=False, **args):
1995 2010 """Call a hook, passing this repo instance.
1996 2011
1997 2012 This is a convenience method to aid invoking hooks. Extensions likely
1998 2013 won't call this unless they have registered a custom hook or are
1999 2014 replacing code that is expected to call a hook.
2000 2015 """
2001 2016 return hook.hook(self.ui, self, name, throw, **args)
2002 2017
2003 2018 @filteredpropertycache
2004 2019 def _tagscache(self):
2005 2020 """Returns a tagscache object that contains various tags related
2006 2021 caches."""
2007 2022
2008 2023 # This simplifies its cache management by having one decorated
2009 2024 # function (this one) and the rest simply fetch things from it.
2010 2025 class tagscache(object):
2011 2026 def __init__(self):
2012 2027 # These two define the set of tags for this repository. tags
2013 2028 # maps tag name to node; tagtypes maps tag name to 'global' or
2014 2029 # 'local'. (Global tags are defined by .hgtags across all
2015 2030 # heads, and local tags are defined in .hg/localtags.)
2016 2031 # They constitute the in-memory cache of tags.
2017 2032 self.tags = self.tagtypes = None
2018 2033
2019 2034 self.nodetagscache = self.tagslist = None
2020 2035
2021 2036 cache = tagscache()
2022 2037 cache.tags, cache.tagtypes = self._findtags()
2023 2038
2024 2039 return cache
2025 2040
2026 2041 def tags(self):
2027 2042 '''return a mapping of tag to node'''
2028 2043 t = {}
2029 2044 if self.changelog.filteredrevs:
2030 2045 tags, tt = self._findtags()
2031 2046 else:
2032 2047 tags = self._tagscache.tags
2033 2048 rev = self.changelog.rev
2034 2049 for k, v in pycompat.iteritems(tags):
2035 2050 try:
2036 2051 # ignore tags to unknown nodes
2037 2052 rev(v)
2038 2053 t[k] = v
2039 2054 except (error.LookupError, ValueError):
2040 2055 pass
2041 2056 return t
2042 2057
2043 2058 def _findtags(self):
2044 2059 """Do the hard work of finding tags. Return a pair of dicts
2045 2060 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2046 2061 maps tag name to a string like \'global\' or \'local\'.
2047 2062 Subclasses or extensions are free to add their own tags, but
2048 2063 should be aware that the returned dicts will be retained for the
2049 2064 duration of the localrepo object."""
2050 2065
2051 2066 # XXX what tagtype should subclasses/extensions use? Currently
2052 2067 # mq and bookmarks add tags, but do not set the tagtype at all.
2053 2068 # Should each extension invent its own tag type? Should there
2054 2069 # be one tagtype for all such "virtual" tags? Or is the status
2055 2070 # quo fine?
2056 2071
2057 2072 # map tag name to (node, hist)
2058 2073 alltags = tagsmod.findglobaltags(self.ui, self)
2059 2074 # map tag name to tag type
2060 2075 tagtypes = {tag: b'global' for tag in alltags}
2061 2076
2062 2077 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2063 2078
2064 2079 # Build the return dicts. Have to re-encode tag names because
2065 2080 # the tags module always uses UTF-8 (in order not to lose info
2066 2081 # writing to the cache), but the rest of Mercurial wants them in
2067 2082 # local encoding.
2068 2083 tags = {}
2069 2084 for (name, (node, hist)) in pycompat.iteritems(alltags):
2070 2085 if node != self.nullid:
2071 2086 tags[encoding.tolocal(name)] = node
2072 2087 tags[b'tip'] = self.changelog.tip()
2073 2088 tagtypes = {
2074 2089 encoding.tolocal(name): value
2075 2090 for (name, value) in pycompat.iteritems(tagtypes)
2076 2091 }
2077 2092 return (tags, tagtypes)
2078 2093
2079 2094 def tagtype(self, tagname):
2080 2095 """
2081 2096 return the type of the given tag. result can be:
2082 2097
2083 2098 'local' : a local tag
2084 2099 'global' : a global tag
2085 2100 None : tag does not exist
2086 2101 """
2087 2102
2088 2103 return self._tagscache.tagtypes.get(tagname)
2089 2104
2090 2105 def tagslist(self):
2091 2106 '''return a list of tags ordered by revision'''
2092 2107 if not self._tagscache.tagslist:
2093 2108 l = []
2094 2109 for t, n in pycompat.iteritems(self.tags()):
2095 2110 l.append((self.changelog.rev(n), t, n))
2096 2111 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2097 2112
2098 2113 return self._tagscache.tagslist
2099 2114
2100 2115 def nodetags(self, node):
2101 2116 '''return the tags associated with a node'''
2102 2117 if not self._tagscache.nodetagscache:
2103 2118 nodetagscache = {}
2104 2119 for t, n in pycompat.iteritems(self._tagscache.tags):
2105 2120 nodetagscache.setdefault(n, []).append(t)
2106 2121 for tags in pycompat.itervalues(nodetagscache):
2107 2122 tags.sort()
2108 2123 self._tagscache.nodetagscache = nodetagscache
2109 2124 return self._tagscache.nodetagscache.get(node, [])
2110 2125
2111 2126 def nodebookmarks(self, node):
2112 2127 """return the list of bookmarks pointing to the specified node"""
2113 2128 return self._bookmarks.names(node)
2114 2129
2115 2130 def branchmap(self):
2116 2131 """returns a dictionary {branch: [branchheads]} with branchheads
2117 2132 ordered by increasing revision number"""
2118 2133 return self._branchcaches[self]
2119 2134
2120 2135 @unfilteredmethod
2121 2136 def revbranchcache(self):
2122 2137 if not self._revbranchcache:
2123 2138 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2124 2139 return self._revbranchcache
2125 2140
2126 2141 def register_changeset(self, rev, changelogrevision):
2127 2142 self.revbranchcache().setdata(rev, changelogrevision)
2128 2143
2129 2144 def branchtip(self, branch, ignoremissing=False):
2130 2145 """return the tip node for a given branch
2131 2146
2132 2147 If ignoremissing is True, then this method will not raise an error.
2133 2148 This is helpful for callers that only expect None for a missing branch
2134 2149 (e.g. namespace).
2135 2150
2136 2151 """
2137 2152 try:
2138 2153 return self.branchmap().branchtip(branch)
2139 2154 except KeyError:
2140 2155 if not ignoremissing:
2141 2156 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2142 2157 else:
2143 2158 pass
2144 2159
2145 2160 def lookup(self, key):
2146 2161 node = scmutil.revsymbol(self, key).node()
2147 2162 if node is None:
2148 2163 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2149 2164 return node
2150 2165
2151 2166 def lookupbranch(self, key):
2152 2167 if self.branchmap().hasbranch(key):
2153 2168 return key
2154 2169
2155 2170 return scmutil.revsymbol(self, key).branch()
2156 2171
2157 2172 def known(self, nodes):
2158 2173 cl = self.changelog
2159 2174 get_rev = cl.index.get_rev
2160 2175 filtered = cl.filteredrevs
2161 2176 result = []
2162 2177 for n in nodes:
2163 2178 r = get_rev(n)
2164 2179 resp = not (r is None or r in filtered)
2165 2180 result.append(resp)
2166 2181 return result
2167 2182
2168 2183 def local(self):
2169 2184 return self
2170 2185
2171 2186 def publishing(self):
2172 2187 # it's safe (and desirable) to trust the publish flag unconditionally
2173 2188 # so that we don't finalize changes shared between users via ssh or nfs
2174 2189 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2175 2190
2176 2191 def cancopy(self):
2177 2192 # so statichttprepo's override of local() works
2178 2193 if not self.local():
2179 2194 return False
2180 2195 if not self.publishing():
2181 2196 return True
2182 2197 # if publishing we can't copy if there is filtered content
2183 2198 return not self.filtered(b'visible').changelog.filteredrevs
2184 2199
2185 2200 def shared(self):
2186 2201 '''the type of shared repository (None if not shared)'''
2187 2202 if self.sharedpath != self.path:
2188 2203 return b'store'
2189 2204 return None
2190 2205
2191 2206 def wjoin(self, f, *insidef):
2192 2207 return self.vfs.reljoin(self.root, f, *insidef)
2193 2208
2194 2209 def setparents(self, p1, p2=None):
2195 2210 if p2 is None:
2196 2211 p2 = self.nullid
2197 2212 self[None].setparents(p1, p2)
2198 2213 self._quick_access_changeid_invalidate()
2199 2214
2200 2215 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2201 2216 """changeid must be a changeset revision, if specified.
2202 2217 fileid can be a file revision or node."""
2203 2218 return context.filectx(
2204 2219 self, path, changeid, fileid, changectx=changectx
2205 2220 )
2206 2221
2207 2222 def getcwd(self):
2208 2223 return self.dirstate.getcwd()
2209 2224
2210 2225 def pathto(self, f, cwd=None):
2211 2226 return self.dirstate.pathto(f, cwd)
2212 2227
2213 2228 def _loadfilter(self, filter):
2214 2229 if filter not in self._filterpats:
2215 2230 l = []
2216 2231 for pat, cmd in self.ui.configitems(filter):
2217 2232 if cmd == b'!':
2218 2233 continue
2219 2234 mf = matchmod.match(self.root, b'', [pat])
2220 2235 fn = None
2221 2236 params = cmd
2222 2237 for name, filterfn in pycompat.iteritems(self._datafilters):
2223 2238 if cmd.startswith(name):
2224 2239 fn = filterfn
2225 2240 params = cmd[len(name) :].lstrip()
2226 2241 break
2227 2242 if not fn:
2228 2243 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2229 2244 fn.__name__ = 'commandfilter'
2230 2245 # Wrap old filters not supporting keyword arguments
2231 2246 if not pycompat.getargspec(fn)[2]:
2232 2247 oldfn = fn
2233 2248 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2234 2249 fn.__name__ = 'compat-' + oldfn.__name__
2235 2250 l.append((mf, fn, params))
2236 2251 self._filterpats[filter] = l
2237 2252 return self._filterpats[filter]
2238 2253
2239 2254 def _filter(self, filterpats, filename, data):
2240 2255 for mf, fn, cmd in filterpats:
2241 2256 if mf(filename):
2242 2257 self.ui.debug(
2243 2258 b"filtering %s through %s\n"
2244 2259 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2245 2260 )
2246 2261 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2247 2262 break
2248 2263
2249 2264 return data
2250 2265
2251 2266 @unfilteredpropertycache
2252 2267 def _encodefilterpats(self):
2253 2268 return self._loadfilter(b'encode')
2254 2269
2255 2270 @unfilteredpropertycache
2256 2271 def _decodefilterpats(self):
2257 2272 return self._loadfilter(b'decode')
2258 2273
2259 2274 def adddatafilter(self, name, filter):
2260 2275 self._datafilters[name] = filter
2261 2276
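# A sketch of a data filter that an extension could register with
# adddatafilter(); the keyword arguments mirror the call made from
# _filter() above, and the filter name b'mynormalize:' is made up.

def mynormalize(data, cmd, ui=None, repo=None, filename=None, **kwargs):
    # example transformation: normalize CRLF line endings to LF
    return data.replace(b'\r\n', b'\n')

repo.adddatafilter(b'mynormalize:', mynormalize)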
2262 2277 def wread(self, filename):
2263 2278 if self.wvfs.islink(filename):
2264 2279 data = self.wvfs.readlink(filename)
2265 2280 else:
2266 2281 data = self.wvfs.read(filename)
2267 2282 return self._filter(self._encodefilterpats, filename, data)
2268 2283
2269 2284 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2270 2285 """write ``data`` into ``filename`` in the working directory
2271 2286
2272 2287 This returns the length of the written (maybe decoded) data.
2273 2288 """
2274 2289 data = self._filter(self._decodefilterpats, filename, data)
2275 2290 if b'l' in flags:
2276 2291 self.wvfs.symlink(data, filename)
2277 2292 else:
2278 2293 self.wvfs.write(
2279 2294 filename, data, backgroundclose=backgroundclose, **kwargs
2280 2295 )
2281 2296 if b'x' in flags:
2282 2297 self.wvfs.setflags(filename, False, True)
2283 2298 else:
2284 2299 self.wvfs.setflags(filename, False, False)
2285 2300 return len(data)
2286 2301
2287 2302 def wwritedata(self, filename, data):
2288 2303 return self._filter(self._decodefilterpats, filename, data)
2289 2304
2290 2305 def currenttransaction(self):
2291 2306 """return the current transaction or None if non exists"""
2292 2307 if self._transref:
2293 2308 tr = self._transref()
2294 2309 else:
2295 2310 tr = None
2296 2311
2297 2312 if tr and tr.running():
2298 2313 return tr
2299 2314 return None
2300 2315
2301 2316 def transaction(self, desc, report=None):
2302 2317 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2303 2318 b'devel', b'check-locks'
2304 2319 ):
2305 2320 if self._currentlock(self._lockref) is None:
2306 2321 raise error.ProgrammingError(b'transaction requires locking')
2307 2322 tr = self.currenttransaction()
2308 2323 if tr is not None:
2309 2324 return tr.nest(name=desc)
2310 2325
2311 2326 # abort here if the journal already exists
2312 2327 if self.svfs.exists(b"journal"):
2313 2328 raise error.RepoError(
2314 2329 _(b"abandoned transaction found"),
2315 2330 hint=_(b"run 'hg recover' to clean up transaction"),
2316 2331 )
2317 2332
2318 2333 idbase = b"%.40f#%f" % (random.random(), time.time())
2319 2334 ha = hex(hashutil.sha1(idbase).digest())
2320 2335 txnid = b'TXN:' + ha
2321 2336 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2322 2337
2323 2338 self._writejournal(desc)
2324 2339 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2325 2340 if report:
2326 2341 rp = report
2327 2342 else:
2328 2343 rp = self.ui.warn
2329 2344 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2330 2345 # we must avoid cyclic reference between repo and transaction.
2331 2346 reporef = weakref.ref(self)
2332 2347 # Code to track tag movement
2333 2348 #
2334 2349 # Since tags are all handled as file content, it is actually quite hard
2335 2350 # to track these movements from a code perspective. So we fall back to
2336 2351 # tracking at the repository level. One could envision tracking changes
2337 2352 # to the '.hgtags' file through changegroup application, but that fails to
2338 2353 # cope with cases where a transaction exposes new heads without a changegroup
2339 2354 # being involved (eg: phase movement).
2340 2355 #
2341 2356 # For now, we gate the feature behind a flag since this likely comes
2342 2357 # with performance impacts. The current code runs more often than needed
2343 2358 # and does not use caches as much as it could. The current focus is on
2344 2359 # the behavior of the feature so we disable it by default. The flag
2345 2360 # will be removed when we are happy with the performance impact.
2346 2361 #
2347 2362 # Once this feature is no longer experimental move the following
2348 2363 # documentation to the appropriate help section:
2349 2364 #
2350 2365 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2351 2366 # tags (new or changed or deleted tags). In addition the details of
2352 2367 # these changes are made available in a file at:
2353 2368 # ``REPOROOT/.hg/changes/tags.changes``.
2354 2369 # Make sure you check for HG_TAG_MOVED before reading that file as it
2355 2370 # might exist from a previous transaction even if no tag were touched
2356 2371 # in this one. Changes are recorded in a line-based format::
2357 2372 #
2358 2373 # <action> <hex-node> <tag-name>\n
2359 2374 #
2360 2375 # Actions are defined as follows:
2361 2376 # "-R": tag is removed,
2362 2377 # "+A": tag is added,
2363 2378 # "-M": tag is moved (old value),
2364 2379 # "+M": tag is moved (new value),
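# For illustration only (the node hashes below are invented), a
# transaction that adds tag "v1.4" and moves tag "stable" would record:
#
# +A 9c34feab0c7d4d2ebcfcd1a0b37ad467e1a185f3 v1.4
# -M 1d2e7d8c0f5a4b6c8d9e0f1a2b3c4d5e6f708192 stable
# +M 3a4b5c6d7e8f90a1b2c3d4e5f60718293a4b5c6d stable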
2365 2380 tracktags = lambda x: None
2366 2381 # experimental config: experimental.hook-track-tags
2367 2382 shouldtracktags = self.ui.configbool(
2368 2383 b'experimental', b'hook-track-tags'
2369 2384 )
2370 2385 if desc != b'strip' and shouldtracktags:
2371 2386 oldheads = self.changelog.headrevs()
2372 2387
2373 2388 def tracktags(tr2):
2374 2389 repo = reporef()
2375 2390 assert repo is not None # help pytype
2376 2391 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2377 2392 newheads = repo.changelog.headrevs()
2378 2393 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2379 2394 # notes: we compare lists here.
2380 2395 # As we do it only once, building a set would not be cheaper
2381 2396 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2382 2397 if changes:
2383 2398 tr2.hookargs[b'tag_moved'] = b'1'
2384 2399 with repo.vfs(
2385 2400 b'changes/tags.changes', b'w', atomictemp=True
2386 2401 ) as changesfile:
2387 2402 # note: we do not register the file to the transaction
2388 2403 # because we need it to still exist when the transaction
2389 2404 # is closed (for txnclose hooks)
2390 2405 tagsmod.writediff(changesfile, changes)
2391 2406
2392 2407 def validate(tr2):
2393 2408 """will run pre-closing hooks"""
2394 2409 # XXX the transaction API is a bit lacking here so we take a hacky
2395 2410 # path for now
2396 2411 #
2397 2412 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2398 2413 # dict is copied before these run. In addition we need the data
2399 2414 # available to in-memory hooks too.
2400 2415 #
2401 2416 # Moreover, we also need to make sure this runs before txnclose
2402 2417 # hooks and there is no "pending" mechanism that would execute
2403 2418 # logic only if hooks are about to run.
2404 2419 #
2405 2420 # Fixing this limitation of the transaction is also needed to track
2406 2421 # other families of changes (bookmarks, phases, obsolescence).
2407 2422 #
2408 2423 # This will have to be fixed before we remove the experimental
2409 2424 # gating.
2410 2425 tracktags(tr2)
2411 2426 repo = reporef()
2412 2427 assert repo is not None # help pytype
2413 2428
2414 2429 singleheadopt = (b'experimental', b'single-head-per-branch')
2415 2430 singlehead = repo.ui.configbool(*singleheadopt)
2416 2431 if singlehead:
2417 2432 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2418 2433 accountclosed = singleheadsub.get(
2419 2434 b"account-closed-heads", False
2420 2435 )
2421 2436 if singleheadsub.get(b"public-changes-only", False):
2422 2437 filtername = b"immutable"
2423 2438 else:
2424 2439 filtername = b"visible"
2425 2440 scmutil.enforcesinglehead(
2426 2441 repo, tr2, desc, accountclosed, filtername
2427 2442 )
2428 2443 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2429 2444 for name, (old, new) in sorted(
2430 2445 tr.changes[b'bookmarks'].items()
2431 2446 ):
2432 2447 args = tr.hookargs.copy()
2433 2448 args.update(bookmarks.preparehookargs(name, old, new))
2434 2449 repo.hook(
2435 2450 b'pretxnclose-bookmark',
2436 2451 throw=True,
2437 2452 **pycompat.strkwargs(args)
2438 2453 )
2439 2454 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2440 2455 cl = repo.unfiltered().changelog
2441 2456 for revs, (old, new) in tr.changes[b'phases']:
2442 2457 for rev in revs:
2443 2458 args = tr.hookargs.copy()
2444 2459 node = hex(cl.node(rev))
2445 2460 args.update(phases.preparehookargs(node, old, new))
2446 2461 repo.hook(
2447 2462 b'pretxnclose-phase',
2448 2463 throw=True,
2449 2464 **pycompat.strkwargs(args)
2450 2465 )
2451 2466
2452 2467 repo.hook(
2453 2468 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2454 2469 )
2455 2470
2456 2471 def releasefn(tr, success):
2457 2472 repo = reporef()
2458 2473 if repo is None:
2459 2474 # If the repo has been GC'd (and this release function is being
2460 2475 # called from transaction.__del__), there's not much we can do,
2461 2476 # so just leave the unfinished transaction there and let the
2462 2477 # user run `hg recover`.
2463 2478 return
2464 2479 if success:
2465 2480 # this should be explicitly invoked here, because
2466 2481 # in-memory changes aren't written out at closing
2467 2482 # transaction, if tr.addfilegenerator (via
2468 2483 # dirstate.write or so) isn't invoked while
2469 2484 # transaction running
2470 2485 repo.dirstate.write(None)
2471 2486 else:
2472 2487 # discard all changes (including ones already written
2473 2488 # out) in this transaction
2474 2489 narrowspec.restorebackup(self, b'journal.narrowspec')
2475 2490 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2476 2491 repo.dirstate.restorebackup(None, b'journal.dirstate')
2477 2492
2478 2493 repo.invalidate(clearfilecache=True)
2479 2494
2480 2495 tr = transaction.transaction(
2481 2496 rp,
2482 2497 self.svfs,
2483 2498 vfsmap,
2484 2499 b"journal",
2485 2500 b"undo",
2486 2501 aftertrans(renames),
2487 2502 self.store.createmode,
2488 2503 validator=validate,
2489 2504 releasefn=releasefn,
2490 2505 checkambigfiles=_cachedfiles,
2491 2506 name=desc,
2492 2507 )
2493 2508 tr.changes[b'origrepolen'] = len(self)
2494 2509 tr.changes[b'obsmarkers'] = set()
2495 2510 tr.changes[b'phases'] = []
2496 2511 tr.changes[b'bookmarks'] = {}
2497 2512
2498 2513 tr.hookargs[b'txnid'] = txnid
2499 2514 tr.hookargs[b'txnname'] = desc
2500 2515 tr.hookargs[b'changes'] = tr.changes
2501 2516 # note: writing the fncache only during finalize means that the file is
2502 2517 # outdated when running hooks. As fncache is used for streaming clone,
2503 2518 # this is not expected to break anything that happens during the hooks.
2504 2519 tr.addfinalize(b'flush-fncache', self.store.write)
2505 2520
2506 2521 def txnclosehook(tr2):
2507 2522 """To be run if transaction is successful, will schedule a hook run"""
2508 2523 # Don't reference tr2 in hook() so we don't hold a reference.
2509 2524 # This reduces memory consumption when there are multiple
2510 2525 # transactions per lock. This can likely go away if issue5045
2511 2526 # fixes the function accumulation.
2512 2527 hookargs = tr2.hookargs
2513 2528
2514 2529 def hookfunc(unused_success):
2515 2530 repo = reporef()
2516 2531 assert repo is not None # help pytype
2517 2532
2518 2533 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2519 2534 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2520 2535 for name, (old, new) in bmchanges:
2521 2536 args = tr.hookargs.copy()
2522 2537 args.update(bookmarks.preparehookargs(name, old, new))
2523 2538 repo.hook(
2524 2539 b'txnclose-bookmark',
2525 2540 throw=False,
2526 2541 **pycompat.strkwargs(args)
2527 2542 )
2528 2543
2529 2544 if hook.hashook(repo.ui, b'txnclose-phase'):
2530 2545 cl = repo.unfiltered().changelog
2531 2546 phasemv = sorted(
2532 2547 tr.changes[b'phases'], key=lambda r: r[0][0]
2533 2548 )
2534 2549 for revs, (old, new) in phasemv:
2535 2550 for rev in revs:
2536 2551 args = tr.hookargs.copy()
2537 2552 node = hex(cl.node(rev))
2538 2553 args.update(phases.preparehookargs(node, old, new))
2539 2554 repo.hook(
2540 2555 b'txnclose-phase',
2541 2556 throw=False,
2542 2557 **pycompat.strkwargs(args)
2543 2558 )
2544 2559
2545 2560 repo.hook(
2546 2561 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2547 2562 )
2548 2563
2549 2564 repo = reporef()
2550 2565 assert repo is not None # help pytype
2551 2566 repo._afterlock(hookfunc)
2552 2567
2553 2568 tr.addfinalize(b'txnclose-hook', txnclosehook)
2554 2569 # Include a leading "-" to make it happen before the transaction summary
2555 2570 # reports registered via scmutil.registersummarycallback() whose names
2556 2571 # are 00-txnreport etc. That way, the caches will be warm when the
2557 2572 # callbacks run.
2558 2573 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2559 2574
2560 2575 def txnaborthook(tr2):
2561 2576 """To be run if transaction is aborted"""
2562 2577 repo = reporef()
2563 2578 assert repo is not None # help pytype
2564 2579 repo.hook(
2565 2580 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2566 2581 )
2567 2582
2568 2583 tr.addabort(b'txnabort-hook', txnaborthook)
2569 2584 # avoid eager cache invalidation. in-memory data should be identical
2570 2585 # to stored data if transaction has no error.
2571 2586 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2572 2587 self._transref = weakref.ref(tr)
2573 2588 scmutil.registersummarycallback(self, tr, desc)
2574 2589 return tr
2575 2590
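# How calling code typically opens a transaction, honoring the
# "transaction requires locking" check at the top of transaction() above;
# a sketch, with an arbitrary transaction name.

with repo.wlock(), repo.lock():
    with repo.transaction(b'example-change') as tr:
        pass  # store writes go here; hooks and cache warming run on close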
2576 2591 def _journalfiles(self):
2577 2592 return (
2578 2593 (self.svfs, b'journal'),
2579 2594 (self.svfs, b'journal.narrowspec'),
2580 2595 (self.vfs, b'journal.narrowspec.dirstate'),
2581 2596 (self.vfs, b'journal.dirstate'),
2582 2597 (self.vfs, b'journal.branch'),
2583 2598 (self.vfs, b'journal.desc'),
2584 2599 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2585 2600 (self.svfs, b'journal.phaseroots'),
2586 2601 )
2587 2602
2588 2603 def undofiles(self):
2589 2604 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2590 2605
2591 2606 @unfilteredmethod
2592 2607 def _writejournal(self, desc):
2593 2608 self.dirstate.savebackup(None, b'journal.dirstate')
2594 2609 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2595 2610 narrowspec.savebackup(self, b'journal.narrowspec')
2596 2611 self.vfs.write(
2597 2612 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2598 2613 )
2599 2614 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2600 2615 bookmarksvfs = bookmarks.bookmarksvfs(self)
2601 2616 bookmarksvfs.write(
2602 2617 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2603 2618 )
2604 2619 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2605 2620
2606 2621 def recover(self):
2607 2622 with self.lock():
2608 2623 if self.svfs.exists(b"journal"):
2609 2624 self.ui.status(_(b"rolling back interrupted transaction\n"))
2610 2625 vfsmap = {
2611 2626 b'': self.svfs,
2612 2627 b'plain': self.vfs,
2613 2628 }
2614 2629 transaction.rollback(
2615 2630 self.svfs,
2616 2631 vfsmap,
2617 2632 b"journal",
2618 2633 self.ui.warn,
2619 2634 checkambigfiles=_cachedfiles,
2620 2635 )
2621 2636 self.invalidate()
2622 2637 return True
2623 2638 else:
2624 2639 self.ui.warn(_(b"no interrupted transaction available\n"))
2625 2640 return False
2626 2641
2627 2642 def rollback(self, dryrun=False, force=False):
2628 2643 wlock = lock = dsguard = None
2629 2644 try:
2630 2645 wlock = self.wlock()
2631 2646 lock = self.lock()
2632 2647 if self.svfs.exists(b"undo"):
2633 2648 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2634 2649
2635 2650 return self._rollback(dryrun, force, dsguard)
2636 2651 else:
2637 2652 self.ui.warn(_(b"no rollback information available\n"))
2638 2653 return 1
2639 2654 finally:
2640 2655 release(dsguard, lock, wlock)
2641 2656
2642 2657 @unfilteredmethod # Until we get smarter cache management
2643 2658 def _rollback(self, dryrun, force, dsguard):
2644 2659 ui = self.ui
2645 2660 try:
2646 2661 args = self.vfs.read(b'undo.desc').splitlines()
2647 2662 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2648 2663 if len(args) >= 3:
2649 2664 detail = args[2]
2650 2665 oldtip = oldlen - 1
2651 2666
2652 2667 if detail and ui.verbose:
2653 2668 msg = _(
2654 2669 b'repository tip rolled back to revision %d'
2655 2670 b' (undo %s: %s)\n'
2656 2671 ) % (oldtip, desc, detail)
2657 2672 else:
2658 2673 msg = _(
2659 2674 b'repository tip rolled back to revision %d (undo %s)\n'
2660 2675 ) % (oldtip, desc)
2661 2676 except IOError:
2662 2677 msg = _(b'rolling back unknown transaction\n')
2663 2678 desc = None
2664 2679
2665 2680 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2666 2681 raise error.Abort(
2667 2682 _(
2668 2683 b'rollback of last commit while not checked out '
2669 2684 b'may lose data'
2670 2685 ),
2671 2686 hint=_(b'use -f to force'),
2672 2687 )
2673 2688
2674 2689 ui.status(msg)
2675 2690 if dryrun:
2676 2691 return 0
2677 2692
2678 2693 parents = self.dirstate.parents()
2679 2694 self.destroying()
2680 2695 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2681 2696 transaction.rollback(
2682 2697 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2683 2698 )
2684 2699 bookmarksvfs = bookmarks.bookmarksvfs(self)
2685 2700 if bookmarksvfs.exists(b'undo.bookmarks'):
2686 2701 bookmarksvfs.rename(
2687 2702 b'undo.bookmarks', b'bookmarks', checkambig=True
2688 2703 )
2689 2704 if self.svfs.exists(b'undo.phaseroots'):
2690 2705 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2691 2706 self.invalidate()
2692 2707
2693 2708 has_node = self.changelog.index.has_node
2694 2709 parentgone = any(not has_node(p) for p in parents)
2695 2710 if parentgone:
2696 2711 # prevent dirstateguard from overwriting already restored one
2697 2712 dsguard.close()
2698 2713
2699 2714 narrowspec.restorebackup(self, b'undo.narrowspec')
2700 2715 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2701 2716 self.dirstate.restorebackup(None, b'undo.dirstate')
2702 2717 try:
2703 2718 branch = self.vfs.read(b'undo.branch')
2704 2719 self.dirstate.setbranch(encoding.tolocal(branch))
2705 2720 except IOError:
2706 2721 ui.warn(
2707 2722 _(
2708 2723 b'named branch could not be reset: '
2709 2724 b'current branch is still \'%s\'\n'
2710 2725 )
2711 2726 % self.dirstate.branch()
2712 2727 )
2713 2728
2714 2729 parents = tuple([p.rev() for p in self[None].parents()])
2715 2730 if len(parents) > 1:
2716 2731 ui.status(
2717 2732 _(
2718 2733 b'working directory now based on '
2719 2734 b'revisions %d and %d\n'
2720 2735 )
2721 2736 % parents
2722 2737 )
2723 2738 else:
2724 2739 ui.status(
2725 2740 _(b'working directory now based on revision %d\n') % parents
2726 2741 )
2727 2742 mergestatemod.mergestate.clean(self)
2728 2743
2729 2744 # TODO: if we know which new heads may result from this rollback, pass
2730 2745 # them to destroy(), which will prevent the branchhead cache from being
2731 2746 # invalidated.
2732 2747 self.destroyed()
2733 2748 return 0
2734 2749
2735 2750 def _buildcacheupdater(self, newtransaction):
2736 2751 """called during transaction to build the callback updating cache
2737 2752
2738 2753 Lives on the repository to help extensions that might want to augment
2739 2754 this logic. For this purpose, the created transaction is passed to the
2740 2755 method.
2741 2756 """
2742 2757 # we must avoid cyclic reference between repo and transaction.
2743 2758 reporef = weakref.ref(self)
2744 2759
2745 2760 def updater(tr):
2746 2761 repo = reporef()
2747 2762 assert repo is not None # help pytype
2748 2763 repo.updatecaches(tr)
2749 2764
2750 2765 return updater
2751 2766
2752 2767 @unfilteredmethod
2753 2768 def updatecaches(self, tr=None, full=False, caches=None):
2754 2769 """warm appropriate caches
2755 2770
2756 2771 If this function is called after a transaction closed, the transaction
2757 2772 will be available in the 'tr' argument. This can be used to selectively
2758 2773 update caches relevant to the changes in that transaction.
2759 2774
2760 2775 If 'full' is set, make sure all caches the function knows about have
2761 2776 up-to-date data. Even the ones usually loaded more lazily.
2762 2777
2763 2778 The `full` argument can take a special "post-clone" value. In this case
2764 2779 the cache warming is done after a clone, and some of the slower caches might
2765 2780 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2766 2781 as we plan for a cleaner way to deal with this for 5.9.
2767 2782 """
2768 2783 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2769 2784 # During strip, many caches are invalid but
2770 2785 # later call to `destroyed` will refresh them.
2771 2786 return
2772 2787
2773 2788 unfi = self.unfiltered()
2774 2789
2775 2790 if full:
2776 2791 msg = (
2777 2792 "`full` argument for `repo.updatecaches` is deprecated\n"
2778 2793 "(use `caches=repository.CACHE_ALL` instead)"
2779 2794 )
2780 2795 self.ui.deprecwarn(msg, b"5.9")
2781 2796 caches = repository.CACHES_ALL
2782 2797 if full == b"post-clone":
2783 2798 caches = repository.CACHES_POST_CLONE
2784 2799 caches = repository.CACHES_ALL
2785 2800 elif caches is None:
2786 2801 caches = repository.CACHES_DEFAULT
2787 2802
2788 2803 if repository.CACHE_BRANCHMAP_SERVED in caches:
2789 2804 if tr is None or tr.changes[b'origrepolen'] < len(self):
2790 2805 # accessing the 'served' branchmap should refresh all the others,
2791 2806 self.ui.debug(b'updating the branch cache\n')
2792 2807 self.filtered(b'served').branchmap()
2793 2808 self.filtered(b'served.hidden').branchmap()
2794 2809
2795 2810 if repository.CACHE_CHANGELOG_CACHE in caches:
2796 2811 self.changelog.update_caches(transaction=tr)
2797 2812
2798 2813 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2799 2814 self.manifestlog.update_caches(transaction=tr)
2800 2815
2801 2816 if repository.CACHE_REV_BRANCH in caches:
2802 2817 rbc = unfi.revbranchcache()
2803 2818 for r in unfi.changelog:
2804 2819 rbc.branchinfo(r)
2805 2820 rbc.write()
2806 2821
2807 2822 if repository.CACHE_FULL_MANIFEST in caches:
2808 2823 # ensure the working copy parents are in the manifestfulltextcache
2809 2824 for ctx in self[b'.'].parents():
2810 2825 ctx.manifest() # accessing the manifest is enough
2811 2826
2812 2827 if repository.CACHE_FILE_NODE_TAGS in caches:
2813 2828 # accessing fnode cache warms the cache
2814 2829 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2815 2830
2816 2831 if repository.CACHE_TAGS_DEFAULT in caches:
2817 2832 # accessing tags warm the cache
2818 2833 self.tags()
2819 2834 if repository.CACHE_TAGS_SERVED in caches:
2820 2835 self.filtered(b'served').tags()
2821 2836
2822 2837 if repository.CACHE_BRANCHMAP_ALL in caches:
2823 2838 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2824 2839 # so we're forcing a write to cause these caches to be warmed up
2825 2840 # even if they haven't explicitly been requested yet (if they've
2826 2841 # never been used by hg, they won't ever have been written, even if
2827 2842 # they're a subset of another kind of cache that *has* been used).
2828 2843 for filt in repoview.filtertable.keys():
2829 2844 filtered = self.filtered(filt)
2830 2845 filtered.branchmap().write(filtered)
2831 2846
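# Requesting an explicit set of caches instead of the deprecated `full`
# flag, as the deprecation message above suggests; a sketch.

from mercurial.interfaces import repository as repositorymod

repo.updatecaches(caches=repositorymod.CACHES_ALL)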
2832 2847 def invalidatecaches(self):
2833 2848
2834 2849 if '_tagscache' in vars(self):
2835 2850 # can't use delattr on proxy
2836 2851 del self.__dict__['_tagscache']
2837 2852
2838 2853 self._branchcaches.clear()
2839 2854 self.invalidatevolatilesets()
2840 2855 self._sparsesignaturecache.clear()
2841 2856
2842 2857 def invalidatevolatilesets(self):
2843 2858 self.filteredrevcache.clear()
2844 2859 obsolete.clearobscaches(self)
2845 2860 self._quick_access_changeid_invalidate()
2846 2861
2847 2862 def invalidatedirstate(self):
2848 2863 """Invalidates the dirstate, causing the next call to dirstate
2849 2864 to check if it was modified since the last time it was read,
2850 2865 rereading it if it has.
2851 2866
2852 2867 This is different from dirstate.invalidate() in that it doesn't always
2853 2868 reread the dirstate. Use dirstate.invalidate() if you want to
2854 2869 explicitly read the dirstate again (i.e. restoring it to a previous
2855 2870 known good state)."""
2856 2871 if hasunfilteredcache(self, 'dirstate'):
2857 2872 for k in self.dirstate._filecache:
2858 2873 try:
2859 2874 delattr(self.dirstate, k)
2860 2875 except AttributeError:
2861 2876 pass
2862 2877 delattr(self.unfiltered(), 'dirstate')
2863 2878
2864 2879 def invalidate(self, clearfilecache=False):
2865 2880 """Invalidates both store and non-store parts other than dirstate
2866 2881
2867 2882 If a transaction is running, invalidation of store is omitted,
2868 2883 because discarding in-memory changes might cause inconsistency
2869 2884 (e.g. incomplete fncache causes unintentional failure, but
2870 2885 redundant one doesn't).
2871 2886 """
2872 2887 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2873 2888 for k in list(self._filecache.keys()):
2874 2889 # dirstate is invalidated separately in invalidatedirstate()
2875 2890 if k == b'dirstate':
2876 2891 continue
2877 2892 if (
2878 2893 k == b'changelog'
2879 2894 and self.currenttransaction()
2880 2895 and self.changelog._delayed
2881 2896 ):
2882 2897 # The changelog object may store unwritten revisions. We don't
2883 2898 # want to lose them.
2884 2899 # TODO: Solve the problem instead of working around it.
2885 2900 continue
2886 2901
2887 2902 if clearfilecache:
2888 2903 del self._filecache[k]
2889 2904 try:
2890 2905 delattr(unfiltered, k)
2891 2906 except AttributeError:
2892 2907 pass
2893 2908 self.invalidatecaches()
2894 2909 if not self.currenttransaction():
2895 2910 # TODO: Changing contents of store outside transaction
2896 2911 # causes inconsistency. We should make in-memory store
2897 2912 # changes detectable, and abort if changed.
2898 2913 self.store.invalidatecaches()
2899 2914
2900 2915 def invalidateall(self):
2901 2916 """Fully invalidates both store and non-store parts, causing the
2902 2917 subsequent operation to reread any outside changes."""
2903 2918 # extension should hook this to invalidate its caches
2904 2919 self.invalidate()
2905 2920 self.invalidatedirstate()
2906 2921
2907 2922 @unfilteredmethod
2908 2923 def _refreshfilecachestats(self, tr):
2909 2924 """Reload stats of cached files so that they are flagged as valid"""
2910 2925 for k, ce in self._filecache.items():
2911 2926 k = pycompat.sysstr(k)
2912 2927 if k == 'dirstate' or k not in self.__dict__:
2913 2928 continue
2914 2929 ce.refresh()
2915 2930
2916 2931 def _lock(
2917 2932 self,
2918 2933 vfs,
2919 2934 lockname,
2920 2935 wait,
2921 2936 releasefn,
2922 2937 acquirefn,
2923 2938 desc,
2924 2939 ):
2925 2940 timeout = 0
2926 2941 warntimeout = 0
2927 2942 if wait:
2928 2943 timeout = self.ui.configint(b"ui", b"timeout")
2929 2944 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2930 2945 # internal config: ui.signal-safe-lock
2931 2946 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2932 2947
2933 2948 l = lockmod.trylock(
2934 2949 self.ui,
2935 2950 vfs,
2936 2951 lockname,
2937 2952 timeout,
2938 2953 warntimeout,
2939 2954 releasefn=releasefn,
2940 2955 acquirefn=acquirefn,
2941 2956 desc=desc,
2942 2957 signalsafe=signalsafe,
2943 2958 )
2944 2959 return l
2945 2960
2946 2961 def _afterlock(self, callback):
2947 2962 """add a callback to be run when the repository is fully unlocked
2948 2963
2949 2964 The callback will be executed when the outermost lock is released
2950 2965 (with wlock being higher level than 'lock')."""
2951 2966 for ref in (self._wlockref, self._lockref):
2952 2967 l = ref and ref()
2953 2968 if l and l.held:
2954 2969 l.postrelease.append(callback)
2955 2970 break
2956 2971 else: # no lock has been found.
2957 2972 callback(True)
2958 2973
2959 2974 def lock(self, wait=True):
2960 2975 """Lock the repository store (.hg/store) and return a weak reference
2961 2976 to the lock. Use this before modifying the store (e.g. committing or
2962 2977 stripping). If you are opening a transaction, get a lock as well.
2963 2978
2964 2979 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2965 2980 'wlock' first to avoid a deadlock hazard.
2966 2981 l = self._currentlock(self._lockref)
2967 2982 if l is not None:
2968 2983 l.lock()
2969 2984 return l
2970 2985
2971 2986 l = self._lock(
2972 2987 vfs=self.svfs,
2973 2988 lockname=b"lock",
2974 2989 wait=wait,
2975 2990 releasefn=None,
2976 2991 acquirefn=self.invalidate,
2977 2992 desc=_(b'repository %s') % self.origroot,
2978 2993 )
2979 2994 self._lockref = weakref.ref(l)
2980 2995 return l
2981 2996
2982 2997 def wlock(self, wait=True):
2983 2998 """Lock the non-store parts of the repository (everything under
2984 2999 .hg except .hg/store) and return a weak reference to the lock.
2985 3000
2986 3001 Use this before modifying files in .hg.
2987 3002
2988 3003 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2989 3004 'wlock' first to avoid a deadlock hazard.
2990 3005 l = self._wlockref() if self._wlockref else None
2991 3006 if l is not None and l.held:
2992 3007 l.lock()
2993 3008 return l
2994 3009
2995 3010 # We do not need to check for non-waiting lock acquisition. Such
2996 3011 # acquisition would not cause a deadlock as it would just fail.
2997 3012 if wait and (
2998 3013 self.ui.configbool(b'devel', b'all-warnings')
2999 3014 or self.ui.configbool(b'devel', b'check-locks')
3000 3015 ):
3001 3016 if self._currentlock(self._lockref) is not None:
3002 3017 self.ui.develwarn(b'"wlock" acquired after "lock"')
3003 3018
3004 3019 def unlock():
3005 3020 if self.dirstate.pendingparentchange():
3006 3021 self.dirstate.invalidate()
3007 3022 else:
3008 3023 self.dirstate.write(None)
3009 3024
3010 3025 self._filecache[b'dirstate'].refresh()
3011 3026
3012 3027 l = self._lock(
3013 3028 self.vfs,
3014 3029 b"wlock",
3015 3030 wait,
3016 3031 unlock,
3017 3032 self.invalidatedirstate,
3018 3033 _(b'working directory of %s') % self.origroot,
3019 3034 )
3020 3035 self._wlockref = weakref.ref(l)
3021 3036 return l
3022 3037
3023 3038 def _currentlock(self, lockref):
3024 3039 """Returns the lock if it's held, or None if it's not."""
3025 3040 if lockref is None:
3026 3041 return None
3027 3042 l = lockref()
3028 3043 if l is None or not l.held:
3029 3044 return None
3030 3045 return l
3031 3046
3032 3047 def currentwlock(self):
3033 3048 """Returns the wlock if it's held, or None if it's not."""
3034 3049 return self._currentlock(self._wlockref)
3035 3050
3036 3051 def checkcommitpatterns(self, wctx, match, status, fail):
3037 3052 """check for commit arguments that aren't committable"""
3038 3053 if match.isexact() or match.prefix():
3039 3054 matched = set(status.modified + status.added + status.removed)
3040 3055
3041 3056 for f in match.files():
3042 3057 f = self.dirstate.normalize(f)
3043 3058 if f == b'.' or f in matched or f in wctx.substate:
3044 3059 continue
3045 3060 if f in status.deleted:
3046 3061 fail(f, _(b'file not found!'))
3047 3062 # Is it a directory that exists or used to exist?
3048 3063 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3049 3064 d = f + b'/'
3050 3065 for mf in matched:
3051 3066 if mf.startswith(d):
3052 3067 break
3053 3068 else:
3054 3069 fail(f, _(b"no match under directory!"))
3055 3070 elif f not in self.dirstate:
3056 3071 fail(f, _(b"file not tracked!"))
3057 3072
3058 3073 @unfilteredmethod
3059 3074 def commit(
3060 3075 self,
3061 3076 text=b"",
3062 3077 user=None,
3063 3078 date=None,
3064 3079 match=None,
3065 3080 force=False,
3066 3081 editor=None,
3067 3082 extra=None,
3068 3083 ):
3069 3084 """Add a new revision to current repository.
3070 3085
3071 3086 Revision information is gathered from the working directory;
3072 3087 match can be used to filter the committed files. If editor is
3073 3088 supplied, it is called to get a commit message.
3074 3089 """
3075 3090 if extra is None:
3076 3091 extra = {}
3077 3092
3078 3093 def fail(f, msg):
3079 3094 raise error.InputError(b'%s: %s' % (f, msg))
3080 3095
3081 3096 if not match:
3082 3097 match = matchmod.always()
3083 3098
3084 3099 if not force:
3085 3100 match.bad = fail
3086 3101
3087 3102 # lock() for recent changelog (see issue4368)
3088 3103 with self.wlock(), self.lock():
3089 3104 wctx = self[None]
3090 3105 merge = len(wctx.parents()) > 1
3091 3106
3092 3107 if not force and merge and not match.always():
3093 3108 raise error.Abort(
3094 3109 _(
3095 3110 b'cannot partially commit a merge '
3096 3111 b'(do not specify files or patterns)'
3097 3112 )
3098 3113 )
3099 3114
3100 3115 status = self.status(match=match, clean=force)
3101 3116 if force:
3102 3117 status.modified.extend(
3103 3118 status.clean
3104 3119 ) # mq may commit clean files
3105 3120
3106 3121 # check subrepos
3107 3122 subs, commitsubs, newstate = subrepoutil.precommit(
3108 3123 self.ui, wctx, status, match, force=force
3109 3124 )
3110 3125
3111 3126 # make sure all explicit patterns are matched
3112 3127 if not force:
3113 3128 self.checkcommitpatterns(wctx, match, status, fail)
3114 3129
3115 3130 cctx = context.workingcommitctx(
3116 3131 self, status, text, user, date, extra
3117 3132 )
3118 3133
3119 3134 ms = mergestatemod.mergestate.read(self)
3120 3135 mergeutil.checkunresolved(ms)
3121 3136
3122 3137 # internal config: ui.allowemptycommit
3123 3138 if cctx.isempty() and not self.ui.configbool(
3124 3139 b'ui', b'allowemptycommit'
3125 3140 ):
3126 3141 self.ui.debug(b'nothing to commit, clearing merge state\n')
3127 3142 ms.reset()
3128 3143 return None
3129 3144
3130 3145 if merge and cctx.deleted():
3131 3146 raise error.Abort(_(b"cannot commit merge with missing files"))
3132 3147
3133 3148 if editor:
3134 3149 cctx._text = editor(self, cctx, subs)
3135 3150 edited = text != cctx._text
3136 3151
3137 3152 # Save commit message in case this transaction gets rolled back
3138 3153 # (e.g. by a pretxncommit hook). Leave the content alone on
3139 3154 # the assumption that the user will use the same editor again.
3140 3155 msgfn = self.savecommitmessage(cctx._text)
3141 3156
3142 3157 # commit subs and write new state
3143 3158 if subs:
3144 3159 uipathfn = scmutil.getuipathfn(self)
3145 3160 for s in sorted(commitsubs):
3146 3161 sub = wctx.sub(s)
3147 3162 self.ui.status(
3148 3163 _(b'committing subrepository %s\n')
3149 3164 % uipathfn(subrepoutil.subrelpath(sub))
3150 3165 )
3151 3166 sr = sub.commit(cctx._text, user, date)
3152 3167 newstate[s] = (newstate[s][0], sr)
3153 3168 subrepoutil.writestate(self, newstate)
3154 3169
3155 3170 p1, p2 = self.dirstate.parents()
3156 3171 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3157 3172 try:
3158 3173 self.hook(
3159 3174 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3160 3175 )
3161 3176 with self.transaction(b'commit'):
3162 3177 ret = self.commitctx(cctx, True)
3163 3178 # update bookmarks, dirstate and mergestate
3164 3179 bookmarks.update(self, [p1, p2], ret)
3165 3180 cctx.markcommitted(ret)
3166 3181 ms.reset()
3167 3182 except: # re-raises
3168 3183 if edited:
3169 3184 self.ui.write(
3170 3185 _(b'note: commit message saved in %s\n') % msgfn
3171 3186 )
3172 3187 self.ui.write(
3173 3188 _(
3174 3189 b"note: use 'hg commit --logfile "
3175 3190 b".hg/last-message.txt --edit' to reuse it\n"
3176 3191 )
3177 3192 )
3178 3193 raise
3179 3194
3180 3195 def commithook(unused_success):
3181 3196 # hack for commands that use a temporary commit (e.g. histedit)
3182 3197 # temporary commit got stripped before hook release
3183 3198 if self.changelog.hasnode(ret):
3184 3199 self.hook(
3185 3200 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3186 3201 )
3187 3202
3188 3203 self._afterlock(commithook)
3189 3204 return ret
3190 3205
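# Illustrative sketch (not part of the original source): a minimal caller of
# commit() might look like the following.  `repo` and the message text are
# assumptions for the example; commit() returns None when there is nothing
# to commit (unless ui.allowemptycommit is set).
#
#   node = repo.commit(
#       text=b'example: adjust documentation',
#       user=b'Example User <user@example.com>',
#   )
#   if node is None:
#       repo.ui.status(b'nothing changed\n')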
3191 3206 @unfilteredmethod
3192 3207 def commitctx(self, ctx, error=False, origctx=None):
3193 3208 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3194 3209
3195 3210 @unfilteredmethod
3196 3211 def destroying(self):
3197 3212 """Inform the repository that nodes are about to be destroyed.
3198 3213 Intended for use by strip and rollback, so there's a common
3199 3214 place for anything that has to be done before destroying history.
3200 3215
3201 3216 This is mostly useful for saving state that is in memory and waiting
3202 3217 to be flushed when the current lock is released. Because a call to
3203 3218 destroyed is imminent, the repo will be invalidated causing those
3204 3219 changes to stay in memory (waiting for the next unlock), or vanish
3205 3220 completely.
3206 3221 """
3207 3222 # When using the same lock to commit and strip, the phasecache is left
3208 3223 # dirty after committing. Then when we strip, the repo is invalidated,
3209 3224 # causing those changes to disappear.
3210 3225 if '_phasecache' in vars(self):
3211 3226 self._phasecache.write()
3212 3227
3213 3228 @unfilteredmethod
3214 3229 def destroyed(self):
3215 3230 """Inform the repository that nodes have been destroyed.
3216 3231 Intended for use by strip and rollback, so there's a common
3217 3232 place for anything that has to be done after destroying history.
3218 3233 """
3219 3234 # When one tries to:
3220 3235 # 1) destroy nodes thus calling this method (e.g. strip)
3221 3236 # 2) use phasecache somewhere (e.g. commit)
3222 3237 #
3223 3238 # then 2) will fail because the phasecache contains nodes that were
3224 3239 # removed. We can either remove phasecache from the filecache,
3225 3240 # causing it to reload next time it is accessed, or simply filter
3226 3241 # the removed nodes now and write the updated cache.
3227 3242 self._phasecache.filterunknown(self)
3228 3243 self._phasecache.write()
3229 3244
3230 3245 # refresh all repository caches
3231 3246 self.updatecaches()
3232 3247
3233 3248 # Ensure the persistent tag cache is updated. Doing it now
3234 3249 # means that the tag cache only has to worry about destroyed
3235 3250 # heads immediately after a strip/rollback. That in turn
3236 3251 # guarantees that "cachetip == currenttip" (comparing both rev
3237 3252 # and node) always means no nodes have been added or destroyed.
3238 3253
3239 3254 # XXX this is suboptimal when qrefresh'ing: we strip the current
3240 3255 # head, refresh the tag cache, then immediately add a new head.
3241 3256 # But I think doing it this way is necessary for the "instant
3242 3257 # tag cache retrieval" case to work.
3243 3258 self.invalidate()
3244 3259
3245 3260 def status(
3246 3261 self,
3247 3262 node1=b'.',
3248 3263 node2=None,
3249 3264 match=None,
3250 3265 ignored=False,
3251 3266 clean=False,
3252 3267 unknown=False,
3253 3268 listsubrepos=False,
3254 3269 ):
3255 3270 '''a convenience method that calls node1.status(node2)'''
3256 3271 return self[node1].status(
3257 3272 node2, match, ignored, clean, unknown, listsubrepos
3258 3273 )
3259 3274
3260 3275 def addpostdsstatus(self, ps):
3261 3276 """Add a callback to run within the wlock, at the point at which status
3262 3277 fixups happen.
3263 3278
3264 3279 On status completion, callback(wctx, status) will be called with the
3265 3280 wlock held, unless the dirstate has changed from underneath or the wlock
3266 3281 couldn't be grabbed.
3267 3282
3268 3283 Callbacks should not capture and use a cached copy of the dirstate --
3269 3284 it might change in the meantime. Instead, they should access the
3270 3285 dirstate via wctx.repo().dirstate.
3271 3286
3272 3287 This list is emptied out after each status run -- extensions should
3273 3288 make sure they add to this list each time dirstate.status is called.
3274 3289 Extensions should also make sure they don't call this for statuses
3275 3290 that don't involve the dirstate.
3276 3291 """
3277 3292
3278 3293 # The list is located here for uniqueness reasons -- it is actually
3279 3294 # managed by the workingctx, but that isn't unique per-repo.
3280 3295 self._postdsstatus.append(ps)
3281 3296
3282 3297 def postdsstatus(self):
3283 3298 """Used by workingctx to get the list of post-dirstate-status hooks."""
3284 3299 return self._postdsstatus
3285 3300
3286 3301 def clearpostdsstatus(self):
3287 3302 """Used by workingctx to clear post-dirstate-status hooks."""
3288 3303 del self._postdsstatus[:]
3289 3304
3290 3305 def heads(self, start=None):
3291 3306 if start is None:
3292 3307 cl = self.changelog
3293 3308 headrevs = reversed(cl.headrevs())
3294 3309 return [cl.node(rev) for rev in headrevs]
3295 3310
3296 3311 heads = self.changelog.heads(start)
3297 3312 # sort the output in rev descending order
3298 3313 return sorted(heads, key=self.changelog.rev, reverse=True)
3299 3314
3300 3315 def branchheads(self, branch=None, start=None, closed=False):
3301 3316 """return a (possibly filtered) list of heads for the given branch
3302 3317
3303 3318 Heads are returned in topological order, from newest to oldest.
3304 3319 If branch is None, use the dirstate branch.
3305 3320 If start is not None, return only heads reachable from start.
3306 3321 If closed is True, return heads that are marked as closed as well.
3307 3322 """
3308 3323 if branch is None:
3309 3324 branch = self[None].branch()
3310 3325 branches = self.branchmap()
3311 3326 if not branches.hasbranch(branch):
3312 3327 return []
3313 3328 # the cache returns heads ordered lowest to highest
3314 3329 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3315 3330 if start is not None:
3316 3331 # filter out the heads that cannot be reached from startrev
3317 3332 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3318 3333 bheads = [h for h in bheads if h in fbheads]
3319 3334 return bheads
3320 3335
3321 3336 def branches(self, nodes):
3322 3337 if not nodes:
3323 3338 nodes = [self.changelog.tip()]
3324 3339 b = []
3325 3340 for n in nodes:
3326 3341 t = n
3327 3342 while True:
3328 3343 p = self.changelog.parents(n)
3329 3344 if p[1] != self.nullid or p[0] == self.nullid:
3330 3345 b.append((t, n, p[0], p[1]))
3331 3346 break
3332 3347 n = p[0]
3333 3348 return b
3334 3349
3335 3350 def between(self, pairs):
3336 3351 r = []
3337 3352
3338 3353 for top, bottom in pairs:
3339 3354 n, l, i = top, [], 0
3340 3355 f = 1
3341 3356
3342 3357 while n != bottom and n != self.nullid:
3343 3358 p = self.changelog.parents(n)[0]
3344 3359 if i == f:
3345 3360 l.append(n)
3346 3361 f = f * 2
3347 3362 n = p
3348 3363 i += 1
3349 3364
3350 3365 r.append(l)
3351 3366
3352 3367 return r
3353 3368
3354 3369 def checkpush(self, pushop):
3355 3370 """Extensions can override this function if additional checks have
3356 3371 to be performed before pushing, or call it if they override push
3357 3372 command.
3358 3373 """
3359 3374
3360 3375 @unfilteredpropertycache
3361 3376 def prepushoutgoinghooks(self):
3362 3377 """Return util.hooks consists of a pushop with repo, remote, outgoing
3363 3378 methods, which are called before pushing changesets.
3364 3379 """
3365 3380 return util.hooks()
3366 3381
3367 3382 def pushkey(self, namespace, key, old, new):
3368 3383 try:
3369 3384 tr = self.currenttransaction()
3370 3385 hookargs = {}
3371 3386 if tr is not None:
3372 3387 hookargs.update(tr.hookargs)
3373 3388 hookargs = pycompat.strkwargs(hookargs)
3374 3389 hookargs['namespace'] = namespace
3375 3390 hookargs['key'] = key
3376 3391 hookargs['old'] = old
3377 3392 hookargs['new'] = new
3378 3393 self.hook(b'prepushkey', throw=True, **hookargs)
3379 3394 except error.HookAbort as exc:
3380 3395 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3381 3396 if exc.hint:
3382 3397 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3383 3398 return False
3384 3399 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3385 3400 ret = pushkey.push(self, namespace, key, old, new)
3386 3401
3387 3402 def runhook(unused_success):
3388 3403 self.hook(
3389 3404 b'pushkey',
3390 3405 namespace=namespace,
3391 3406 key=key,
3392 3407 old=old,
3393 3408 new=new,
3394 3409 ret=ret,
3395 3410 )
3396 3411
3397 3412 self._afterlock(runhook)
3398 3413 return ret
3399 3414
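# Illustrative sketch (not part of the original source): the bookmarks pushkey
# namespace is a typical consumer of this method; old and new values are hex
# nodes, with an empty old value when the bookmark is being created.  `repo`,
# the bookmark name and `newnode` are assumptions for the example only.
#
#   repo.pushkey(b'bookmarks', b'feature-x', b'', hex(newnode))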
3400 3415 def listkeys(self, namespace):
3401 3416 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3402 3417 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3403 3418 values = pushkey.list(self, namespace)
3404 3419 self.hook(b'listkeys', namespace=namespace, values=values)
3405 3420 return values
3406 3421
3407 3422 def debugwireargs(self, one, two, three=None, four=None, five=None):
3408 3423 '''used to test argument passing over the wire'''
3409 3424 return b"%s %s %s %s %s" % (
3410 3425 one,
3411 3426 two,
3412 3427 pycompat.bytestr(three),
3413 3428 pycompat.bytestr(four),
3414 3429 pycompat.bytestr(five),
3415 3430 )
3416 3431
3417 3432 def savecommitmessage(self, text):
3418 3433 fp = self.vfs(b'last-message.txt', b'wb')
3419 3434 try:
3420 3435 fp.write(text)
3421 3436 finally:
3422 3437 fp.close()
3423 3438 return self.pathto(fp.name[len(self.root) + 1 :])
3424 3439
3425 3440 def register_wanted_sidedata(self, category):
3426 3441 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3427 3442 # Only revlogv2 repos can want sidedata.
3428 3443 return
3429 3444 self._wanted_sidedata.add(pycompat.bytestr(category))
3430 3445
3431 3446 def register_sidedata_computer(
3432 3447 self, kind, category, keys, computer, flags, replace=False
3433 3448 ):
3434 3449 if kind not in revlogconst.ALL_KINDS:
3435 3450 msg = _(b"unexpected revlog kind '%s'.")
3436 3451 raise error.ProgrammingError(msg % kind)
3437 3452 category = pycompat.bytestr(category)
3438 3453 already_registered = category in self._sidedata_computers.get(kind, [])
3439 3454 if already_registered and not replace:
3440 3455 msg = _(
3441 3456 b"cannot register a sidedata computer twice for category '%s'."
3442 3457 )
3443 3458 raise error.ProgrammingError(msg % category)
3444 3459 if replace and not already_registered:
3445 3460 msg = _(
3446 3461 b"cannot replace a sidedata computer that isn't registered "
3447 3462 b"for category '%s'."
3448 3463 )
3449 3464 raise error.ProgrammingError(msg % category)
3450 3465 self._sidedata_computers.setdefault(kind, {})
3451 3466 self._sidedata_computers[kind][category] = (keys, computer, flags)
3452 3467
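# Illustrative sketch (not part of the original source): an extension could
# declare interest in a sidedata category and register a computer for the
# changelog.  The category name, key constant and `compute_example` callable
# are hypothetical; the precise computer contract is defined alongside the
# sidedata helpers in revlogutils.
#
#   repo.register_wanted_sidedata(b'exp-example-category')
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,     # one of revlogconst.ALL_KINDS
#       b'exp-example-category',
#       (sidedatamod.SD_TEST1,),        # keys this computer produces
#       compute_example,                # callable computing the entries
#       0,                              # revision flags to associate
#   )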
3453 3468
3454 3469 # used to avoid circular references so destructors work
3455 3470 def aftertrans(files):
3456 3471 renamefiles = [tuple(t) for t in files]
3457 3472
3458 3473 def a():
3459 3474 for vfs, src, dest in renamefiles:
3460 3475 # if src and dest refer to the same file, vfs.rename is a no-op,
3461 3476 # leaving both src and dest on disk. Delete dest to make sure
3462 3477 # the rename couldn't be such a no-op.
3463 3478 vfs.tryunlink(dest)
3464 3479 try:
3465 3480 vfs.rename(src, dest)
3466 3481 except OSError as exc: # journal file does not yet exist
3467 3482 if exc.errno != errno.ENOENT:
3468 3483 raise
3469 3484
3470 3485 return a
3471 3486
3472 3487
3473 3488 def undoname(fn):
3474 3489 base, name = os.path.split(fn)
3475 3490 assert name.startswith(b'journal')
3476 3491 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3477 3492
3478 3493
3479 3494 def instance(ui, path, create, intents=None, createopts=None):
3480 3495 localpath = urlutil.urllocalpath(path)
3481 3496 if create:
3482 3497 createrepository(ui, localpath, createopts=createopts)
3483 3498
3484 3499 return makelocalrepository(ui, localpath, intents=intents)
3485 3500
3486 3501
3487 3502 def islocal(path):
3488 3503 return True
3489 3504
3490 3505
3491 3506 def defaultcreateopts(ui, createopts=None):
3492 3507 """Populate the default creation options for a repository.
3493 3508
3494 3509 A dictionary of explicitly requested creation options can be passed
3495 3510 in. Missing keys will be populated.
3496 3511 """
3497 3512 createopts = dict(createopts or {})
3498 3513
3499 3514 if b'backend' not in createopts:
3500 3515 # experimental config: storage.new-repo-backend
3501 3516 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3502 3517
3503 3518 return createopts
3504 3519
3505 3520
3506 3521 def clone_requirements(ui, createopts, srcrepo):
3507 3522 """clone the requirements of a local repo for a local clone
3508 3523
3509 3524 The store requirements are unchanged while the working copy requirements
3510 3525 depend on the configuration.
3511 3526 """
3512 3527 target_requirements = set()
3513 3528 createopts = defaultcreateopts(ui, createopts=createopts)
3514 3529 for r in newreporequirements(ui, createopts):
3515 3530 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3516 3531 target_requirements.add(r)
3517 3532
3518 3533 for r in srcrepo.requirements:
3519 3534 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3520 3535 target_requirements.add(r)
3521 3536 return target_requirements
3522 3537
3523 3538
3524 3539 def newreporequirements(ui, createopts):
3525 3540 """Determine the set of requirements for a new local repository.
3526 3541
3527 3542 Extensions can wrap this function to specify custom requirements for
3528 3543 new repositories.
3529 3544 """
3530 3545 # If the repo is being created from a shared repository, we copy
3531 3546 # its requirements.
3532 3547 if b'sharedrepo' in createopts:
3533 3548 requirements = set(createopts[b'sharedrepo'].requirements)
3534 3549 if createopts.get(b'sharedrelative'):
3535 3550 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3536 3551 else:
3537 3552 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3538 3553
3539 3554 return requirements
3540 3555
3541 3556 if b'backend' not in createopts:
3542 3557 raise error.ProgrammingError(
3543 3558 b'backend key not present in createopts; '
3544 3559 b'was defaultcreateopts() called?'
3545 3560 )
3546 3561
3547 3562 if createopts[b'backend'] != b'revlogv1':
3548 3563 raise error.Abort(
3549 3564 _(
3550 3565 b'unable to determine repository requirements for '
3551 3566 b'storage backend: %s'
3552 3567 )
3553 3568 % createopts[b'backend']
3554 3569 )
3555 3570
3556 3571 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3557 3572 if ui.configbool(b'format', b'usestore'):
3558 3573 requirements.add(requirementsmod.STORE_REQUIREMENT)
3559 3574 if ui.configbool(b'format', b'usefncache'):
3560 3575 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3561 3576 if ui.configbool(b'format', b'dotencode'):
3562 3577 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3563 3578
3564 3579 compengines = ui.configlist(b'format', b'revlog-compression')
3565 3580 for compengine in compengines:
3566 3581 if compengine in util.compengines:
3567 3582 engine = util.compengines[compengine]
3568 3583 if engine.available() and engine.revlogheader():
3569 3584 break
3570 3585 else:
3571 3586 raise error.Abort(
3572 3587 _(
3573 3588 b'compression engines %s defined by '
3574 3589 b'format.revlog-compression not available'
3575 3590 )
3576 3591 % b', '.join(b'"%s"' % e for e in compengines),
3577 3592 hint=_(
3578 3593 b'run "hg debuginstall" to list available '
3579 3594 b'compression engines'
3580 3595 ),
3581 3596 )
3582 3597
3583 3598 # zlib is the historical default and doesn't need an explicit requirement.
3584 3599 if compengine == b'zstd':
3585 3600 requirements.add(b'revlog-compression-zstd')
3586 3601 elif compengine != b'zlib':
3587 3602 requirements.add(b'exp-compression-%s' % compengine)
3588 3603
3589 3604 if scmutil.gdinitconfig(ui):
3590 3605 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3591 3606 if ui.configbool(b'format', b'sparse-revlog'):
3592 3607 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3593 3608
3594 3609 # experimental config: format.exp-dirstate-v2
3595 3610 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3596 3611 if ui.configbool(b'format', b'exp-dirstate-v2'):
3597 3612 if dirstate.SUPPORTS_DIRSTATE_V2:
3598 3613 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3599 3614 else:
3600 3615 raise error.Abort(
3601 3616 _(
3602 3617 b"dirstate v2 format requested by config "
3603 3618 b"but not supported (requires Rust extensions)"
3604 3619 )
3605 3620 )
3606 3621
3607 3622 # experimental config: format.exp-use-copies-side-data-changeset
3608 3623 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3609 3624 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3610 3625 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3611 3626 if ui.configbool(b'experimental', b'treemanifest'):
3612 3627 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3613 3628
3614 3629 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3615 3630 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3616 3631 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3617 3632
3618 3633 revlogv2 = ui.config(b'experimental', b'revlogv2')
3619 3634 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3620 3635 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3621 3636 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3622 3637 # experimental config: format.internal-phase
3623 3638 if ui.configbool(b'format', b'internal-phase'):
3624 3639 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3625 3640
3626 3641 if createopts.get(b'narrowfiles'):
3627 3642 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3628 3643
3629 3644 if createopts.get(b'lfs'):
3630 3645 requirements.add(b'lfs')
3631 3646
3632 3647 if ui.configbool(b'format', b'bookmarks-in-store'):
3633 3648 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3634 3649
3635 3650 if ui.configbool(b'format', b'use-persistent-nodemap'):
3636 3651 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3637 3652
3638 3653 # if share-safe is enabled, let's create the new repository with the new
3639 3654 # requirement
3640 3655 if ui.configbool(b'format', b'use-share-safe'):
3641 3656 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3642 3657
3643 3658 return requirements
3644 3659
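# Illustrative sketch (not part of the original source): as the docstring above
# notes, extensions can wrap newreporequirements() to add their own requirement
# to freshly created repositories.  `_wrapped`, the uisetup placement and the
# b'exp-myext-storage' requirement name are hypothetical.
#
#   from mercurial import extensions, localrepo
#
#   def _wrapped(orig, ui, createopts):
#       requirements = orig(ui, createopts)
#       requirements.add(b'exp-myext-storage')
#       return requirements
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)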
3645 3660
3646 3661 def checkrequirementscompat(ui, requirements):
3647 3662 """Checks compatibility of repository requirements enabled and disabled.
3648 3663
3649 3664 Returns a set of requirements which need to be dropped because dependent
3650 3665 requirements are not enabled. Also warns users about it
3651 3666
3652 3667 dropped = set()
3653 3668
3654 3669 if requirementsmod.STORE_REQUIREMENT not in requirements:
3655 3670 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3656 3671 ui.warn(
3657 3672 _(
3658 3673 b'ignoring enabled \'format.bookmarks-in-store\' config '
3659 3674 b'because it is incompatible with disabled '
3660 3675 b'\'format.usestore\' config\n'
3661 3676 )
3662 3677 )
3663 3678 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3664 3679
3665 3680 if (
3666 3681 requirementsmod.SHARED_REQUIREMENT in requirements
3667 3682 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3668 3683 ):
3669 3684 raise error.Abort(
3670 3685 _(
3671 3686 b"cannot create shared repository as source was created"
3672 3687 b" with 'format.usestore' config disabled"
3673 3688 )
3674 3689 )
3675 3690
3676 3691 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3677 3692 ui.warn(
3678 3693 _(
3679 3694 b"ignoring enabled 'format.use-share-safe' config because "
3680 3695 b"it is incompatible with disabled 'format.usestore'"
3681 3696 b" config\n"
3682 3697 )
3683 3698 )
3684 3699 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3685 3700
3686 3701 return dropped
3687 3702
3688 3703
3689 3704 def filterknowncreateopts(ui, createopts):
3690 3705 """Filters a dict of repo creation options against options that are known.
3691 3706
3692 3707 Receives a dict of repo creation options and returns a dict of those
3693 3708 options that we don't know how to handle.
3694 3709
3695 3710 This function is called as part of repository creation. If the
3696 3711 returned dict contains any items, repository creation will not
3697 3712 be allowed, as it means there was a request to create a repository
3698 3713 with options not recognized by loaded code.
3699 3714
3700 3715 Extensions can wrap this function to filter out creation options
3701 3716 they know how to handle.
3702 3717 """
3703 3718 known = {
3704 3719 b'backend',
3705 3720 b'lfs',
3706 3721 b'narrowfiles',
3707 3722 b'sharedrepo',
3708 3723 b'sharedrelative',
3709 3724 b'shareditems',
3710 3725 b'shallowfilestore',
3711 3726 }
3712 3727
3713 3728 return {k: v for k, v in createopts.items() if k not in known}
3714 3729
3715 3730
3716 3731 def createrepository(ui, path, createopts=None, requirements=None):
3717 3732 """Create a new repository in a vfs.
3718 3733
3719 3734 ``path`` path to the new repo's working directory.
3720 3735 ``createopts`` options for the new repository.
3721 3736 ``requirements`` predefined set of requirements.
3722 3737 (incompatible with ``createopts``)
3723 3738
3724 3739 The following keys for ``createopts`` are recognized:
3725 3740
3726 3741 backend
3727 3742 The storage backend to use.
3728 3743 lfs
3729 3744 Repository will be created with ``lfs`` requirement. The lfs extension
3730 3745 will automatically be loaded when the repository is accessed.
3731 3746 narrowfiles
3732 3747 Set up repository to support narrow file storage.
3733 3748 sharedrepo
3734 3749 Repository object from which storage should be shared.
3735 3750 sharedrelative
3736 3751 Boolean indicating if the path to the shared repo should be
3737 3752 stored as relative. By default, the pointer to the "parent" repo
3738 3753 is stored as an absolute path.
3739 3754 shareditems
3740 3755 Set of items to share to the new repository (in addition to storage).
3741 3756 shallowfilestore
3742 3757 Indicates that storage for files should be shallow (not all ancestor
3743 3758 revisions are known).
3744 3759 """
3745 3760
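# Illustrative sketch (not part of the original source): a caller creating a
# narrow, lfs-enabled repository might pass createopts as below.  The target
# path is a placeholder.
#
#   createrepository(
#       ui,
#       b'/path/to/new-repo',
#       createopts={b'narrowfiles': True, b'lfs': True},
#   )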
3746 3761 if requirements is not None:
3747 3762 if createopts is not None:
3748 3763 msg = b'cannot specify both createopts and requirements'
3749 3764 raise error.ProgrammingError(msg)
3750 3765 createopts = {}
3751 3766 else:
3752 3767 createopts = defaultcreateopts(ui, createopts=createopts)
3753 3768
3754 3769 unknownopts = filterknowncreateopts(ui, createopts)
3755 3770
3756 3771 if not isinstance(unknownopts, dict):
3757 3772 raise error.ProgrammingError(
3758 3773 b'filterknowncreateopts() did not return a dict'
3759 3774 )
3760 3775
3761 3776 if unknownopts:
3762 3777 raise error.Abort(
3763 3778 _(
3764 3779 b'unable to create repository because of unknown '
3765 3780 b'creation option: %s'
3766 3781 )
3767 3782 % b', '.join(sorted(unknownopts)),
3768 3783 hint=_(b'is a required extension not loaded?'),
3769 3784 )
3770 3785
3771 3786 requirements = newreporequirements(ui, createopts=createopts)
3772 3787 requirements -= checkrequirementscompat(ui, requirements)
3773 3788
3774 3789 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3775 3790
3776 3791 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3777 3792 if hgvfs.exists():
3778 3793 raise error.RepoError(_(b'repository %s already exists') % path)
3779 3794
3780 3795 if b'sharedrepo' in createopts:
3781 3796 sharedpath = createopts[b'sharedrepo'].sharedpath
3782 3797
3783 3798 if createopts.get(b'sharedrelative'):
3784 3799 try:
3785 3800 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3786 3801 sharedpath = util.pconvert(sharedpath)
3787 3802 except (IOError, ValueError) as e:
3788 3803 # ValueError is raised on Windows if the drive letters differ
3789 3804 # on each path.
3790 3805 raise error.Abort(
3791 3806 _(b'cannot calculate relative path'),
3792 3807 hint=stringutil.forcebytestr(e),
3793 3808 )
3794 3809
3795 3810 if not wdirvfs.exists():
3796 3811 wdirvfs.makedirs()
3797 3812
3798 3813 hgvfs.makedir(notindexed=True)
3799 3814 if b'sharedrepo' not in createopts:
3800 3815 hgvfs.mkdir(b'cache')
3801 3816 hgvfs.mkdir(b'wcache')
3802 3817
3803 3818 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3804 3819 if has_store and b'sharedrepo' not in createopts:
3805 3820 hgvfs.mkdir(b'store')
3806 3821
3807 3822 # We create an invalid changelog outside the store so very old
3808 3823 # Mercurial versions (which didn't know about the requirements
3809 3824 # file) encounter an error on reading the changelog. This
3810 3825 # effectively locks out old clients and prevents them from
3811 3826 # mucking with a repo in an unknown format.
3812 3827 #
3813 3828 # The revlog header has version 65535, which won't be recognized by
3814 3829 # such old clients.
3815 3830 hgvfs.append(
3816 3831 b'00changelog.i',
3817 3832 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3818 3833 b'layout',
3819 3834 )
3820 3835
3821 3836 # Filter the requirements into working copy and store ones
3822 3837 wcreq, storereq = scmutil.filterrequirements(requirements)
3823 3838 # write working copy ones
3824 3839 scmutil.writerequires(hgvfs, wcreq)
3825 3840 # If there are store requirements and the current repository
3826 3841 # is not a shared one, write the store requirements.
3827 3842 # For a new shared repository, we don't need to write the store
3828 3843 # requirements as they are already present in the source's store requires.
3829 3844 if storereq and b'sharedrepo' not in createopts:
3830 3845 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3831 3846 scmutil.writerequires(storevfs, storereq)
3832 3847
3833 3848 # Write out file telling readers where to find the shared store.
3834 3849 if b'sharedrepo' in createopts:
3835 3850 hgvfs.write(b'sharedpath', sharedpath)
3836 3851
3837 3852 if createopts.get(b'shareditems'):
3838 3853 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3839 3854 hgvfs.write(b'shared', shared)
3840 3855
3841 3856
3842 3857 def poisonrepository(repo):
3843 3858 """Poison a repository instance so it can no longer be used."""
3844 3859 # Perform any cleanup on the instance.
3845 3860 repo.close()
3846 3861
3847 3862 # Our strategy is to replace the type of the object with one that
3848 3863 # has all attribute lookups result in error.
3849 3864 #
3850 3865 # But we have to allow the close() method because some constructors
3851 3866 # of repos call close() on repo references.
3852 3867 class poisonedrepository(object):
3853 3868 def __getattribute__(self, item):
3854 3869 if item == 'close':
3855 3870 return object.__getattribute__(self, item)
3856 3871
3857 3872 raise error.ProgrammingError(
3858 3873 b'repo instances should not be used after unshare'
3859 3874 )
3860 3875
3861 3876 def close(self):
3862 3877 pass
3863 3878
3864 3879 # We may have a repoview, which intercepts __setattr__. So be sure
3865 3880 # we operate at the lowest level possible.
3866 3881 object.__setattr__(repo, '__class__', poisonedrepository)