narrow: delegate the narrow spec writing to the transaction...
marmoute
r51081:8bc14ac5 default
@@ -1,4022 +1,4028 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from .pycompat import (
32 32 delattr,
33 33 getattr,
34 34 )
35 35 from . import (
36 36 bookmarks,
37 37 branchmap,
38 38 bundle2,
39 39 bundlecaches,
40 40 changegroup,
41 41 color,
42 42 commit,
43 43 context,
44 44 dirstate,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 61 pushkey,
62 62 pycompat,
63 63 rcutil,
64 64 repoview,
65 65 requirements as requirementsmod,
66 66 revlog,
67 67 revset,
68 68 revsetlang,
69 69 scmutil,
70 70 sparse,
71 71 store as storemod,
72 72 subrepoutil,
73 73 tags as tagsmod,
74 74 transaction,
75 75 txnutil,
76 76 util,
77 77 vfs as vfsmod,
78 78 wireprototypes,
79 79 )
80 80
81 81 from .interfaces import (
82 82 repository,
83 83 util as interfaceutil,
84 84 )
85 85
86 86 from .utils import (
87 87 hashutil,
88 88 procutil,
89 89 stringutil,
90 90 urlutil,
91 91 )
92 92
93 93 from .revlogutils import (
94 94 concurrency_checker as revlogchecker,
95 95 constants as revlogconst,
96 96 sidedata as sidedatamod,
97 97 )
98 98
99 99 release = lockmod.release
100 100 urlerr = util.urlerr
101 101 urlreq = util.urlreq
102 102
103 103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*")
104 104
105 105 # set of (path, vfs-location) tuples. vfs-location is:
106 106 # - 'plain' for vfs relative paths
107 107 # - '' for svfs relative paths
108 108 _cachedfiles = set()
109 109
110 110
111 111 class _basefilecache(scmutil.filecache):
112 112 """All filecache usage on a repo is done for logic that should be unfiltered"""
113 113
114 114 def __get__(self, repo, type=None):
115 115 if repo is None:
116 116 return self
117 117 # proxy to unfiltered __dict__ since filtered repo has no entry
118 118 unfi = repo.unfiltered()
119 119 try:
120 120 return unfi.__dict__[self.sname]
121 121 except KeyError:
122 122 pass
123 123 return super(_basefilecache, self).__get__(unfi, type)
124 124
125 125 def set(self, repo, value):
126 126 return super(_basefilecache, self).set(repo.unfiltered(), value)
127 127
128 128
129 129 class repofilecache(_basefilecache):
130 130 """filecache for files in .hg but outside of .hg/store"""
131 131
132 132 def __init__(self, *paths):
133 133 super(repofilecache, self).__init__(*paths)
134 134 for path in paths:
135 135 _cachedfiles.add((path, b'plain'))
136 136
137 137 def join(self, obj, fname):
138 138 return obj.vfs.join(fname)
139 139
140 140
141 141 class storecache(_basefilecache):
142 142 """filecache for files in the store"""
143 143
144 144 def __init__(self, *paths):
145 145 super(storecache, self).__init__(*paths)
146 146 for path in paths:
147 147 _cachedfiles.add((path, b''))
148 148
149 149 def join(self, obj, fname):
150 150 return obj.sjoin(fname)
151 151
152 152
153 153 class changelogcache(storecache):
154 154 """filecache for the changelog"""
155 155
156 156 def __init__(self):
157 157 super(changelogcache, self).__init__()
158 158 _cachedfiles.add((b'00changelog.i', b''))
159 159 _cachedfiles.add((b'00changelog.n', b''))
160 160
161 161 def tracked_paths(self, obj):
162 162 paths = [self.join(obj, b'00changelog.i')]
163 163 if obj.store.opener.options.get(b'persistent-nodemap', False):
164 164 paths.append(self.join(obj, b'00changelog.n'))
165 165 return paths
166 166
167 167
168 168 class manifestlogcache(storecache):
169 169 """filecache for the manifestlog"""
170 170
171 171 def __init__(self):
172 172 super(manifestlogcache, self).__init__()
173 173 _cachedfiles.add((b'00manifest.i', b''))
174 174 _cachedfiles.add((b'00manifest.n', b''))
175 175
176 176 def tracked_paths(self, obj):
177 177 paths = [self.join(obj, b'00manifest.i')]
178 178 if obj.store.opener.options.get(b'persistent-nodemap', False):
179 179 paths.append(self.join(obj, b'00manifest.n'))
180 180 return paths
181 181
182 182
183 183 class mixedrepostorecache(_basefilecache):
184 184 """filecache for a mix of files in .hg/store and outside"""
185 185
186 186 def __init__(self, *pathsandlocations):
187 187 # scmutil.filecache only uses the path for passing back into our
188 188 # join(), so we can safely pass a list of paths and locations
189 189 super(mixedrepostorecache, self).__init__(*pathsandlocations)
190 190 _cachedfiles.update(pathsandlocations)
191 191
192 192 def join(self, obj, fnameandlocation):
193 193 fname, location = fnameandlocation
194 194 if location == b'plain':
195 195 return obj.vfs.join(fname)
196 196 else:
197 197 if location != b'':
198 198 raise error.ProgrammingError(
199 199 b'unexpected location: %s' % location
200 200 )
201 201 return obj.sjoin(fname)
202 202
203 203
204 204 def isfilecached(repo, name):
205 205 """check if a repo has already cached "name" filecache-ed property
206 206
207 207 This returns (cachedobj-or-None, iscached) tuple.
208 208 """
209 209 cacheentry = repo.unfiltered()._filecache.get(name, None)
210 210 if not cacheentry:
211 211 return None, False
212 212 return cacheentry.obj, True
213 213
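# Illustration only (not part of localrepo.py; the exact filecache key used
# here is an assumption). A caller that wants to reuse an already-loaded
# property without forcing it to be read from disk can do something like:
#
#   changelog, cached = isfilecached(repo, b'changelog')
#   if cached:
#       pass  # the changelog object is already materialized and can be reused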
214 214
215 215 class unfilteredpropertycache(util.propertycache):
216 216 """propertycache that applies to the unfiltered repo only"""
217 217
218 218 def __get__(self, repo, type=None):
219 219 unfi = repo.unfiltered()
220 220 if unfi is repo:
221 221 return super(unfilteredpropertycache, self).__get__(unfi)
222 222 return getattr(unfi, self.name)
223 223
224 224
225 225 class filteredpropertycache(util.propertycache):
226 226 """propertycache that must take filtering into account"""
227 227
228 228 def cachevalue(self, obj, value):
229 229 object.__setattr__(obj, self.name, value)
230 230
231 231
232 232 def hasunfilteredcache(repo, name):
233 233 """check if a repo has an unfilteredpropertycache value for <name>"""
234 234 return name in vars(repo.unfiltered())
235 235
236 236
237 237 def unfilteredmethod(orig):
238 238 """decorate a method that always needs to be run on the unfiltered version"""
239 239
240 240 @functools.wraps(orig)
241 241 def wrapper(repo, *args, **kwargs):
242 242 return orig(repo.unfiltered(), *args, **kwargs)
243 243
244 244 return wrapper
245 245
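# Illustration only (not part of localrepo.py; `examplerepo` and
# `rebuildcaches` are made-up names): a method that must always act on the
# unfiltered repository can be wrapped with the decorator above, e.g.
#
#   class examplerepo(localrepository):
#       @unfilteredmethod
#       def rebuildcaches(self):
#           pass  # `self` here is guaranteed to be repo.unfiltered()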
246 246
247 247 moderncaps = {
248 248 b'lookup',
249 249 b'branchmap',
250 250 b'pushkey',
251 251 b'known',
252 252 b'getbundle',
253 253 b'unbundle',
254 254 }
255 255 legacycaps = moderncaps.union({b'changegroupsubset'})
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommandexecutor)
259 259 class localcommandexecutor:
260 260 def __init__(self, peer):
261 261 self._peer = peer
262 262 self._sent = False
263 263 self._closed = False
264 264
265 265 def __enter__(self):
266 266 return self
267 267
268 268 def __exit__(self, exctype, excvalue, exctb):
269 269 self.close()
270 270
271 271 def callcommand(self, command, args):
272 272 if self._sent:
273 273 raise error.ProgrammingError(
274 274 b'callcommand() cannot be used after sendcommands()'
275 275 )
276 276
277 277 if self._closed:
278 278 raise error.ProgrammingError(
279 279 b'callcommand() cannot be used after close()'
280 280 )
281 281
282 282 # We don't need to support anything fancy. Just call the named
283 283 # method on the peer and return a resolved future.
284 284 fn = getattr(self._peer, pycompat.sysstr(command))
285 285
286 286 f = futures.Future()
287 287
288 288 try:
289 289 result = fn(**pycompat.strkwargs(args))
290 290 except Exception:
291 291 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
292 292 else:
293 293 f.set_result(result)
294 294
295 295 return f
296 296
297 297 def sendcommands(self):
298 298 self._sent = True
299 299
300 300 def close(self):
301 301 self._closed = True
302 302
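# Illustration only (not part of localrepo.py): callers normally drive this
# executor through the peer's commandexecutor() context manager. For a local
# peer the returned future is already resolved, e.g.
#
#   with peer.commandexecutor() as e:
#       fut = e.callcommand(b'lookup', {b'key': b'tip'})
#       e.sendcommands()
#       node = fut.result()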
303 303
304 304 @interfaceutil.implementer(repository.ipeercommands)
305 305 class localpeer(repository.peer):
306 306 '''peer for a local repo; reflects only the most recent API'''
307 307
308 308 def __init__(self, repo, caps=None, path=None):
309 309 super(localpeer, self).__init__(repo.ui, path=path)
310 310
311 311 if caps is None:
312 312 caps = moderncaps.copy()
313 313 self._repo = repo.filtered(b'served')
314 314
315 315 if repo._wanted_sidedata:
316 316 formatted = bundle2.format_remote_wanted_sidedata(repo)
317 317 caps.add(b'exp-wanted-sidedata=' + formatted)
318 318
319 319 self._caps = repo._restrictcapabilities(caps)
320 320
321 321 # Begin of _basepeer interface.
322 322
323 323 def url(self):
324 324 return self._repo.url()
325 325
326 326 def local(self):
327 327 return self._repo
328 328
329 329 def canpush(self):
330 330 return True
331 331
332 332 def close(self):
333 333 self._repo.close()
334 334
335 335 # End of _basepeer interface.
336 336
337 337 # Begin of _basewirecommands interface.
338 338
339 339 def branchmap(self):
340 340 return self._repo.branchmap()
341 341
342 342 def capabilities(self):
343 343 return self._caps
344 344
345 345 def clonebundles(self):
346 346 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
347 347
348 348 def debugwireargs(self, one, two, three=None, four=None, five=None):
349 349 """Used to test argument passing over the wire"""
350 350 return b"%s %s %s %s %s" % (
351 351 one,
352 352 two,
353 353 pycompat.bytestr(three),
354 354 pycompat.bytestr(four),
355 355 pycompat.bytestr(five),
356 356 )
357 357
358 358 def getbundle(
359 359 self,
360 360 source,
361 361 heads=None,
362 362 common=None,
363 363 bundlecaps=None,
364 364 remote_sidedata=None,
365 365 **kwargs
366 366 ):
367 367 chunks = exchange.getbundlechunks(
368 368 self._repo,
369 369 source,
370 370 heads=heads,
371 371 common=common,
372 372 bundlecaps=bundlecaps,
373 373 remote_sidedata=remote_sidedata,
374 374 **kwargs
375 375 )[1]
376 376 cb = util.chunkbuffer(chunks)
377 377
378 378 if exchange.bundle2requested(bundlecaps):
379 379 # When requesting a bundle2, getbundle returns a stream to make the
380 380 # wire level function happier. We need to build a proper object
381 381 # from it in local peer.
382 382 return bundle2.getunbundler(self.ui, cb)
383 383 else:
384 384 return changegroup.getunbundler(b'01', cb, None)
385 385
386 386 def heads(self):
387 387 return self._repo.heads()
388 388
389 389 def known(self, nodes):
390 390 return self._repo.known(nodes)
391 391
392 392 def listkeys(self, namespace):
393 393 return self._repo.listkeys(namespace)
394 394
395 395 def lookup(self, key):
396 396 return self._repo.lookup(key)
397 397
398 398 def pushkey(self, namespace, key, old, new):
399 399 return self._repo.pushkey(namespace, key, old, new)
400 400
401 401 def stream_out(self):
402 402 raise error.Abort(_(b'cannot perform stream clone against local peer'))
403 403
404 404 def unbundle(self, bundle, heads, url):
405 405 """apply a bundle on a repo
406 406
407 407 This function handles the repo locking itself."""
408 408 try:
409 409 try:
410 410 bundle = exchange.readbundle(self.ui, bundle, None)
411 411 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
412 412 if util.safehasattr(ret, b'getchunks'):
413 413 # This is a bundle20 object, turn it into an unbundler.
414 414 # This little dance should be dropped eventually when the
415 415 # API is finally improved.
416 416 stream = util.chunkbuffer(ret.getchunks())
417 417 ret = bundle2.getunbundler(self.ui, stream)
418 418 return ret
419 419 except Exception as exc:
420 420 # If the exception contains output salvaged from a bundle2
421 421 # reply, we need to make sure it is printed before continuing
422 422 # to fail. So we build a bundle2 with such output and consume
423 423 # it directly.
424 424 #
425 425 # This is not very elegant but allows a "simple" solution for
426 426 # issue4594
427 427 output = getattr(exc, '_bundle2salvagedoutput', ())
428 428 if output:
429 429 bundler = bundle2.bundle20(self._repo.ui)
430 430 for out in output:
431 431 bundler.addpart(out)
432 432 stream = util.chunkbuffer(bundler.getchunks())
433 433 b = bundle2.getunbundler(self.ui, stream)
434 434 bundle2.processbundle(self._repo, b)
435 435 raise
436 436 except error.PushRaced as exc:
437 437 raise error.ResponseError(
438 438 _(b'push failed:'), stringutil.forcebytestr(exc)
439 439 )
440 440
441 441 # End of _basewirecommands interface.
442 442
443 443 # Begin of peer interface.
444 444
445 445 def commandexecutor(self):
446 446 return localcommandexecutor(self)
447 447
448 448 # End of peer interface.
449 449
450 450
451 451 @interfaceutil.implementer(repository.ipeerlegacycommands)
452 452 class locallegacypeer(localpeer):
453 453 """peer extension which implements legacy methods too; used for tests with
454 454 restricted capabilities"""
455 455
456 456 def __init__(self, repo, path=None):
457 457 super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
458 458
459 459 # Begin of baselegacywirecommands interface.
460 460
461 461 def between(self, pairs):
462 462 return self._repo.between(pairs)
463 463
464 464 def branches(self, nodes):
465 465 return self._repo.branches(nodes)
466 466
467 467 def changegroup(self, nodes, source):
468 468 outgoing = discovery.outgoing(
469 469 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
470 470 )
471 471 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
472 472
473 473 def changegroupsubset(self, bases, heads, source):
474 474 outgoing = discovery.outgoing(
475 475 self._repo, missingroots=bases, ancestorsof=heads
476 476 )
477 477 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
478 478
479 479 # End of baselegacywirecommands interface.
480 480
481 481
482 482 # Functions receiving (ui, features) that extensions can register to impact
483 483 # the ability to load repositories with custom requirements. Only
484 484 # functions defined in loaded extensions are called.
485 485 #
486 486 # The function receives a set of requirement strings that the repository
487 487 # is capable of opening. Functions will typically add elements to the
488 488 # set to reflect that the extension knows how to handle those requirements.
489 489 featuresetupfuncs = set()
490 490
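# Illustration only (not part of localrepo.py; the requirement name is an
# assumption): an extension typically registers its callback from uisetup()
# so that its extra requirements become recognised, e.g.
#
#   def featuresetup(ui, features):
#       features.add(b'exp-some-feature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)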
491 491
492 492 def _getsharedvfs(hgvfs, requirements):
493 493 """returns the vfs object pointing to root of shared source
494 494 repo for a shared repository
495 495
496 496 hgvfs is vfs pointing at .hg/ of current repo (shared one)
497 497 requirements is a set of requirements of current repo (shared one)
498 498 """
499 499 # The ``shared`` or ``relshared`` requirements indicate the
500 500 # store lives in the path contained in the ``.hg/sharedpath`` file.
501 501 # This is an absolute path for ``shared`` and relative to
502 502 # ``.hg/`` for ``relshared``.
503 503 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
504 504 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
505 505 sharedpath = util.normpath(hgvfs.join(sharedpath))
506 506
507 507 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
508 508
509 509 if not sharedvfs.exists():
510 510 raise error.RepoError(
511 511 _(b'.hg/sharedpath points to nonexistent directory %s')
512 512 % sharedvfs.base
513 513 )
514 514 return sharedvfs
515 515
516 516
517 517 def _readrequires(vfs, allowmissing):
518 518 """reads the requires file present at the root of this vfs
519 519 and returns a set of requirements
520 520
521 521 If allowmissing is True, we suppress FileNotFoundError if raised"""
522 522 # requires file contains a newline-delimited list of
523 523 # features/capabilities the opener (us) must have in order to use
524 524 # the repository. This file was introduced in Mercurial 0.9.2,
525 525 # which means very old repositories may not have one. We assume
526 526 # a missing file translates to no requirements.
527 527 read = vfs.tryread if allowmissing else vfs.read
528 528 return set(read(b'requires').splitlines())
529 529
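# Illustration only (not part of localrepo.py): a typical .hg/requires file
# read by _readrequires() is just a newline-delimited list of requirement
# names, e.g.
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   sparserevlog
#   store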
530 530
531 531 def makelocalrepository(baseui, path: bytes, intents=None):
532 532 """Create a local repository object.
533 533
534 534 Given arguments needed to construct a local repository, this function
535 535 performs various early repository loading functionality (such as
536 536 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
537 537 the repository can be opened, derives a type suitable for representing
538 538 that repository, and returns an instance of it.
539 539
540 540 The returned object conforms to the ``repository.completelocalrepository``
541 541 interface.
542 542
543 543 The repository type is derived by calling a series of factory functions
544 544 for each aspect/interface of the final repository. These are defined by
545 545 ``REPO_INTERFACES``.
546 546
547 547 Each factory function is called to produce a type implementing a specific
548 548 interface. The cumulative list of returned types will be combined into a
549 549 new type and that type will be instantiated to represent the local
550 550 repository.
551 551
552 552 The factory functions each receive various state that may be consulted
553 553 as part of deriving a type.
554 554
555 555 Extensions should wrap these factory functions to customize repository type
556 556 creation. Note that an extension's wrapped function may be called even if
557 557 that extension is not loaded for the repo being constructed. Extensions
558 558 should check if their ``__name__`` appears in the
559 559 ``extensionmodulenames`` set passed to the factory function and no-op if
560 560 not.
561 561 """
562 562 ui = baseui.copy()
563 563 # Prevent copying repo configuration.
564 564 ui.copy = baseui.copy
565 565
566 566 # Working directory VFS rooted at repository root.
567 567 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
568 568
569 569 # Main VFS for .hg/ directory.
570 570 hgpath = wdirvfs.join(b'.hg')
571 571 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
572 572 # Whether this repository is a shared one or not
573 573 shared = False
574 574 # If this repository is shared, vfs pointing to shared repo
575 575 sharedvfs = None
576 576
577 577 # The .hg/ path should exist and should be a directory. All other
578 578 # cases are errors.
579 579 if not hgvfs.isdir():
580 580 try:
581 581 hgvfs.stat()
582 582 except FileNotFoundError:
583 583 pass
584 584 except ValueError as e:
585 585 # Can be raised on Python 3.8 when path is invalid.
586 586 raise error.Abort(
587 587 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
588 588 )
589 589
590 590 raise error.RepoError(_(b'repository %s not found') % path)
591 591
592 592 requirements = _readrequires(hgvfs, True)
593 593 shared = (
594 594 requirementsmod.SHARED_REQUIREMENT in requirements
595 595 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
596 596 )
597 597 storevfs = None
598 598 if shared:
599 599 # This is a shared repo
600 600 sharedvfs = _getsharedvfs(hgvfs, requirements)
601 601 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
602 602 else:
603 603 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
604 604
605 605 # if .hg/requires contains the sharesafe requirement, it means
606 606 # there exists a `.hg/store/requires` too and we should read it
607 607 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
608 608 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
609 609 # is not present; refer to checkrequirementscompat() for that
610 610 #
611 611 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
612 612 # repository was shared the old way. We check the share source .hg/requires
613 613 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
614 614 # to be reshared
615 615 hint = _(b"see `hg help config.format.use-share-safe` for more information")
616 616 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
617 617 if (
618 618 shared
619 619 and requirementsmod.SHARESAFE_REQUIREMENT
620 620 not in _readrequires(sharedvfs, True)
621 621 ):
622 622 mismatch_warn = ui.configbool(
623 623 b'share', b'safe-mismatch.source-not-safe.warn'
624 624 )
625 625 mismatch_config = ui.config(
626 626 b'share', b'safe-mismatch.source-not-safe'
627 627 )
628 628 mismatch_verbose_upgrade = ui.configbool(
629 629 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
630 630 )
631 631 if mismatch_config in (
632 632 b'downgrade-allow',
633 633 b'allow',
634 634 b'downgrade-abort',
635 635 ):
636 636 # prevent cyclic import localrepo -> upgrade -> localrepo
637 637 from . import upgrade
638 638
639 639 upgrade.downgrade_share_to_non_safe(
640 640 ui,
641 641 hgvfs,
642 642 sharedvfs,
643 643 requirements,
644 644 mismatch_config,
645 645 mismatch_warn,
646 646 mismatch_verbose_upgrade,
647 647 )
648 648 elif mismatch_config == b'abort':
649 649 raise error.Abort(
650 650 _(b"share source does not support share-safe requirement"),
651 651 hint=hint,
652 652 )
653 653 else:
654 654 raise error.Abort(
655 655 _(
656 656 b"share-safe mismatch with source.\nUnrecognized"
657 657 b" value '%s' of `share.safe-mismatch.source-not-safe`"
658 658 b" set."
659 659 )
660 660 % mismatch_config,
661 661 hint=hint,
662 662 )
663 663 else:
664 664 requirements |= _readrequires(storevfs, False)
665 665 elif shared:
666 666 sourcerequires = _readrequires(sharedvfs, False)
667 667 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
668 668 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
669 669 mismatch_warn = ui.configbool(
670 670 b'share', b'safe-mismatch.source-safe.warn'
671 671 )
672 672 mismatch_verbose_upgrade = ui.configbool(
673 673 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
674 674 )
675 675 if mismatch_config in (
676 676 b'upgrade-allow',
677 677 b'allow',
678 678 b'upgrade-abort',
679 679 ):
680 680 # prevent cyclic import localrepo -> upgrade -> localrepo
681 681 from . import upgrade
682 682
683 683 upgrade.upgrade_share_to_safe(
684 684 ui,
685 685 hgvfs,
686 686 storevfs,
687 687 requirements,
688 688 mismatch_config,
689 689 mismatch_warn,
690 690 mismatch_verbose_upgrade,
691 691 )
692 692 elif mismatch_config == b'abort':
693 693 raise error.Abort(
694 694 _(
695 695 b'version mismatch: source uses share-safe'
696 696 b' functionality while the current share does not'
697 697 ),
698 698 hint=hint,
699 699 )
700 700 else:
701 701 raise error.Abort(
702 702 _(
703 703 b"share-safe mismatch with source.\nUnrecognized"
704 704 b" value '%s' of `share.safe-mismatch.source-safe` set."
705 705 )
706 706 % mismatch_config,
707 707 hint=hint,
708 708 )
709 709
710 710 # The .hg/hgrc file may load extensions or contain config options
711 711 # that influence repository construction. Attempt to load it and
712 712 # process any new extensions that it may have pulled in.
713 713 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
714 714 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
715 715 extensions.loadall(ui)
716 716 extensions.populateui(ui)
717 717
718 718 # Set of module names of extensions loaded for this repository.
719 719 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
720 720
721 721 supportedrequirements = gathersupportedrequirements(ui)
722 722
723 723 # We first validate the requirements are known.
724 724 ensurerequirementsrecognized(requirements, supportedrequirements)
725 725
726 726 # Then we validate that the known set is reasonable to use together.
727 727 ensurerequirementscompatible(ui, requirements)
728 728
729 729 # TODO there are unhandled edge cases related to opening repositories with
730 730 # shared storage. If storage is shared, we should also test for requirements
731 731 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
732 732 # that repo, as that repo may load extensions needed to open it. This is a
733 733 # bit complicated because we don't want the other hgrc to overwrite settings
734 734 # in this hgrc.
735 735 #
736 736 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
737 737 # file when sharing repos. But if a requirement is added after the share is
738 738 # performed, thereby introducing a new requirement for the opener, we may
739 739 # performed, thereby introducing a new requirement for the opener, we may
740 740 # not see that and could encounter a run-time error interacting with
741 741
742 742 # At this point, we know we should be capable of opening the repository.
743 743 # Now get on with doing that.
744 744
745 745 features = set()
746 746
747 747 # The "store" part of the repository holds versioned data. How it is
748 748 # accessed is determined by various requirements. If `shared` or
749 749 # `relshared` requirements are present, this indicates the current repository
750 750 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
751 751 if shared:
752 752 storebasepath = sharedvfs.base
753 753 cachepath = sharedvfs.join(b'cache')
754 754 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
755 755 else:
756 756 storebasepath = hgvfs.base
757 757 cachepath = hgvfs.join(b'cache')
758 758 wcachepath = hgvfs.join(b'wcache')
759 759
760 760 # The store has changed over time and the exact layout is dictated by
761 761 # requirements. The store interface abstracts differences across all
762 762 # of them.
763 763 store = makestore(
764 764 requirements,
765 765 storebasepath,
766 766 lambda base: vfsmod.vfs(base, cacheaudited=True),
767 767 )
768 768 hgvfs.createmode = store.createmode
769 769
770 770 storevfs = store.vfs
771 771 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
772 772
773 773 if (
774 774 requirementsmod.REVLOGV2_REQUIREMENT in requirements
775 775 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
776 776 ):
777 777 features.add(repository.REPO_FEATURE_SIDE_DATA)
778 778 # the revlogv2 docket introduced race condition that we need to fix
779 779 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
780 780
781 781 # The cache vfs is used to manage cache files.
782 782 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
783 783 cachevfs.createmode = store.createmode
784 784 # The cache vfs is used to manage cache files related to the working copy
785 785 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
786 786 wcachevfs.createmode = store.createmode
787 787
788 788 # Now resolve the type for the repository object. We do this by repeatedly
789 789 # calling a factory function to produce types for specific aspects of the
790 790 # repo's operation. The aggregate returned types are used as base classes
791 791 # for a dynamically-derived type, which will represent our new repository.
792 792
793 793 bases = []
794 794 extrastate = {}
795 795
796 796 for iface, fn in REPO_INTERFACES:
797 797 # We pass all potentially useful state to give extensions tons of
798 798 # flexibility.
799 799 typ = fn()(
800 800 ui=ui,
801 801 intents=intents,
802 802 requirements=requirements,
803 803 features=features,
804 804 wdirvfs=wdirvfs,
805 805 hgvfs=hgvfs,
806 806 store=store,
807 807 storevfs=storevfs,
808 808 storeoptions=storevfs.options,
809 809 cachevfs=cachevfs,
810 810 wcachevfs=wcachevfs,
811 811 extensionmodulenames=extensionmodulenames,
812 812 extrastate=extrastate,
813 813 baseclasses=bases,
814 814 )
815 815
816 816 if not isinstance(typ, type):
817 817 raise error.ProgrammingError(
818 818 b'unable to construct type for %s' % iface
819 819 )
820 820
821 821 bases.append(typ)
822 822
823 823 # type() allows you to use characters in type names that wouldn't be
824 824 # recognized as Python symbols in source code. We abuse that to add
825 825 # rich information about our constructed repo.
826 826 name = pycompat.sysstr(
827 827 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
828 828 )
829 829
830 830 cls = type(name, tuple(bases), {})
831 831
832 832 return cls(
833 833 baseui=baseui,
834 834 ui=ui,
835 835 origroot=path,
836 836 wdirvfs=wdirvfs,
837 837 hgvfs=hgvfs,
838 838 requirements=requirements,
839 839 supportedrequirements=supportedrequirements,
840 840 sharedpath=storebasepath,
841 841 store=store,
842 842 cachevfs=cachevfs,
843 843 wcachevfs=wcachevfs,
844 844 features=features,
845 845 intents=intents,
846 846 )
847 847
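# Illustration only (not part of localrepo.py; `exampleext` names and the mixin
# are made up): as the makelocalrepository() docstring suggests, an extension
# can wrap one of the REPO_INTERFACES factory functions and no-op when it is
# not enabled for the repository being constructed:
#
#   def wrappedmakefilestorage(orig, requirements, features, **kwargs):
#       typ = orig(requirements=requirements, features=features, **kwargs)
#       if __name__ not in kwargs['extensionmodulenames']:
#           return typ  # this extension is not loaded for this repo
#       return type('exampleextfilestorage', (examplemixin, typ), {})
#
#   extensions.wrapfunction(localrepo, 'makefilestorage', wrappedmakefilestorage)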
848 848
849 849 def loadhgrc(
850 850 ui,
851 851 wdirvfs: vfsmod.vfs,
852 852 hgvfs: vfsmod.vfs,
853 853 requirements,
854 854 sharedvfs: Optional[vfsmod.vfs] = None,
855 855 ):
856 856 """Load hgrc files/content into a ui instance.
857 857
858 858 This is called during repository opening to load any additional
859 859 config files or settings relevant to the current repository.
860 860
861 861 Returns a bool indicating whether any additional configs were loaded.
862 862
863 863 Extensions should monkeypatch this function to modify how per-repo
864 864 configs are loaded. For example, an extension may wish to pull in
865 865 configs from alternate files or sources.
866 866
867 867 sharedvfs is a vfs object pointing to the source repo if the current one is
868 868 a shared one
869 869 """
870 870 if not rcutil.use_repo_hgrc():
871 871 return False
872 872
873 873 ret = False
874 874 # first load config from shared source if we have to
875 875 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
876 876 try:
877 877 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
878 878 ret = True
879 879 except IOError:
880 880 pass
881 881
882 882 try:
883 883 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
884 884 ret = True
885 885 except IOError:
886 886 pass
887 887
888 888 try:
889 889 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
890 890 ret = True
891 891 except IOError:
892 892 pass
893 893
894 894 return ret
895 895
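# Illustration only (not part of localrepo.py; the extra config file name is
# made up): per the docstring above, an extension can wrap loadhgrc() to pull
# per-repo config from an additional source, e.g.
#
#   def wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#       ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           ret = True
#       except IOError:
#           pass
#       return ret
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', wrappedloadhgrc)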
896 896
897 897 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
898 898 """Perform additional actions after .hg/hgrc is loaded.
899 899
900 900 This function is called during repository loading immediately after
901 901 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
902 902
903 903 The function can be used to validate configs, automatically add
904 904 options (including extensions) based on requirements, etc.
905 905 """
906 906
907 907 # Map of requirements to list of extensions to load automatically when
908 908 # requirement is present.
909 909 autoextensions = {
910 910 b'git': [b'git'],
911 911 b'largefiles': [b'largefiles'],
912 912 b'lfs': [b'lfs'],
913 913 }
914 914
915 915 for requirement, names in sorted(autoextensions.items()):
916 916 if requirement not in requirements:
917 917 continue
918 918
919 919 for name in names:
920 920 if not ui.hasconfig(b'extensions', name):
921 921 ui.setconfig(b'extensions', name, b'', source=b'autoload')
922 922
923 923
924 924 def gathersupportedrequirements(ui):
925 925 """Determine the complete set of recognized requirements."""
926 926 # Start with all requirements supported by this file.
927 927 supported = set(localrepository._basesupported)
928 928
929 929 # Execute ``featuresetupfuncs`` entries if they belong to an extension
930 930 # relevant to this ui instance.
931 931 modules = {m.__name__ for n, m in extensions.extensions(ui)}
932 932
933 933 for fn in featuresetupfuncs:
934 934 if fn.__module__ in modules:
935 935 fn(ui, supported)
936 936
937 937 # Add derived requirements from registered compression engines.
938 938 for name in util.compengines:
939 939 engine = util.compengines[name]
940 940 if engine.available() and engine.revlogheader():
941 941 supported.add(b'exp-compression-%s' % name)
942 942 if engine.name() == b'zstd':
943 943 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
944 944
945 945 return supported
946 946
947 947
948 948 def ensurerequirementsrecognized(requirements, supported):
949 949 """Validate that a set of local requirements is recognized.
950 950
951 951 Receives a set of requirements. Raises an ``error.RepoError`` if there
952 952 exists any requirement in that set that currently loaded code doesn't
953 953 recognize.
954 954
955 955 Returns a set of supported requirements.
956 956 """
957 957 missing = set()
958 958
959 959 for requirement in requirements:
960 960 if requirement in supported:
961 961 continue
962 962
963 963 if not requirement or not requirement[0:1].isalnum():
964 964 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
965 965
966 966 missing.add(requirement)
967 967
968 968 if missing:
969 969 raise error.RequirementError(
970 970 _(b'repository requires features unknown to this Mercurial: %s')
971 971 % b' '.join(sorted(missing)),
972 972 hint=_(
973 973 b'see https://mercurial-scm.org/wiki/MissingRequirement '
974 974 b'for more information'
975 975 ),
976 976 )
977 977
978 978
979 979 def ensurerequirementscompatible(ui, requirements):
980 980 """Validates that a set of recognized requirements is mutually compatible.
981 981
982 982 Some requirements may not be compatible with others or require
983 983 config options that aren't enabled. This function is called during
984 984 repository opening to ensure that the set of requirements needed
985 985 to open a repository is sane and compatible with config options.
986 986
987 987 Extensions can monkeypatch this function to perform additional
988 988 checking.
989 989
990 990 ``error.RepoError`` should be raised on failure.
991 991 """
992 992 if (
993 993 requirementsmod.SPARSE_REQUIREMENT in requirements
994 994 and not sparse.enabled
995 995 ):
996 996 raise error.RepoError(
997 997 _(
998 998 b'repository is using sparse feature but '
999 999 b'sparse is not enabled; enable the '
1000 1000 b'"sparse" extensions to access'
1001 1001 )
1002 1002 )
1003 1003
1004 1004
1005 1005 def makestore(requirements, path, vfstype):
1006 1006 """Construct a storage object for a repository."""
1007 1007 if requirementsmod.STORE_REQUIREMENT in requirements:
1008 1008 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1009 1009 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1010 1010 return storemod.fncachestore(path, vfstype, dotencode)
1011 1011
1012 1012 return storemod.encodedstore(path, vfstype)
1013 1013
1014 1014 return storemod.basicstore(path, vfstype)
1015 1015
1016 1016
1017 1017 def resolvestorevfsoptions(ui, requirements, features):
1018 1018 """Resolve the options to pass to the store vfs opener.
1019 1019
1020 1020 The returned dict is used to influence behavior of the storage layer.
1021 1021 """
1022 1022 options = {}
1023 1023
1024 1024 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1025 1025 options[b'treemanifest'] = True
1026 1026
1027 1027 # experimental config: format.manifestcachesize
1028 1028 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1029 1029 if manifestcachesize is not None:
1030 1030 options[b'manifestcachesize'] = manifestcachesize
1031 1031
1032 1032 # In the absence of another requirement superseding a revlog-related
1033 1033 # requirement, we have to assume the repo is using revlog version 0.
1034 1034 # This revlog format is super old and we don't bother trying to parse
1035 1035 # opener options for it because those options wouldn't do anything
1036 1036 # meaningful on such old repos.
1037 1037 if (
1038 1038 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1039 1039 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1040 1040 ):
1041 1041 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1042 1042 else: # explicitly mark repo as using revlogv0
1043 1043 options[b'revlogv0'] = True
1044 1044
1045 1045 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1046 1046 options[b'copies-storage'] = b'changeset-sidedata'
1047 1047 else:
1048 1048 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1049 1049 copiesextramode = (b'changeset-only', b'compatibility')
1050 1050 if writecopiesto in copiesextramode:
1051 1051 options[b'copies-storage'] = b'extra'
1052 1052
1053 1053 return options
1054 1054
1055 1055
1056 1056 def resolverevlogstorevfsoptions(ui, requirements, features):
1057 1057 """Resolve opener options specific to revlogs."""
1058 1058
1059 1059 options = {}
1060 1060 options[b'flagprocessors'] = {}
1061 1061
1062 1062 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1063 1063 options[b'revlogv1'] = True
1064 1064 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1065 1065 options[b'revlogv2'] = True
1066 1066 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1067 1067 options[b'changelogv2'] = True
1068 1068 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1069 1069 options[b'changelogv2.compute-rank'] = cmp_rank
1070 1070
1071 1071 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1072 1072 options[b'generaldelta'] = True
1073 1073
1074 1074 # experimental config: format.chunkcachesize
1075 1075 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1076 1076 if chunkcachesize is not None:
1077 1077 options[b'chunkcachesize'] = chunkcachesize
1078 1078
1079 1079 deltabothparents = ui.configbool(
1080 1080 b'storage', b'revlog.optimize-delta-parent-choice'
1081 1081 )
1082 1082 options[b'deltabothparents'] = deltabothparents
1083 1083 dps_cgds = ui.configint(
1084 1084 b'storage',
1085 1085 b'revlog.delta-parent-search.candidate-group-chunk-size',
1086 1086 )
1087 1087 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1088 1088 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1089 1089
1090 1090 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1091 1091 options[b'issue6528.fix-incoming'] = issue6528
1092 1092
1093 1093 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1094 1094 lazydeltabase = False
1095 1095 if lazydelta:
1096 1096 lazydeltabase = ui.configbool(
1097 1097 b'storage', b'revlog.reuse-external-delta-parent'
1098 1098 )
1099 1099 if lazydeltabase is None:
1100 1100 lazydeltabase = not scmutil.gddeltaconfig(ui)
1101 1101 options[b'lazydelta'] = lazydelta
1102 1102 options[b'lazydeltabase'] = lazydeltabase
1103 1103
1104 1104 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1105 1105 if 0 <= chainspan:
1106 1106 options[b'maxdeltachainspan'] = chainspan
1107 1107
1108 1108 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1109 1109 if mmapindexthreshold is not None:
1110 1110 options[b'mmapindexthreshold'] = mmapindexthreshold
1111 1111
1112 1112 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1113 1113 srdensitythres = float(
1114 1114 ui.config(b'experimental', b'sparse-read.density-threshold')
1115 1115 )
1116 1116 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1117 1117 options[b'with-sparse-read'] = withsparseread
1118 1118 options[b'sparse-read-density-threshold'] = srdensitythres
1119 1119 options[b'sparse-read-min-gap-size'] = srmingapsize
1120 1120
1121 1121 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1122 1122 options[b'sparse-revlog'] = sparserevlog
1123 1123 if sparserevlog:
1124 1124 options[b'generaldelta'] = True
1125 1125
1126 1126 maxchainlen = None
1127 1127 if sparserevlog:
1128 1128 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1129 1129 # experimental config: format.maxchainlen
1130 1130 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1131 1131 if maxchainlen is not None:
1132 1132 options[b'maxchainlen'] = maxchainlen
1133 1133
1134 1134 for r in requirements:
1135 1135 # we allow multiple compression engine requirements to co-exist because,
1136 1136 # strictly speaking, revlog seems to support mixed compression styles.
1137 1137 #
1138 1138 # The compression used for new entries will be "the last one"
1139 1139 prefix = r.startswith
1140 1140 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1141 1141 options[b'compengine'] = r.split(b'-', 2)[2]
1142 1142
1143 1143 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1144 1144 if options[b'zlib.level'] is not None:
1145 1145 if not (0 <= options[b'zlib.level'] <= 9):
1146 1146 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1147 1147 raise error.Abort(msg % options[b'zlib.level'])
1148 1148 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1149 1149 if options[b'zstd.level'] is not None:
1150 1150 if not (0 <= options[b'zstd.level'] <= 22):
1151 1151 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1152 1152 raise error.Abort(msg % options[b'zstd.level'])
1153 1153
1154 1154 if requirementsmod.NARROW_REQUIREMENT in requirements:
1155 1155 options[b'enableellipsis'] = True
1156 1156
1157 1157 if ui.configbool(b'experimental', b'rust.index'):
1158 1158 options[b'rust.index'] = True
1159 1159 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1160 1160 slow_path = ui.config(
1161 1161 b'storage', b'revlog.persistent-nodemap.slow-path'
1162 1162 )
1163 1163 if slow_path not in (b'allow', b'warn', b'abort'):
1164 1164 default = ui.config_default(
1165 1165 b'storage', b'revlog.persistent-nodemap.slow-path'
1166 1166 )
1167 1167 msg = _(
1168 1168 b'unknown value for config '
1169 1169 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1170 1170 )
1171 1171 ui.warn(msg % slow_path)
1172 1172 if not ui.quiet:
1173 1173 ui.warn(_(b'falling back to default value: %s\n') % default)
1174 1174 slow_path = default
1175 1175
1176 1176 msg = _(
1177 1177 b"accessing `persistent-nodemap` repository without associated "
1178 1178 b"fast implementation."
1179 1179 )
1180 1180 hint = _(
1181 1181 b"check `hg help config.format.use-persistent-nodemap` "
1182 1182 b"for details"
1183 1183 )
1184 1184 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1185 1185 if slow_path == b'warn':
1186 1186 msg = b"warning: " + msg + b'\n'
1187 1187 ui.warn(msg)
1188 1188 if not ui.quiet:
1189 1189 hint = b'(' + hint + b')\n'
1190 1190 ui.warn(hint)
1191 1191 if slow_path == b'abort':
1192 1192 raise error.Abort(msg, hint=hint)
1193 1193 options[b'persistent-nodemap'] = True
1194 1194 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1195 1195 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1196 1196 if slow_path not in (b'allow', b'warn', b'abort'):
1197 1197 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1198 1198 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1199 1199 ui.warn(msg % slow_path)
1200 1200 if not ui.quiet:
1201 1201 ui.warn(_(b'falling back to default value: %s\n') % default)
1202 1202 slow_path = default
1203 1203
1204 1204 msg = _(
1205 1205 b"accessing `dirstate-v2` repository without associated "
1206 1206 b"fast implementation."
1207 1207 )
1208 1208 hint = _(
1209 1209 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1210 1210 )
1211 1211 if not dirstate.HAS_FAST_DIRSTATE_V2:
1212 1212 if slow_path == b'warn':
1213 1213 msg = b"warning: " + msg + b'\n'
1214 1214 ui.warn(msg)
1215 1215 if not ui.quiet:
1216 1216 hint = b'(' + hint + b')\n'
1217 1217 ui.warn(hint)
1218 1218 if slow_path == b'abort':
1219 1219 raise error.Abort(msg, hint=hint)
1220 1220 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1221 1221 options[b'persistent-nodemap.mmap'] = True
1222 1222 if ui.configbool(b'devel', b'persistent-nodemap'):
1223 1223 options[b'devel-force-nodemap'] = True
1224 1224
1225 1225 return options
1226 1226
1227 1227
1228 1228 def makemain(**kwargs):
1229 1229 """Produce a type conforming to ``ilocalrepositorymain``."""
1230 1230 return localrepository
1231 1231
1232 1232
1233 1233 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1234 1234 class revlogfilestorage:
1235 1235 """File storage when using revlogs."""
1236 1236
1237 1237 def file(self, path):
1238 1238 if path.startswith(b'/'):
1239 1239 path = path[1:]
1240 1240
1241 1241 return filelog.filelog(self.svfs, path)
1242 1242
1243 1243
1244 1244 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1245 1245 class revlognarrowfilestorage:
1246 1246 """File storage when using revlogs and narrow files."""
1247 1247
1248 1248 def file(self, path):
1249 1249 if path.startswith(b'/'):
1250 1250 path = path[1:]
1251 1251
1252 1252 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1253 1253
1254 1254
1255 1255 def makefilestorage(requirements, features, **kwargs):
1256 1256 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1257 1257 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1258 1258 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1259 1259
1260 1260 if requirementsmod.NARROW_REQUIREMENT in requirements:
1261 1261 return revlognarrowfilestorage
1262 1262 else:
1263 1263 return revlogfilestorage
1264 1264
1265 1265
1266 1266 # List of repository interfaces and factory functions for them. Each
1267 1267 # will be called in order during ``makelocalrepository()`` to iteratively
1268 1268 # derive the final type for a local repository instance. We capture the
1269 1269 # function as a lambda so we don't hold a reference and the module-level
1270 1270 # functions can be wrapped.
1271 1271 REPO_INTERFACES = [
1272 1272 (repository.ilocalrepositorymain, lambda: makemain),
1273 1273 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1274 1274 ]
1275 1275
1276 1276
1277 1277 @interfaceutil.implementer(repository.ilocalrepositorymain)
1278 1278 class localrepository:
1279 1279 """Main class for representing local repositories.
1280 1280
1281 1281 All local repositories are instances of this class.
1282 1282
1283 1283 Constructed on its own, instances of this class are not usable as
1284 1284 repository objects. To obtain a usable repository object, call
1285 1285 ``hg.repository()``, ``localrepo.instance()``, or
1286 1286 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1287 1287 ``instance()`` adds support for creating new repositories.
1288 1288 ``hg.repository()`` adds more extension integration, including calling
1289 1289 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1290 1290 used.
1291 1291 """
1292 1292
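    # Illustration only (not part of the class): callers normally obtain an
    # instance through the higher-level helpers mentioned in the docstring, e.g.
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), path=b'/path/to/repo')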
1293 1293 _basesupported = {
1294 1294 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1295 1295 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1296 1296 requirementsmod.CHANGELOGV2_REQUIREMENT,
1297 1297 requirementsmod.COPIESSDC_REQUIREMENT,
1298 1298 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1299 1299 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1300 1300 requirementsmod.DOTENCODE_REQUIREMENT,
1301 1301 requirementsmod.FNCACHE_REQUIREMENT,
1302 1302 requirementsmod.GENERALDELTA_REQUIREMENT,
1303 1303 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1304 1304 requirementsmod.NODEMAP_REQUIREMENT,
1305 1305 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1306 1306 requirementsmod.REVLOGV1_REQUIREMENT,
1307 1307 requirementsmod.REVLOGV2_REQUIREMENT,
1308 1308 requirementsmod.SHARED_REQUIREMENT,
1309 1309 requirementsmod.SHARESAFE_REQUIREMENT,
1310 1310 requirementsmod.SPARSE_REQUIREMENT,
1311 1311 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1312 1312 requirementsmod.STORE_REQUIREMENT,
1313 1313 requirementsmod.TREEMANIFEST_REQUIREMENT,
1314 1314 }
1315 1315
1316 1316 # list of prefixes for files which can be written without 'wlock'
1317 1317 # Extensions should extend this list when needed
1318 1318 _wlockfreeprefix = {
1319 1319 # We might consider requiring 'wlock' for the next
1320 1320 # two, but pretty much all the existing code assume
1321 1321 # wlock is not needed so we keep them excluded for
1322 1322 # now.
1323 1323 b'hgrc',
1324 1324 b'requires',
1325 1325 # XXX cache is a complicated business; someone
1326 1326 # should investigate this in depth at some point
1327 1327 b'cache/',
1328 1328 # XXX bisect was still a bit too messy at the time
1329 1329 # this changeset was introduced. Someone should fix
1330 1330 # the remaining bit and drop this line
1331 1331 b'bisect.state',
1332 1332 }
1333 1333
1334 1334 def __init__(
1335 1335 self,
1336 1336 baseui,
1337 1337 ui,
1338 1338 origroot: bytes,
1339 1339 wdirvfs: vfsmod.vfs,
1340 1340 hgvfs: vfsmod.vfs,
1341 1341 requirements,
1342 1342 supportedrequirements,
1343 1343 sharedpath: bytes,
1344 1344 store,
1345 1345 cachevfs: vfsmod.vfs,
1346 1346 wcachevfs: vfsmod.vfs,
1347 1347 features,
1348 1348 intents=None,
1349 1349 ):
1350 1350 """Create a new local repository instance.
1351 1351
1352 1352 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1353 1353 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1354 1354 object.
1355 1355
1356 1356 Arguments:
1357 1357
1358 1358 baseui
1359 1359 ``ui.ui`` instance that ``ui`` argument was based off of.
1360 1360
1361 1361 ui
1362 1362 ``ui.ui`` instance for use by the repository.
1363 1363
1364 1364 origroot
1365 1365 ``bytes`` path to working directory root of this repository.
1366 1366
1367 1367 wdirvfs
1368 1368 ``vfs.vfs`` rooted at the working directory.
1369 1369
1370 1370 hgvfs
1371 1371 ``vfs.vfs`` rooted at .hg/
1372 1372
1373 1373 requirements
1374 1374 ``set`` of bytestrings representing repository opening requirements.
1375 1375
1376 1376 supportedrequirements
1377 1377 ``set`` of bytestrings representing repository requirements that we
1378 1378 know how to open. May be a superset of ``requirements``.
1379 1379
1380 1380 sharedpath
1381 1381 ``bytes`` Defining path to storage base directory. Points to a
1382 1382 ``.hg/`` directory somewhere.
1383 1383
1384 1384 store
1385 1385 ``store.basicstore`` (or derived) instance providing access to
1386 1386 versioned storage.
1387 1387
1388 1388 cachevfs
1389 1389 ``vfs.vfs`` used for cache files.
1390 1390
1391 1391 wcachevfs
1392 1392 ``vfs.vfs`` used for cache files related to the working copy.
1393 1393
1394 1394 features
1395 1395 ``set`` of bytestrings defining features/capabilities of this
1396 1396 instance.
1397 1397
1398 1398 intents
1399 1399 ``set`` of system strings indicating what this repo will be used
1400 1400 for.
1401 1401 """
1402 1402 self.baseui = baseui
1403 1403 self.ui = ui
1404 1404 self.origroot = origroot
1405 1405 # vfs rooted at working directory.
1406 1406 self.wvfs = wdirvfs
1407 1407 self.root = wdirvfs.base
1408 1408 # vfs rooted at .hg/. Used to access most non-store paths.
1409 1409 self.vfs = hgvfs
1410 1410 self.path = hgvfs.base
1411 1411 self.requirements = requirements
1412 1412 self.nodeconstants = sha1nodeconstants
1413 1413 self.nullid = self.nodeconstants.nullid
1414 1414 self.supported = supportedrequirements
1415 1415 self.sharedpath = sharedpath
1416 1416 self.store = store
1417 1417 self.cachevfs = cachevfs
1418 1418 self.wcachevfs = wcachevfs
1419 1419 self.features = features
1420 1420
1421 1421 self.filtername = None
1422 1422
1423 1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1424 1424 b'devel', b'check-locks'
1425 1425 ):
1426 1426 self.vfs.audit = self._getvfsward(self.vfs.audit)
1427 1427 # A list of callbacks to shape the phase if no data were found.
1428 1428 # Callbacks are in the form: func(repo, roots) --> processed root.
1429 1429 # This list is to be filled by extensions during repo setup
1430 1430 self._phasedefaults = []
1431 1431
1432 1432 color.setup(self.ui)
1433 1433
1434 1434 self.spath = self.store.path
1435 1435 self.svfs = self.store.vfs
1436 1436 self.sjoin = self.store.join
1437 1437 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1438 1438 b'devel', b'check-locks'
1439 1439 ):
1440 1440 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1441 1441 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1442 1442 else: # standard vfs
1443 1443 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1444 1444
1445 1445 self._dirstatevalidatewarned = False
1446 1446
1447 1447 self._branchcaches = branchmap.BranchMapCache()
1448 1448 self._revbranchcache = None
1449 1449 self._filterpats = {}
1450 1450 self._datafilters = {}
1451 1451 self._transref = self._lockref = self._wlockref = None
1452 1452
1453 1453 # A cache for various files under .hg/ that tracks file changes,
1454 1454 # (used by the filecache decorator)
1455 1455 #
1456 1456 # Maps a property name to its util.filecacheentry
1457 1457 self._filecache = {}
1458 1458
1459 1459 # hold sets of revisions to be filtered
1460 1460 # should be cleared when something might have changed the filter value:
1461 1461 # - new changesets,
1462 1462 # - phase change,
1463 1463 # - new obsolescence marker,
1464 1464 # - working directory parent change,
1465 1465 # - bookmark changes
1466 1466 self.filteredrevcache = {}
1467 1467
1468 1468 self._dirstate = None
1469 1469 # post-dirstate-status hooks
1470 1470 self._postdsstatus = []
1471 1471
1472 self._pending_narrow_pats = None
1473
1472 1474 # generic mapping between names and nodes
1473 1475 self.names = namespaces.namespaces()
1474 1476
1475 1477 # Key to signature value.
1476 1478 self._sparsesignaturecache = {}
1477 1479 # Signature to cached matcher instance.
1478 1480 self._sparsematchercache = {}
1479 1481
1480 1482 self._extrafilterid = repoview.extrafilter(ui)
1481 1483
1482 1484 self.filecopiesmode = None
1483 1485 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1484 1486 self.filecopiesmode = b'changeset-sidedata'
1485 1487
1486 1488 self._wanted_sidedata = set()
1487 1489 self._sidedata_computers = {}
1488 1490 sidedatamod.set_sidedata_spec_for_repo(self)
1489 1491
1490 1492 def _getvfsward(self, origfunc):
1491 1493 """build a ward for self.vfs"""
1492 1494 rref = weakref.ref(self)
1493 1495
1494 1496 def checkvfs(path, mode=None):
1495 1497 ret = origfunc(path, mode=mode)
1496 1498 repo = rref()
1497 1499 if (
1498 1500 repo is None
1499 1501 or not util.safehasattr(repo, b'_wlockref')
1500 1502 or not util.safehasattr(repo, b'_lockref')
1501 1503 ):
1502 1504 return
1503 1505 if mode in (None, b'r', b'rb'):
1504 1506 return
1505 1507 if path.startswith(repo.path):
1506 1508 # truncate name relative to the repository (.hg)
1507 1509 path = path[len(repo.path) + 1 :]
1508 1510 if path.startswith(b'cache/'):
1509 1511 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1510 1512 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1511 1513 # path prefixes covered by 'lock'
1512 1514 vfs_path_prefixes = (
1513 1515 b'journal.',
1514 1516 b'undo.',
1515 1517 b'strip-backup/',
1516 1518 b'cache/',
1517 1519 )
1518 1520 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1519 1521 if repo._currentlock(repo._lockref) is None:
1520 1522 repo.ui.develwarn(
1521 1523 b'write with no lock: "%s"' % path,
1522 1524 stacklevel=3,
1523 1525 config=b'check-locks',
1524 1526 )
1525 1527 elif repo._currentlock(repo._wlockref) is None:
1526 1528 # rest of vfs files are covered by 'wlock'
1527 1529 #
1528 1530 # exclude special files
1529 1531 for prefix in self._wlockfreeprefix:
1530 1532 if path.startswith(prefix):
1531 1533 return
1532 1534 repo.ui.develwarn(
1533 1535 b'write with no wlock: "%s"' % path,
1534 1536 stacklevel=3,
1535 1537 config=b'check-locks',
1536 1538 )
1537 1539 return ret
1538 1540
1539 1541 return checkvfs
1540 1542
1541 1543 def _getsvfsward(self, origfunc):
1542 1544 """build a ward for self.svfs"""
1543 1545 rref = weakref.ref(self)
1544 1546
1545 1547 def checksvfs(path, mode=None):
1546 1548 ret = origfunc(path, mode=mode)
1547 1549 repo = rref()
1548 1550 if repo is None or not util.safehasattr(repo, b'_lockref'):
1549 1551 return
1550 1552 if mode in (None, b'r', b'rb'):
1551 1553 return
1552 1554 if path.startswith(repo.sharedpath):
1553 1555 # truncate name relative to the repository (.hg)
1554 1556 path = path[len(repo.sharedpath) + 1 :]
1555 1557 if repo._currentlock(repo._lockref) is None:
1556 1558 repo.ui.develwarn(
1557 1559 b'write with no lock: "%s"' % path, stacklevel=4
1558 1560 )
1559 1561 return ret
1560 1562
1561 1563 return checksvfs
1562 1564
1563 1565 def close(self):
1564 1566 self._writecaches()
1565 1567
1566 1568 def _writecaches(self):
1567 1569 if self._revbranchcache:
1568 1570 self._revbranchcache.write()
1569 1571
1570 1572 def _restrictcapabilities(self, caps):
1571 1573 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1572 1574 caps = set(caps)
1573 1575 capsblob = bundle2.encodecaps(
1574 1576 bundle2.getrepocaps(self, role=b'client')
1575 1577 )
1576 1578 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1577 1579 if self.ui.configbool(b'experimental', b'narrow'):
1578 1580 caps.add(wireprototypes.NARROWCAP)
1579 1581 return caps
1580 1582
1581 1583 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1582 1584 # self -> auditor -> self._checknested -> self
1583 1585
1584 1586 @property
1585 1587 def auditor(self):
1586 1588 # This is only used by context.workingctx.match in order to
1587 1589 # detect files in subrepos.
1588 1590 return pathutil.pathauditor(self.root, callback=self._checknested)
1589 1591
1590 1592 @property
1591 1593 def nofsauditor(self):
1592 1594 # This is only used by context.basectx.match in order to detect
1593 1595 # files in subrepos.
1594 1596 return pathutil.pathauditor(
1595 1597 self.root, callback=self._checknested, realfs=False, cached=True
1596 1598 )
1597 1599
1598 1600 def _checknested(self, path):
1599 1601 """Determine if path is a legal nested repository."""
1600 1602 if not path.startswith(self.root):
1601 1603 return False
1602 1604 subpath = path[len(self.root) + 1 :]
1603 1605 normsubpath = util.pconvert(subpath)
1604 1606
1605 1607 # XXX: Checking against the current working copy is wrong in
1606 1608 # the sense that it can reject things like
1607 1609 #
1608 1610 # $ hg cat -r 10 sub/x.txt
1609 1611 #
1610 1612 # if sub/ is no longer a subrepository in the working copy
1611 1613 # parent revision.
1612 1614 #
1613 1615 # However, it can of course also allow things that would have
1614 1616 # been rejected before, such as the above cat command if sub/
1615 1617 # is a subrepository now, but was a normal directory before.
1616 1618 # The old path auditor would have rejected by mistake since it
1617 1619 # panics when it sees sub/.hg/.
1618 1620 #
1619 1621 # All in all, checking against the working copy seems sensible
1620 1622 # since we want to prevent access to nested repositories on
1621 1623 # the filesystem *now*.
1622 1624 ctx = self[None]
1623 1625 parts = util.splitpath(subpath)
1624 1626 while parts:
1625 1627 prefix = b'/'.join(parts)
1626 1628 if prefix in ctx.substate:
1627 1629 if prefix == normsubpath:
1628 1630 return True
1629 1631 else:
1630 1632 sub = ctx.sub(prefix)
1631 1633 return sub.checknested(subpath[len(prefix) + 1 :])
1632 1634 else:
1633 1635 parts.pop()
1634 1636 return False
1635 1637
1636 1638 def peer(self, path=None):
1637 1639 return localpeer(self, path=path) # not cached to avoid reference cycle
1638 1640
1639 1641 def unfiltered(self):
1640 1642 """Return unfiltered version of the repository
1641 1643
1642 1644 Intended to be overwritten by filtered repo."""
1643 1645 return self
1644 1646
1645 1647 def filtered(self, name, visibilityexceptions=None):
1646 1648 """Return a filtered version of a repository
1647 1649
1648 1650 The `name` parameter is the identifier of the requested view. This
1649 1651 will return a repoview object set "exactly" to the specified view.
1650 1652
1651 1653 This function does not apply recursive filtering to a repository. For
1652 1654 example calling `repo.filtered("served")` will return a repoview using
1653 1655 the "served" view, regardless of the initial view used by `repo`.
1654 1656
1655 1657 In other words, there is always only one level of `repoview` "filtering".
1656 1658 """
1657 1659 if self._extrafilterid is not None and b'%' not in name:
1658 1660 name = name + b'%' + self._extrafilterid
1659 1661
1660 1662 cls = repoview.newtype(self.unfiltered().__class__)
1661 1663 return cls(self, name, visibilityexceptions)
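# Editor's note: illustrative usage sketch, not part of the original file;
# `repo` below stands for any local repository instance. A filtered view is
# always a single level deep and maps back to the same unfiltered repo:
#
#     served = repo.filtered(b'served')
#     assert served.unfiltered() is repo.unfiltered()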
1662 1664
1663 1665 @mixedrepostorecache(
1664 1666 (b'bookmarks', b'plain'),
1665 1667 (b'bookmarks.current', b'plain'),
1666 1668 (b'bookmarks', b''),
1667 1669 (b'00changelog.i', b''),
1668 1670 )
1669 1671 def _bookmarks(self):
1670 1672 # Since the multiple files involved in the transaction cannot be
1671 1673 # written atomically (with current repository format), there is a race
1672 1674 # condition here.
1673 1675 #
1674 1676 # 1) changelog content A is read
1675 1677 # 2) outside transaction update changelog to content B
1676 1678 # 3) outside transaction update bookmark file referring to content B
1677 1679 # 4) bookmarks file content is read and filtered against changelog-A
1678 1680 #
1679 1681 # When this happens, bookmarks against nodes missing from A are dropped.
1680 1682 #
1681 1683 # Having this happen during a read is not great, but it becomes worse
1682 1684 # when it happens during a write because the bookmarks to the "unknown"
1683 1685 # nodes will be dropped for good. However, writes happen within locks.
1684 1686 # This locking makes it possible to have a race-free consistent read.
1685 1687 # For this purpose, data read from disk before locking is
1686 1688 # "invalidated" right after the locks are taken. These invalidations are
1687 1689 # "light": the `filecache` mechanism keeps the data in memory and will
1688 1690 # reuse it if the underlying files did not change. Not parsing the
1689 1691 # same data multiple times helps performance.
1690 1692 #
1691 1693 # Unfortunately, in the case described above, the files tracked by the
1692 1694 # bookmarks file cache might not have changed, but the in-memory
1693 1695 # content is still "wrong" because we used an older changelog content
1694 1696 # to process the on-disk data. So after locking, the changelog would be
1695 1697 # refreshed but `_bookmarks` would be preserved.
1696 1698 # Adding `00changelog.i` to the list of tracked files is not
1697 1699 # enough, because at the time we build the content for `_bookmarks` in
1698 1700 # (4), the changelog file has already diverged from the content used
1699 1701 # for loading `changelog` in (1)
1700 1702 #
1701 1703 # To prevent the issue, we force the changelog to be explicitly
1702 1704 # reloaded while computing `_bookmarks`. The data race can still happen
1703 1705 # without the lock (with a narrower window), but it would no longer go
1704 1706 # undetected during the lock time refresh.
1705 1707 #
1706 1708 # The new schedule is as follows
1707 1709 #
1708 1710 # 1) filecache logic detect that `_bookmarks` needs to be computed
1709 1711 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1710 1712 # 3) We force `changelog` filecache to be tested
1711 1713 # 4) cachestat for `changelog` are captured (for changelog)
1712 1714 # 5) `_bookmarks` is computed and cached
1713 1715 #
1714 1716 # The step in (3) ensures we have a changelog at least as recent as the
1715 1717 # cache stat computed in (1). As a result, at locking time:
1716 1718 # * if the changelog did not change since (1) -> we can reuse the data
1717 1719 # * otherwise -> the bookmarks get refreshed.
1718 1720 self._refreshchangelog()
1719 1721 return bookmarks.bmstore(self)
1720 1722
1721 1723 def _refreshchangelog(self):
1722 1724 """make sure the in memory changelog match the on-disk one"""
1723 1725 if 'changelog' in vars(self) and self.currenttransaction() is None:
1724 1726 del self.changelog
1725 1727
1726 1728 @property
1727 1729 def _activebookmark(self):
1728 1730 return self._bookmarks.active
1729 1731
1730 1732 # _phasesets depend on changelog. what we need is to call
1731 1733 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1732 1734 # can't be easily expressed in filecache mechanism.
1733 1735 @storecache(b'phaseroots', b'00changelog.i')
1734 1736 def _phasecache(self):
1735 1737 return phases.phasecache(self, self._phasedefaults)
1736 1738
1737 1739 @storecache(b'obsstore')
1738 1740 def obsstore(self):
1739 1741 return obsolete.makestore(self.ui, self)
1740 1742
1741 1743 @changelogcache()
1742 1744 def changelog(repo):
1743 1745 # load dirstate before changelog to avoid a race; see issue6303
1744 1746 repo.dirstate.prefetch_parents()
1745 1747 return repo.store.changelog(
1746 1748 txnutil.mayhavepending(repo.root),
1747 1749 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1748 1750 )
1749 1751
1750 1752 @manifestlogcache()
1751 1753 def manifestlog(self):
1752 1754 return self.store.manifestlog(self, self._storenarrowmatch)
1753 1755
1754 1756 @unfilteredpropertycache
1755 1757 def dirstate(self):
1756 1758 if self._dirstate is None:
1757 1759 self._dirstate = self._makedirstate()
1758 1760 else:
1759 1761 self._dirstate.refresh()
1760 1762 return self._dirstate
1761 1763
1762 1764 def _makedirstate(self):
1763 1765 """Extension point for wrapping the dirstate per-repo."""
1764 1766 sparsematchfn = None
1765 1767 if sparse.use_sparse(self):
1766 1768 sparsematchfn = lambda: sparse.matcher(self)
1767 1769 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1768 1770 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1769 1771 use_dirstate_v2 = v2_req in self.requirements
1770 1772 use_tracked_hint = th in self.requirements
1771 1773
1772 1774 return dirstate.dirstate(
1773 1775 self.vfs,
1774 1776 self.ui,
1775 1777 self.root,
1776 1778 self._dirstatevalidate,
1777 1779 sparsematchfn,
1778 1780 self.nodeconstants,
1779 1781 use_dirstate_v2,
1780 1782 use_tracked_hint=use_tracked_hint,
1781 1783 )
1782 1784
1783 1785 def _dirstatevalidate(self, node):
1784 1786 try:
1785 1787 self.changelog.rev(node)
1786 1788 return node
1787 1789 except error.LookupError:
1788 1790 if not self._dirstatevalidatewarned:
1789 1791 self._dirstatevalidatewarned = True
1790 1792 self.ui.warn(
1791 1793 _(b"warning: ignoring unknown working parent %s!\n")
1792 1794 % short(node)
1793 1795 )
1794 1796 return self.nullid
1795 1797
1796 1798 @storecache(narrowspec.FILENAME)
1797 1799 def narrowpats(self):
1798 1800 """matcher patterns for this repository's narrowspec
1799 1801
1800 1802 A tuple of (includes, excludes).
1801 1803 """
1802 return narrowspec.load(self)
1804 # the narrow management should probably move into its own object
1805 val = self._pending_narrow_pats
1806 if val is None:
1807 val = narrowspec.load(self)
1808 return val
1803 1809
1804 1810 @storecache(narrowspec.FILENAME)
1805 1811 def _storenarrowmatch(self):
1806 1812 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1807 1813 return matchmod.always()
1808 1814 include, exclude = self.narrowpats
1809 1815 return narrowspec.match(self.root, include=include, exclude=exclude)
1810 1816
1811 1817 @storecache(narrowspec.FILENAME)
1812 1818 def _narrowmatch(self):
1813 1819 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1814 1820 return matchmod.always()
1815 1821 narrowspec.checkworkingcopynarrowspec(self)
1816 1822 include, exclude = self.narrowpats
1817 1823 return narrowspec.match(self.root, include=include, exclude=exclude)
1818 1824
1819 1825 def narrowmatch(self, match=None, includeexact=False):
1820 1826 """matcher corresponding the the repo's narrowspec
1821 1827
1822 1828 If `match` is given, then that will be intersected with the narrow
1823 1829 matcher.
1824 1830
1825 1831 If `includeexact` is True, then any exact matches from `match` will
1826 1832 be included even if they're outside the narrowspec.
1827 1833 """
1828 1834 if match:
1829 1835 if includeexact and not self._narrowmatch.always():
1830 1836 # do not exclude explicitly-specified paths so that they can
1831 1837 # be warned later on
1832 1838 em = matchmod.exact(match.files())
1833 1839 nm = matchmod.unionmatcher([self._narrowmatch, em])
1834 1840 return matchmod.intersectmatchers(match, nm)
1835 1841 return matchmod.intersectmatchers(match, self._narrowmatch)
1836 1842 return self._narrowmatch
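# Editor's note: hypothetical usage sketch, not part of the original file;
# the pattern and file name below are illustrative only. Intersecting an
# explicit match with the repository's narrowspec:
#
#     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#     nm = repo.narrowmatch(m)
#     nm(b'src/main.c')  # True only if the file is in both the match and the narrowspec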
1837 1843
1838 1844 def setnarrowpats(self, newincludes, newexcludes):
1839 1845 narrowspec.save(self, newincludes, newexcludes)
1840 1846 self.invalidate(clearfilecache=True)
1841 1847
1842 1848 @unfilteredpropertycache
1843 1849 def _quick_access_changeid_null(self):
1844 1850 return {
1845 1851 b'null': (nullrev, self.nodeconstants.nullid),
1846 1852 nullrev: (nullrev, self.nodeconstants.nullid),
1847 1853 self.nullid: (nullrev, self.nullid),
1848 1854 }
1849 1855
1850 1856 @unfilteredpropertycache
1851 1857 def _quick_access_changeid_wc(self):
1852 1858 # also fast path access to the working copy parents
1853 1859 # however, only do it for filter that ensure wc is visible.
1854 1860 quick = self._quick_access_changeid_null.copy()
1855 1861 cl = self.unfiltered().changelog
1856 1862 for node in self.dirstate.parents():
1857 1863 if node == self.nullid:
1858 1864 continue
1859 1865 rev = cl.index.get_rev(node)
1860 1866 if rev is None:
1861 1867 # unknown working copy parent case:
1862 1868 #
1863 1869 # skip the fast path and let higher code deal with it
1864 1870 continue
1865 1871 pair = (rev, node)
1866 1872 quick[rev] = pair
1867 1873 quick[node] = pair
1868 1874 # also add the parents of the parents
1869 1875 for r in cl.parentrevs(rev):
1870 1876 if r == nullrev:
1871 1877 continue
1872 1878 n = cl.node(r)
1873 1879 pair = (r, n)
1874 1880 quick[r] = pair
1875 1881 quick[n] = pair
1876 1882 p1node = self.dirstate.p1()
1877 1883 if p1node != self.nullid:
1878 1884 quick[b'.'] = quick[p1node]
1879 1885 return quick
1880 1886
1881 1887 @unfilteredmethod
1882 1888 def _quick_access_changeid_invalidate(self):
1883 1889 if '_quick_access_changeid_wc' in vars(self):
1884 1890 del self.__dict__['_quick_access_changeid_wc']
1885 1891
1886 1892 @property
1887 1893 def _quick_access_changeid(self):
1888 1894 """an helper dictionnary for __getitem__ calls
1889 1895
1890 1896 This contains a list of symbols we can recognise right away without
1891 1897 further processing.
1892 1898 """
1893 1899 if self.filtername in repoview.filter_has_wc:
1894 1900 return self._quick_access_changeid_wc
1895 1901 return self._quick_access_changeid_null
1896 1902
1897 1903 def __getitem__(self, changeid):
1898 1904 # dealing with special cases
1899 1905 if changeid is None:
1900 1906 return context.workingctx(self)
1901 1907 if isinstance(changeid, context.basectx):
1902 1908 return changeid
1903 1909
1904 1910 # dealing with multiple revisions
1905 1911 if isinstance(changeid, slice):
1906 1912 # wdirrev isn't contiguous so the slice shouldn't include it
1907 1913 return [
1908 1914 self[i]
1909 1915 for i in range(*changeid.indices(len(self)))
1910 1916 if i not in self.changelog.filteredrevs
1911 1917 ]
1912 1918
1913 1919 # dealing with some special values
1914 1920 quick_access = self._quick_access_changeid.get(changeid)
1915 1921 if quick_access is not None:
1916 1922 rev, node = quick_access
1917 1923 return context.changectx(self, rev, node, maybe_filtered=False)
1918 1924 if changeid == b'tip':
1919 1925 node = self.changelog.tip()
1920 1926 rev = self.changelog.rev(node)
1921 1927 return context.changectx(self, rev, node)
1922 1928
1923 1929 # dealing with arbitrary values
1924 1930 try:
1925 1931 if isinstance(changeid, int):
1926 1932 node = self.changelog.node(changeid)
1927 1933 rev = changeid
1928 1934 elif changeid == b'.':
1929 1935 # this is a hack to delay/avoid loading obsmarkers
1930 1936 # when we know that '.' won't be hidden
1931 1937 node = self.dirstate.p1()
1932 1938 rev = self.unfiltered().changelog.rev(node)
1933 1939 elif len(changeid) == self.nodeconstants.nodelen:
1934 1940 try:
1935 1941 node = changeid
1936 1942 rev = self.changelog.rev(changeid)
1937 1943 except error.FilteredLookupError:
1938 1944 changeid = hex(changeid) # for the error message
1939 1945 raise
1940 1946 except LookupError:
1941 1947 # check if it might have come from damaged dirstate
1942 1948 #
1943 1949 # XXX we could avoid the unfiltered if we had a recognizable
1944 1950 # exception for filtered changeset access
1945 1951 if (
1946 1952 self.local()
1947 1953 and changeid in self.unfiltered().dirstate.parents()
1948 1954 ):
1949 1955 msg = _(b"working directory has unknown parent '%s'!")
1950 1956 raise error.Abort(msg % short(changeid))
1951 1957 changeid = hex(changeid) # for the error message
1952 1958 raise
1953 1959
1954 1960 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1955 1961 node = bin(changeid)
1956 1962 rev = self.changelog.rev(node)
1957 1963 else:
1958 1964 raise error.ProgrammingError(
1959 1965 b"unsupported changeid '%s' of type %s"
1960 1966 % (changeid, pycompat.bytestr(type(changeid)))
1961 1967 )
1962 1968
1963 1969 return context.changectx(self, rev, node)
1964 1970
1965 1971 except (error.FilteredIndexError, error.FilteredLookupError):
1966 1972 raise error.FilteredRepoLookupError(
1967 1973 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1968 1974 )
1969 1975 except (IndexError, LookupError):
1970 1976 raise error.RepoLookupError(
1971 1977 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1972 1978 )
1973 1979 except error.WdirUnsupported:
1974 1980 return context.workingctx(self)
1975 1981
1976 1982 def __contains__(self, changeid):
1977 1983 """True if the given changeid exists"""
1978 1984 try:
1979 1985 self[changeid]
1980 1986 return True
1981 1987 except error.RepoLookupError:
1982 1988 return False
1983 1989
1984 1990 def __nonzero__(self):
1985 1991 return True
1986 1992
1987 1993 __bool__ = __nonzero__
1988 1994
1989 1995 def __len__(self):
1990 1996 # no need to pay the cost of repoview.changelog
1991 1997 unfi = self.unfiltered()
1992 1998 return len(unfi.changelog)
1993 1999
1994 2000 def __iter__(self):
1995 2001 return iter(self.changelog)
1996 2002
1997 2003 def revs(self, expr: bytes, *args):
1998 2004 """Find revisions matching a revset.
1999 2005
2000 2006 The revset is specified as a string ``expr`` that may contain
2001 2007 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2002 2008
2003 2009 Revset aliases from the configuration are not expanded. To expand
2004 2010 user aliases, consider calling ``scmutil.revrange()`` or
2005 2011 ``repo.anyrevs([expr], user=True)``.
2006 2012
2007 2013 Returns a smartset.abstractsmartset, which is a list-like interface
2008 2014 that contains integer revisions.
2009 2015 """
2010 2016 tree = revsetlang.spectree(expr, *args)
2011 2017 return revset.makematcher(tree)(self)
2012 2018
2013 2019 def set(self, expr: bytes, *args):
2014 2020 """Find revisions matching a revset and emit changectx instances.
2015 2021
2016 2022 This is a convenience wrapper around ``revs()`` that iterates the
2017 2023 result and is a generator of changectx instances.
2018 2024
2019 2025 Revset aliases from the configuration are not expanded. To expand
2020 2026 user aliases, consider calling ``scmutil.revrange()``.
2021 2027 """
2022 2028 for r in self.revs(expr, *args):
2023 2029 yield self[r]
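# Editor's note: hypothetical usage sketch, not part of the original file;
# the revset and branch name are made up. `revs()` accepts %-formatting
# handled by revsetlang.formatspec, and `set()` yields changectx objects
# for the same kind of query:
#
#     revs = repo.revs(b'heads(branch(%s))', b'default')
#     for ctx in repo.set(b'%ld and not public()', list(revs)):
#         ui.write(ctx.description())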
2024 2030
2025 2031 def anyrevs(self, specs: bytes, user=False, localalias=None):
2026 2032 """Find revisions matching one of the given revsets.
2027 2033
2028 2034 Revset aliases from the configuration are not expanded by default. To
2029 2035 expand user aliases, specify ``user=True``. To provide some local
2030 2036 definitions overriding user aliases, set ``localalias`` to
2031 2037 ``{name: definitionstring}``.
2032 2038 """
2033 2039 if specs == [b'null']:
2034 2040 return revset.baseset([nullrev])
2035 2041 if specs == [b'.']:
2036 2042 quick_data = self._quick_access_changeid.get(b'.')
2037 2043 if quick_data is not None:
2038 2044 return revset.baseset([quick_data[0]])
2039 2045 if user:
2040 2046 m = revset.matchany(
2041 2047 self.ui,
2042 2048 specs,
2043 2049 lookup=revset.lookupfn(self),
2044 2050 localalias=localalias,
2045 2051 )
2046 2052 else:
2047 2053 m = revset.matchany(None, specs, localalias=localalias)
2048 2054 return m(self)
2049 2055
2050 2056 def url(self) -> bytes:
2051 2057 return b'file:' + self.root
2052 2058
2053 2059 def hook(self, name, throw=False, **args):
2054 2060 """Call a hook, passing this repo instance.
2055 2061
2056 2062 This is a convenience method to aid in invoking hooks. Extensions likely
2057 2063 won't call this unless they have registered a custom hook or are
2058 2064 replacing code that is expected to call a hook.
2059 2065 """
2060 2066 return hook.hook(self.ui, self, name, throw, **args)
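# Editor's note: hypothetical usage sketch, not part of the original file;
# the hook name and keyword argument are made up. Keyword arguments are
# made available to hooks (for shell hooks, as HG_* environment variables):
#
#     repo.hook(b'myextension-done', throw=False, result=0)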
2061 2067
2062 2068 @filteredpropertycache
2063 2069 def _tagscache(self):
2064 2070 """Returns a tagscache object that contains various tags related
2065 2071 caches."""
2066 2072
2067 2073 # This simplifies its cache management by having one decorated
2068 2074 # function (this one) and the rest simply fetch things from it.
2069 2075 class tagscache:
2070 2076 def __init__(self):
2071 2077 # These two define the set of tags for this repository. tags
2072 2078 # maps tag name to node; tagtypes maps tag name to 'global' or
2073 2079 # 'local'. (Global tags are defined by .hgtags across all
2074 2080 # heads, and local tags are defined in .hg/localtags.)
2075 2081 # They constitute the in-memory cache of tags.
2076 2082 self.tags = self.tagtypes = None
2077 2083
2078 2084 self.nodetagscache = self.tagslist = None
2079 2085
2080 2086 cache = tagscache()
2081 2087 cache.tags, cache.tagtypes = self._findtags()
2082 2088
2083 2089 return cache
2084 2090
2085 2091 def tags(self):
2086 2092 '''return a mapping of tag to node'''
2087 2093 t = {}
2088 2094 if self.changelog.filteredrevs:
2089 2095 tags, tt = self._findtags()
2090 2096 else:
2091 2097 tags = self._tagscache.tags
2092 2098 rev = self.changelog.rev
2093 2099 for k, v in tags.items():
2094 2100 try:
2095 2101 # ignore tags to unknown nodes
2096 2102 rev(v)
2097 2103 t[k] = v
2098 2104 except (error.LookupError, ValueError):
2099 2105 pass
2100 2106 return t
2101 2107
2102 2108 def _findtags(self):
2103 2109 """Do the hard work of finding tags. Return a pair of dicts
2104 2110 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2105 2111 maps tag name to a string like \'global\' or \'local\'.
2106 2112 Subclasses or extensions are free to add their own tags, but
2107 2113 should be aware that the returned dicts will be retained for the
2108 2114 duration of the localrepo object."""
2109 2115
2110 2116 # XXX what tagtype should subclasses/extensions use? Currently
2111 2117 # mq and bookmarks add tags, but do not set the tagtype at all.
2112 2118 # Should each extension invent its own tag type? Should there
2113 2119 # be one tagtype for all such "virtual" tags? Or is the status
2114 2120 # quo fine?
2115 2121
2116 2122 # map tag name to (node, hist)
2117 2123 alltags = tagsmod.findglobaltags(self.ui, self)
2118 2124 # map tag name to tag type
2119 2125 tagtypes = {tag: b'global' for tag in alltags}
2120 2126
2121 2127 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2122 2128
2123 2129 # Build the return dicts. Have to re-encode tag names because
2124 2130 # the tags module always uses UTF-8 (in order not to lose info
2125 2131 # writing to the cache), but the rest of Mercurial wants them in
2126 2132 # local encoding.
2127 2133 tags = {}
2128 2134 for name, (node, hist) in alltags.items():
2129 2135 if node != self.nullid:
2130 2136 tags[encoding.tolocal(name)] = node
2131 2137 tags[b'tip'] = self.changelog.tip()
2132 2138 tagtypes = {
2133 2139 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2134 2140 }
2135 2141 return (tags, tagtypes)
2136 2142
2137 2143 def tagtype(self, tagname):
2138 2144 """
2139 2145 return the type of the given tag. result can be:
2140 2146
2141 2147 'local' : a local tag
2142 2148 'global' : a global tag
2143 2149 None : tag does not exist
2144 2150 """
2145 2151
2146 2152 return self._tagscache.tagtypes.get(tagname)
2147 2153
2148 2154 def tagslist(self):
2149 2155 '''return a list of tags ordered by revision'''
2150 2156 if not self._tagscache.tagslist:
2151 2157 l = []
2152 2158 for t, n in self.tags().items():
2153 2159 l.append((self.changelog.rev(n), t, n))
2154 2160 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2155 2161
2156 2162 return self._tagscache.tagslist
2157 2163
2158 2164 def nodetags(self, node):
2159 2165 '''return the tags associated with a node'''
2160 2166 if not self._tagscache.nodetagscache:
2161 2167 nodetagscache = {}
2162 2168 for t, n in self._tagscache.tags.items():
2163 2169 nodetagscache.setdefault(n, []).append(t)
2164 2170 for tags in nodetagscache.values():
2165 2171 tags.sort()
2166 2172 self._tagscache.nodetagscache = nodetagscache
2167 2173 return self._tagscache.nodetagscache.get(node, [])
2168 2174
2169 2175 def nodebookmarks(self, node):
2170 2176 """return the list of bookmarks pointing to the specified node"""
2171 2177 return self._bookmarks.names(node)
2172 2178
2173 2179 def branchmap(self):
2174 2180 """returns a dictionary {branch: [branchheads]} with branchheads
2175 2181 ordered by increasing revision number"""
2176 2182 return self._branchcaches[self]
2177 2183
2178 2184 @unfilteredmethod
2179 2185 def revbranchcache(self):
2180 2186 if not self._revbranchcache:
2181 2187 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2182 2188 return self._revbranchcache
2183 2189
2184 2190 def register_changeset(self, rev, changelogrevision):
2185 2191 self.revbranchcache().setdata(rev, changelogrevision)
2186 2192
2187 2193 def branchtip(self, branch, ignoremissing=False):
2188 2194 """return the tip node for a given branch
2189 2195
2190 2196 If ignoremissing is True, then this method will not raise an error.
2191 2197 This is helpful for callers that only expect None for a missing branch
2192 2198 (e.g. namespace).
2193 2199
2194 2200 """
2195 2201 try:
2196 2202 return self.branchmap().branchtip(branch)
2197 2203 except KeyError:
2198 2204 if not ignoremissing:
2199 2205 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2200 2206 else:
2201 2207 pass
2202 2208
2203 2209 def lookup(self, key):
2204 2210 node = scmutil.revsymbol(self, key).node()
2205 2211 if node is None:
2206 2212 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2207 2213 return node
2208 2214
2209 2215 def lookupbranch(self, key):
2210 2216 if self.branchmap().hasbranch(key):
2211 2217 return key
2212 2218
2213 2219 return scmutil.revsymbol(self, key).branch()
2214 2220
2215 2221 def known(self, nodes):
2216 2222 cl = self.changelog
2217 2223 get_rev = cl.index.get_rev
2218 2224 filtered = cl.filteredrevs
2219 2225 result = []
2220 2226 for n in nodes:
2221 2227 r = get_rev(n)
2222 2228 resp = not (r is None or r in filtered)
2223 2229 result.append(resp)
2224 2230 return result
2225 2231
2226 2232 def local(self):
2227 2233 return self
2228 2234
2229 2235 def publishing(self):
2230 2236 # it's safe (and desirable) to trust the publish flag unconditionally
2231 2237 # so that we don't finalize changes shared between users via ssh or nfs
2232 2238 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2233 2239
2234 2240 def cancopy(self):
2235 2241 # so statichttprepo's override of local() works
2236 2242 if not self.local():
2237 2243 return False
2238 2244 if not self.publishing():
2239 2245 return True
2240 2246 # if publishing we can't copy if there is filtered content
2241 2247 return not self.filtered(b'visible').changelog.filteredrevs
2242 2248
2243 2249 def shared(self):
2244 2250 '''the type of shared repository (None if not shared)'''
2245 2251 if self.sharedpath != self.path:
2246 2252 return b'store'
2247 2253 return None
2248 2254
2249 2255 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2250 2256 return self.vfs.reljoin(self.root, f, *insidef)
2251 2257
2252 2258 def setparents(self, p1, p2=None):
2253 2259 if p2 is None:
2254 2260 p2 = self.nullid
2255 2261 self[None].setparents(p1, p2)
2256 2262 self._quick_access_changeid_invalidate()
2257 2263
2258 2264 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2259 2265 """changeid must be a changeset revision, if specified.
2260 2266 fileid can be a file revision or node."""
2261 2267 return context.filectx(
2262 2268 self, path, changeid, fileid, changectx=changectx
2263 2269 )
2264 2270
2265 2271 def getcwd(self) -> bytes:
2266 2272 return self.dirstate.getcwd()
2267 2273
2268 2274 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2269 2275 return self.dirstate.pathto(f, cwd)
2270 2276
2271 2277 def _loadfilter(self, filter):
2272 2278 if filter not in self._filterpats:
2273 2279 l = []
2274 2280 for pat, cmd in self.ui.configitems(filter):
2275 2281 if cmd == b'!':
2276 2282 continue
2277 2283 mf = matchmod.match(self.root, b'', [pat])
2278 2284 fn = None
2279 2285 params = cmd
2280 2286 for name, filterfn in self._datafilters.items():
2281 2287 if cmd.startswith(name):
2282 2288 fn = filterfn
2283 2289 params = cmd[len(name) :].lstrip()
2284 2290 break
2285 2291 if not fn:
2286 2292 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2287 2293 fn.__name__ = 'commandfilter'
2288 2294 # Wrap old filters not supporting keyword arguments
2289 2295 if not pycompat.getargspec(fn)[2]:
2290 2296 oldfn = fn
2291 2297 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2292 2298 fn.__name__ = 'compat-' + oldfn.__name__
2293 2299 l.append((mf, fn, params))
2294 2300 self._filterpats[filter] = l
2295 2301 return self._filterpats[filter]
2296 2302
2297 2303 def _filter(self, filterpats, filename, data):
2298 2304 for mf, fn, cmd in filterpats:
2299 2305 if mf(filename):
2300 2306 self.ui.debug(
2301 2307 b"filtering %s through %s\n"
2302 2308 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2303 2309 )
2304 2310 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2305 2311 break
2306 2312
2307 2313 return data
2308 2314
2309 2315 @unfilteredpropertycache
2310 2316 def _encodefilterpats(self):
2311 2317 return self._loadfilter(b'encode')
2312 2318
2313 2319 @unfilteredpropertycache
2314 2320 def _decodefilterpats(self):
2315 2321 return self._loadfilter(b'decode')
2316 2322
2317 2323 def adddatafilter(self, name, filter):
2318 2324 self._datafilters[name] = filter
2319 2325
2320 2326 def wread(self, filename: bytes) -> bytes:
2321 2327 if self.wvfs.islink(filename):
2322 2328 data = self.wvfs.readlink(filename)
2323 2329 else:
2324 2330 data = self.wvfs.read(filename)
2325 2331 return self._filter(self._encodefilterpats, filename, data)
2326 2332
2327 2333 def wwrite(
2328 2334 self,
2329 2335 filename: bytes,
2330 2336 data: bytes,
2331 2337 flags: bytes,
2332 2338 backgroundclose=False,
2333 2339 **kwargs
2334 2340 ) -> int:
2335 2341 """write ``data`` into ``filename`` in the working directory
2336 2342
2337 2343 This returns the length of the written (possibly decoded) data.
2338 2344 """
2339 2345 data = self._filter(self._decodefilterpats, filename, data)
2340 2346 if b'l' in flags:
2341 2347 self.wvfs.symlink(data, filename)
2342 2348 else:
2343 2349 self.wvfs.write(
2344 2350 filename, data, backgroundclose=backgroundclose, **kwargs
2345 2351 )
2346 2352 if b'x' in flags:
2347 2353 self.wvfs.setflags(filename, False, True)
2348 2354 else:
2349 2355 self.wvfs.setflags(filename, False, False)
2350 2356 return len(data)
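# Editor's note: hypothetical usage sketch, not part of the original file;
# the file names and contents are illustrative. `flags` combines b'l'
# (symlink) and b'x' (executable); an empty string means a regular file:
#
#     repo.wwrite(b'README', b'hello\n', b'')        # regular file
#     repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')    # executable file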
2351 2357
2352 2358 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2353 2359 return self._filter(self._decodefilterpats, filename, data)
2354 2360
2355 2361 def currenttransaction(self):
2356 2362 """return the current transaction or None if non exists"""
2357 2363 if self._transref:
2358 2364 tr = self._transref()
2359 2365 else:
2360 2366 tr = None
2361 2367
2362 2368 if tr and tr.running():
2363 2369 return tr
2364 2370 return None
2365 2371
2366 2372 def transaction(self, desc, report=None):
2367 2373 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2368 2374 b'devel', b'check-locks'
2369 2375 ):
2370 2376 if self._currentlock(self._lockref) is None:
2371 2377 raise error.ProgrammingError(b'transaction requires locking')
2372 2378 tr = self.currenttransaction()
2373 2379 if tr is not None:
2374 2380 return tr.nest(name=desc)
2375 2381
2376 2382 # abort here if the journal already exists
2377 2383 if self.svfs.exists(b"journal"):
2378 2384 raise error.RepoError(
2379 2385 _(b"abandoned transaction found"),
2380 2386 hint=_(b"run 'hg recover' to clean up transaction"),
2381 2387 )
2382 2388
2383 2389 # At that point your dirstate should be clean:
2384 2390 #
2385 2391 # - If you don't have the wlock, why would you still have a dirty
2386 2392 # dirstate ?
2387 2393 #
2388 2394 # - If you hold the wlock, you should not be opening a transaction in
2389 2395 # the middle of a `dirstate.changing_*` block. The transaction needs to
2390 2396 # be open before that and wrap the change-context.
2391 2397 #
2392 2398 # - If you are not within a `dirstate.changing_*` context, why is our
2393 2399 # dirstate dirty?
2394 2400 if self.dirstate._dirty:
2395 2401 m = "cannot open a transaction with a dirty dirstate"
2396 2402 raise error.ProgrammingError(m)
2397 2403
2398 2404 idbase = b"%.40f#%f" % (random.random(), time.time())
2399 2405 ha = hex(hashutil.sha1(idbase).digest())
2400 2406 txnid = b'TXN:' + ha
2401 2407 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2402 2408
2403 2409 self._writejournal(desc)
2404 2410 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2405 2411 if report:
2406 2412 rp = report
2407 2413 else:
2408 2414 rp = self.ui.warn
2409 2415 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2410 2416 # we must avoid cyclic reference between repo and transaction.
2411 2417 reporef = weakref.ref(self)
2412 2418 # Code to track tag movement
2413 2419 #
2414 2420 # Since tags are all handled as file content, it is actually quite hard
2415 2421 # to track these movements from a code perspective. So we fall back to
2416 2422 # tracking at the repository level. One could envision tracking changes
2417 2423 # to the '.hgtags' file through changegroup application, but that fails to
2418 2424 # cope with cases where a transaction exposes new heads without a
2419 2425 # changegroup being involved (eg: phase movement).
2420 2426 #
2421 2427 # For now, we gate the feature behind a flag since this likely comes
2422 2428 # with performance impacts. The current code runs more often than needed
2423 2429 # and does not use caches as much as it could. The current focus is on
2424 2430 # the behavior of the feature so we disable it by default. The flag
2425 2431 # will be removed when we are happy with the performance impact.
2426 2432 #
2427 2433 # Once this feature is no longer experimental move the following
2428 2434 # documentation to the appropriate help section:
2429 2435 #
2430 2436 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2431 2437 # tags (new or changed or deleted tags). In addition the details of
2432 2438 # these changes are made available in a file at:
2433 2439 # ``REPOROOT/.hg/changes/tags.changes``.
2434 2440 # Make sure you check for HG_TAG_MOVED before reading that file as it
2435 2441 # might exist from a previous transaction even if no tags were touched
2436 2442 # in this one. Changes are recorded in a line-based format::
2437 2443 #
2438 2444 # <action> <hex-node> <tag-name>\n
2439 2445 #
2440 2446 # Actions are defined as follows:
2441 2447 # "-R": tag is removed,
2442 2448 # "+A": tag is added,
2443 2449 # "-M": tag is moved (old value),
2444 2450 # "+M": tag is moved (new value),
2445 2451 tracktags = lambda x: None
2446 2452 # experimental config: experimental.hook-track-tags
2447 2453 shouldtracktags = self.ui.configbool(
2448 2454 b'experimental', b'hook-track-tags'
2449 2455 )
2450 2456 if desc != b'strip' and shouldtracktags:
2451 2457 oldheads = self.changelog.headrevs()
2452 2458
2453 2459 def tracktags(tr2):
2454 2460 repo = reporef()
2455 2461 assert repo is not None # help pytype
2456 2462 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2457 2463 newheads = repo.changelog.headrevs()
2458 2464 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2459 2465 # notes: we compare lists here.
2460 2466 # As we do it only once, building a set would not be cheaper
2461 2467 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2462 2468 if changes:
2463 2469 tr2.hookargs[b'tag_moved'] = b'1'
2464 2470 with repo.vfs(
2465 2471 b'changes/tags.changes', b'w', atomictemp=True
2466 2472 ) as changesfile:
2467 2473 # note: we do not register the file with the transaction
2468 2474 # because we need it to still exist when the transaction
2469 2475 # is closed (for txnclose hooks)
2470 2476 tagsmod.writediff(changesfile, changes)
2471 2477
2472 2478 def validate(tr2):
2473 2479 """will run pre-closing hooks"""
2474 2480 # XXX the transaction API is a bit lacking here so we take a hacky
2475 2481 # path for now
2476 2482 #
2477 2483 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2478 2484 # dict is copied before these run. In addition we need the data
2479 2485 # available to in-memory hooks too.
2480 2486 #
2481 2487 # Moreover, we also need to make sure this runs before txnclose
2482 2488 # hooks and there is no "pending" mechanism that would execute
2483 2489 # logic only if hooks are about to run.
2484 2490 #
2485 2491 # Fixing this limitation of the transaction is also needed to track
2486 2492 # other families of changes (bookmarks, phases, obsolescence).
2487 2493 #
2488 2494 # This will have to be fixed before we remove the experimental
2489 2495 # gating.
2490 2496 tracktags(tr2)
2491 2497 repo = reporef()
2492 2498 assert repo is not None # help pytype
2493 2499
2494 2500 singleheadopt = (b'experimental', b'single-head-per-branch')
2495 2501 singlehead = repo.ui.configbool(*singleheadopt)
2496 2502 if singlehead:
2497 2503 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2498 2504 accountclosed = singleheadsub.get(
2499 2505 b"account-closed-heads", False
2500 2506 )
2501 2507 if singleheadsub.get(b"public-changes-only", False):
2502 2508 filtername = b"immutable"
2503 2509 else:
2504 2510 filtername = b"visible"
2505 2511 scmutil.enforcesinglehead(
2506 2512 repo, tr2, desc, accountclosed, filtername
2507 2513 )
2508 2514 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2509 2515 for name, (old, new) in sorted(
2510 2516 tr.changes[b'bookmarks'].items()
2511 2517 ):
2512 2518 args = tr.hookargs.copy()
2513 2519 args.update(bookmarks.preparehookargs(name, old, new))
2514 2520 repo.hook(
2515 2521 b'pretxnclose-bookmark',
2516 2522 throw=True,
2517 2523 **pycompat.strkwargs(args)
2518 2524 )
2519 2525 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2520 2526 cl = repo.unfiltered().changelog
2521 2527 for revs, (old, new) in tr.changes[b'phases']:
2522 2528 for rev in revs:
2523 2529 args = tr.hookargs.copy()
2524 2530 node = hex(cl.node(rev))
2525 2531 args.update(phases.preparehookargs(node, old, new))
2526 2532 repo.hook(
2527 2533 b'pretxnclose-phase',
2528 2534 throw=True,
2529 2535 **pycompat.strkwargs(args)
2530 2536 )
2531 2537
2532 2538 repo.hook(
2533 2539 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2534 2540 )
2535 2541
2536 2542 def releasefn(tr, success):
2537 2543 repo = reporef()
2538 2544 if repo is None:
2539 2545 # If the repo has been GC'd (and this release function is being
2540 2546 # called from transaction.__del__), there's not much we can do,
2541 2547 # so just leave the unfinished transaction there and let the
2542 2548 # user run `hg recover`.
2543 2549 return
2544 2550 if success:
2545 2551 # this should be explicitly invoked here, because
2546 2552 # in-memory changes aren't written out when closing the
2547 2553 # transaction, if tr.addfilegenerator (via
2548 2554 # dirstate.write or so) isn't invoked while the
2549 2555 # transaction is running
2550 2556 repo.dirstate.write(None)
2551 2557 else:
2552 2558 # discard all changes (including ones already written
2553 2559 # out) in this transaction
2554 2560 narrowspec.restorebackup(self, b'journal.narrowspec')
2555 2561 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2556 2562
2557 2563 repo.invalidate(clearfilecache=True)
2558 2564
2559 2565 tr = transaction.transaction(
2560 2566 rp,
2561 2567 self.svfs,
2562 2568 vfsmap,
2563 2569 b"journal",
2564 2570 b"undo",
2565 2571 aftertrans(renames),
2566 2572 self.store.createmode,
2567 2573 validator=validate,
2568 2574 releasefn=releasefn,
2569 2575 checkambigfiles=_cachedfiles,
2570 2576 name=desc,
2571 2577 )
2572 2578 tr.changes[b'origrepolen'] = len(self)
2573 2579 tr.changes[b'obsmarkers'] = set()
2574 2580 tr.changes[b'phases'] = []
2575 2581 tr.changes[b'bookmarks'] = {}
2576 2582
2577 2583 tr.hookargs[b'txnid'] = txnid
2578 2584 tr.hookargs[b'txnname'] = desc
2579 2585 tr.hookargs[b'changes'] = tr.changes
2580 2586 # note: writing the fncache only during finalize means that the file is
2581 2587 # outdated when running hooks. As fncache is used for streaming clone,
2582 2588 # this is not expected to break anything that happens during the hooks.
2583 2589 tr.addfinalize(b'flush-fncache', self.store.write)
2584 2590
2585 2591 def txnclosehook(tr2):
2586 2592 """To be run if transaction is successful, will schedule a hook run"""
2587 2593 # Don't reference tr2 in hook() so we don't hold a reference.
2588 2594 # This reduces memory consumption when there are multiple
2589 2595 # transactions per lock. This can likely go away if issue5045
2590 2596 # fixes the function accumulation.
2591 2597 hookargs = tr2.hookargs
2592 2598
2593 2599 def hookfunc(unused_success):
2594 2600 repo = reporef()
2595 2601 assert repo is not None # help pytype
2596 2602
2597 2603 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2598 2604 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2599 2605 for name, (old, new) in bmchanges:
2600 2606 args = tr.hookargs.copy()
2601 2607 args.update(bookmarks.preparehookargs(name, old, new))
2602 2608 repo.hook(
2603 2609 b'txnclose-bookmark',
2604 2610 throw=False,
2605 2611 **pycompat.strkwargs(args)
2606 2612 )
2607 2613
2608 2614 if hook.hashook(repo.ui, b'txnclose-phase'):
2609 2615 cl = repo.unfiltered().changelog
2610 2616 phasemv = sorted(
2611 2617 tr.changes[b'phases'], key=lambda r: r[0][0]
2612 2618 )
2613 2619 for revs, (old, new) in phasemv:
2614 2620 for rev in revs:
2615 2621 args = tr.hookargs.copy()
2616 2622 node = hex(cl.node(rev))
2617 2623 args.update(phases.preparehookargs(node, old, new))
2618 2624 repo.hook(
2619 2625 b'txnclose-phase',
2620 2626 throw=False,
2621 2627 **pycompat.strkwargs(args)
2622 2628 )
2623 2629
2624 2630 repo.hook(
2625 2631 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2626 2632 )
2627 2633
2628 2634 repo = reporef()
2629 2635 assert repo is not None # help pytype
2630 2636 repo._afterlock(hookfunc)
2631 2637
2632 2638 tr.addfinalize(b'txnclose-hook', txnclosehook)
2633 2639 # Include a leading "-" to make it happen before the transaction summary
2634 2640 # reports registered via scmutil.registersummarycallback() whose names
2635 2641 # are 00-txnreport etc. That way, the caches will be warm when the
2636 2642 # callbacks run.
2637 2643 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2638 2644
2639 2645 def txnaborthook(tr2):
2640 2646 """To be run if transaction is aborted"""
2641 2647 repo = reporef()
2642 2648 assert repo is not None # help pytype
2643 2649 repo.hook(
2644 2650 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2645 2651 )
2646 2652
2647 2653 tr.addabort(b'txnabort-hook', txnaborthook)
2648 2654 # avoid eager cache invalidation. in-memory data should be identical
2649 2655 # to stored data if transaction has no error.
2650 2656 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2651 2657 self._transref = weakref.ref(tr)
2652 2658 scmutil.registersummarycallback(self, tr, desc)
2653 2659 # This only exists to deal with the need for rollback to have viable
2654 2660 # parents at the end of the operation. So back up viable parents at the
2655 2661 # time of this operation.
2656 2662 #
2657 2663 # We only do it when the `wlock` is taken, otherwise others might be
2658 2664 # altering the dirstate under us.
2659 2665 #
2660 2666 # This is really not a great way to do this (first, because we cannot
2661 2667 # always do it). More viable alternatives exist:
2662 2668 #
2663 2669 # - backing up only the working copy parents in a dedicated file and doing
2664 2670 # a clean "keep-update" to them on `hg rollback`.
2665 2671 #
2666 2672 # - slightly changing the behavior and applying a logic similar to "hg
2667 2673 # strip" to pick a working copy destination on `hg rollback`
2668 2674 if self.currentwlock() is not None:
2669 2675 ds = self.dirstate
2670 2676
2671 2677 def backup_dirstate(tr):
2672 2678 for f in ds.all_file_names():
2673 2679 # hardlink backup is okay because `dirstate` is always
2674 2680 # atomically written and possible data files are append-only
2675 2681 # and resistant to trailing data.
2676 2682 tr.addbackup(f, hardlink=True, location=b'plain')
2677 2683
2678 2684 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2679 2685 return tr
2680 2686
2681 2687 def _journalfiles(self):
2682 2688 return (
2683 2689 (self.svfs, b'journal'),
2684 2690 (self.svfs, b'journal.narrowspec'),
2685 2691 (self.vfs, b'journal.narrowspec.dirstate'),
2686 2692 (self.vfs, b'journal.branch'),
2687 2693 (self.vfs, b'journal.desc'),
2688 2694 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2689 2695 (self.svfs, b'journal.phaseroots'),
2690 2696 )
2691 2697
2692 2698 def undofiles(self):
2693 2699 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2694 2700
2695 2701 @unfilteredmethod
2696 2702 def _writejournal(self, desc):
2697 2703 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2698 2704 narrowspec.savebackup(self, b'journal.narrowspec')
2699 2705 self.vfs.write(
2700 2706 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2701 2707 )
2702 2708 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2703 2709 bookmarksvfs = bookmarks.bookmarksvfs(self)
2704 2710 bookmarksvfs.write(
2705 2711 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2706 2712 )
2707 2713 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2708 2714
2709 2715 def recover(self):
2710 2716 with self.lock():
2711 2717 if self.svfs.exists(b"journal"):
2712 2718 self.ui.status(_(b"rolling back interrupted transaction\n"))
2713 2719 vfsmap = {
2714 2720 b'': self.svfs,
2715 2721 b'plain': self.vfs,
2716 2722 }
2717 2723 transaction.rollback(
2718 2724 self.svfs,
2719 2725 vfsmap,
2720 2726 b"journal",
2721 2727 self.ui.warn,
2722 2728 checkambigfiles=_cachedfiles,
2723 2729 )
2724 2730 self.invalidate()
2725 2731 return True
2726 2732 else:
2727 2733 self.ui.warn(_(b"no interrupted transaction available\n"))
2728 2734 return False
2729 2735
2730 2736 def rollback(self, dryrun=False, force=False):
2731 2737 wlock = lock = None
2732 2738 try:
2733 2739 wlock = self.wlock()
2734 2740 lock = self.lock()
2735 2741 if self.svfs.exists(b"undo"):
2736 2742 return self._rollback(dryrun, force)
2737 2743 else:
2738 2744 self.ui.warn(_(b"no rollback information available\n"))
2739 2745 return 1
2740 2746 finally:
2741 2747 release(lock, wlock)
2742 2748
2743 2749 @unfilteredmethod # Until we get smarter cache management
2744 2750 def _rollback(self, dryrun, force):
2745 2751 ui = self.ui
2746 2752
2747 2753 parents = self.dirstate.parents()
2748 2754 try:
2749 2755 args = self.vfs.read(b'undo.desc').splitlines()
2750 2756 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2751 2757 if len(args) >= 3:
2752 2758 detail = args[2]
2753 2759 oldtip = oldlen - 1
2754 2760
2755 2761 if detail and ui.verbose:
2756 2762 msg = _(
2757 2763 b'repository tip rolled back to revision %d'
2758 2764 b' (undo %s: %s)\n'
2759 2765 ) % (oldtip, desc, detail)
2760 2766 else:
2761 2767 msg = _(
2762 2768 b'repository tip rolled back to revision %d (undo %s)\n'
2763 2769 ) % (oldtip, desc)
2764 2770 parentgone = any(self[p].rev() > oldtip for p in parents)
2765 2771 except IOError:
2766 2772 msg = _(b'rolling back unknown transaction\n')
2767 2773 desc = None
2768 2774 parentgone = True
2769 2775
2770 2776 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2771 2777 raise error.Abort(
2772 2778 _(
2773 2779 b'rollback of last commit while not checked out '
2774 2780 b'may lose data'
2775 2781 ),
2776 2782 hint=_(b'use -f to force'),
2777 2783 )
2778 2784
2779 2785 ui.status(msg)
2780 2786 if dryrun:
2781 2787 return 0
2782 2788
2783 2789 self.destroying()
2784 2790 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2785 2791 skip_journal_pattern = None
2786 2792 if not parentgone:
2787 2793 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2788 2794 transaction.rollback(
2789 2795 self.svfs,
2790 2796 vfsmap,
2791 2797 b'undo',
2792 2798 ui.warn,
2793 2799 checkambigfiles=_cachedfiles,
2794 2800 skip_journal_pattern=skip_journal_pattern,
2795 2801 )
2796 2802 bookmarksvfs = bookmarks.bookmarksvfs(self)
2797 2803 if bookmarksvfs.exists(b'undo.bookmarks'):
2798 2804 bookmarksvfs.rename(
2799 2805 b'undo.bookmarks', b'bookmarks', checkambig=True
2800 2806 )
2801 2807 if self.svfs.exists(b'undo.phaseroots'):
2802 2808 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2803 2809 self.invalidate()
2804 2810 self.dirstate.invalidate()
2805 2811
2806 2812 if parentgone:
2807 2813 # replace this with some explicit parent update in the future.
2808 2814 has_node = self.changelog.index.has_node
2809 2815 if not all(has_node(p) for p in self.dirstate._pl):
2810 2816 # There was no dirstate to back up initially; we need to drop
2811 2817 # the existing one.
2812 2818 with self.dirstate.changing_parents(self):
2813 2819 self.dirstate.setparents(self.nullid)
2814 2820 self.dirstate.clear()
2815 2821
2816 2822 narrowspec.restorebackup(self, b'undo.narrowspec')
2817 2823 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2818 2824 try:
2819 2825 branch = self.vfs.read(b'undo.branch')
2820 2826 self.dirstate.setbranch(encoding.tolocal(branch))
2821 2827 except IOError:
2822 2828 ui.warn(
2823 2829 _(
2824 2830 b'named branch could not be reset: '
2825 2831 b'current branch is still \'%s\'\n'
2826 2832 )
2827 2833 % self.dirstate.branch()
2828 2834 )
2829 2835
2830 2836 parents = tuple([p.rev() for p in self[None].parents()])
2831 2837 if len(parents) > 1:
2832 2838 ui.status(
2833 2839 _(
2834 2840 b'working directory now based on '
2835 2841 b'revisions %d and %d\n'
2836 2842 )
2837 2843 % parents
2838 2844 )
2839 2845 else:
2840 2846 ui.status(
2841 2847 _(b'working directory now based on revision %d\n') % parents
2842 2848 )
2843 2849 mergestatemod.mergestate.clean(self)
2844 2850
2845 2851 # TODO: if we know which new heads may result from this rollback, pass
2846 2852 # them to destroy(), which will prevent the branchhead cache from being
2847 2853 # invalidated.
2848 2854 self.destroyed()
2849 2855 return 0
2850 2856
2851 2857 def _buildcacheupdater(self, newtransaction):
2852 2858 """called during transaction to build the callback updating cache
2853 2859
2854 2860 Lives on the repository to help extensions that might want to augment
2855 2861 this logic. For this purpose, the created transaction is passed to the
2856 2862 method.
2857 2863 """
2858 2864 # we must avoid cyclic reference between repo and transaction.
2859 2865 reporef = weakref.ref(self)
2860 2866
2861 2867 def updater(tr):
2862 2868 repo = reporef()
2863 2869 assert repo is not None # help pytype
2864 2870 repo.updatecaches(tr)
2865 2871
2866 2872 return updater
2867 2873
2868 2874 @unfilteredmethod
2869 2875 def updatecaches(self, tr=None, full=False, caches=None):
2870 2876 """warm appropriate caches
2871 2877
2872 2878 If this function is called after a transaction closed, the transaction
2873 2879 will be available in the 'tr' argument. This can be used to selectively
2874 2880 update caches relevant to the changes in that transaction.
2875 2881
2876 2882 If 'full' is set, make sure all caches the function knows about have
2877 2883 up-to-date data, even the ones usually loaded more lazily.
2878 2884
2879 2885 The `full` argument can take a special "post-clone" value. In this case
2880 2886 the cache warming is done after a clone and some of the slower caches might
2881 2887 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2882 2888 as we plan for a cleaner way to deal with this for 5.9.
2883 2889 """
2884 2890 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2885 2891 # During strip, many caches are invalid but
2886 2892 # a later call to `destroyed` will refresh them.
2887 2893 return
2888 2894
2889 2895 unfi = self.unfiltered()
2890 2896
2891 2897 if full:
2892 2898 msg = (
2893 2899 "`full` argument for `repo.updatecaches` is deprecated\n"
2894 2900 "(use `caches=repository.CACHE_ALL` instead)"
2895 2901 )
2896 2902 self.ui.deprecwarn(msg, b"5.9")
2897 2903 caches = repository.CACHES_ALL
2898 2904 if full == b"post-clone":
2899 2905 caches = repository.CACHES_POST_CLONE
2900 2906 caches = repository.CACHES_ALL
2901 2907 elif caches is None:
2902 2908 caches = repository.CACHES_DEFAULT
2903 2909
2904 2910 if repository.CACHE_BRANCHMAP_SERVED in caches:
2905 2911 if tr is None or tr.changes[b'origrepolen'] < len(self):
2906 2912 # accessing the 'served' branchmap should refresh all the others,
2907 2913 self.ui.debug(b'updating the branch cache\n')
2908 2914 self.filtered(b'served').branchmap()
2909 2915 self.filtered(b'served.hidden').branchmap()
2910 2916 # flush all possibly delayed write.
2911 2917 self._branchcaches.write_delayed(self)
2912 2918
2913 2919 if repository.CACHE_CHANGELOG_CACHE in caches:
2914 2920 self.changelog.update_caches(transaction=tr)
2915 2921
2916 2922 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2917 2923 self.manifestlog.update_caches(transaction=tr)
2918 2924
2919 2925 if repository.CACHE_REV_BRANCH in caches:
2920 2926 rbc = unfi.revbranchcache()
2921 2927 for r in unfi.changelog:
2922 2928 rbc.branchinfo(r)
2923 2929 rbc.write()
2924 2930
2925 2931 if repository.CACHE_FULL_MANIFEST in caches:
2926 2932 # ensure the working copy parents are in the manifestfulltextcache
2927 2933 for ctx in self[b'.'].parents():
2928 2934 ctx.manifest() # accessing the manifest is enough
2929 2935
2930 2936 if repository.CACHE_FILE_NODE_TAGS in caches:
2931 2937 # accessing fnode cache warms the cache
2932 2938 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2933 2939
2934 2940 if repository.CACHE_TAGS_DEFAULT in caches:
2935 2941 # accessing tags warms the cache
2936 2942 self.tags()
2937 2943 if repository.CACHE_TAGS_SERVED in caches:
2938 2944 self.filtered(b'served').tags()
2939 2945
2940 2946 if repository.CACHE_BRANCHMAP_ALL in caches:
2941 2947 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2942 2948 # so we're forcing a write to cause these caches to be warmed up
2943 2949 # even if they haven't explicitly been requested yet (if they've
2944 2950 # never been used by hg, they won't ever have been written, even if
2945 2951 # they're a subset of another kind of cache that *has* been used).
2946 2952 for filt in repoview.filtertable.keys():
2947 2953 filtered = self.filtered(filt)
2948 2954 filtered.branchmap().write(filtered)
2949 2955
2950 2956 def invalidatecaches(self):
2951 2957 if '_tagscache' in vars(self):
2952 2958 # can't use delattr on proxy
2953 2959 del self.__dict__['_tagscache']
2954 2960
2955 2961 self._branchcaches.clear()
2956 2962 self.invalidatevolatilesets()
2957 2963 self._sparsesignaturecache.clear()
2958 2964
2959 2965 def invalidatevolatilesets(self):
2960 2966 self.filteredrevcache.clear()
2961 2967 obsolete.clearobscaches(self)
2962 2968 self._quick_access_changeid_invalidate()
2963 2969
2964 2970 def invalidatedirstate(self):
2965 2971 """Invalidates the dirstate, causing the next call to dirstate
2966 2972 to check if it was modified since the last time it was read,
2967 2973 rereading it if it has.
2968 2974
2969 2975 This is different from dirstate.invalidate() in that it doesn't always
2970 2976 reread the dirstate. Use dirstate.invalidate() if you want to
2971 2977 explicitly read the dirstate again (i.e. restoring it to a previous
2972 2978 known good state)."""
2973 2979 unfi = self.unfiltered()
2974 2980 if 'dirstate' in unfi.__dict__:
2975 2981 del unfi.__dict__['dirstate']
2976 2982
2977 2983 def invalidate(self, clearfilecache=False):
2978 2984 """Invalidates both store and non-store parts other than dirstate
2979 2985
2980 2986 If a transaction is running, invalidation of store is omitted,
2981 2987 because discarding in-memory changes might cause inconsistency
2982 2988 (e.g. incomplete fncache causes unintentional failure, but
2983 2989 redundant one doesn't).
2984 2990 """
2985 2991 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2986 2992 for k in list(self._filecache.keys()):
2987 2993 if (
2988 2994 k == b'changelog'
2989 2995 and self.currenttransaction()
2990 2996 and self.changelog._delayed
2991 2997 ):
2992 2998 # The changelog object may store unwritten revisions. We don't
2993 2999 # want to lose them.
2994 3000 # TODO: Solve the problem instead of working around it.
2995 3001 continue
2996 3002
2997 3003 if clearfilecache:
2998 3004 del self._filecache[k]
2999 3005 try:
3000 3006 delattr(unfiltered, k)
3001 3007 except AttributeError:
3002 3008 pass
3003 3009 self.invalidatecaches()
3004 3010 if not self.currenttransaction():
3005 3011 # TODO: Changing contents of store outside transaction
3006 3012 # causes inconsistency. We should make in-memory store
3007 3013 # changes detectable, and abort if changed.
3008 3014 self.store.invalidatecaches()
3009 3015
3010 3016 def invalidateall(self):
3011 3017 """Fully invalidates both store and non-store parts, causing the
3012 3018 subsequent operation to reread any outside changes."""
3013 3019 # extension should hook this to invalidate its caches
3014 3020 self.invalidate()
3015 3021 self.invalidatedirstate()
3016 3022
3017 3023 @unfilteredmethod
3018 3024 def _refreshfilecachestats(self, tr):
3019 3025 """Reload stats of cached files so that they are flagged as valid"""
3020 3026 for k, ce in self._filecache.items():
3021 3027 k = pycompat.sysstr(k)
3022 3028 if k == 'dirstate' or k not in self.__dict__:
3023 3029 continue
3024 3030 ce.refresh()
3025 3031
3026 3032 def _lock(
3027 3033 self,
3028 3034 vfs,
3029 3035 lockname,
3030 3036 wait,
3031 3037 releasefn,
3032 3038 acquirefn,
3033 3039 desc,
3034 3040 ):
3035 3041 timeout = 0
3036 3042 warntimeout = 0
3037 3043 if wait:
3038 3044 timeout = self.ui.configint(b"ui", b"timeout")
3039 3045 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3040 3046 # internal config: ui.signal-safe-lock
3041 3047 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3042 3048
3043 3049 l = lockmod.trylock(
3044 3050 self.ui,
3045 3051 vfs,
3046 3052 lockname,
3047 3053 timeout,
3048 3054 warntimeout,
3049 3055 releasefn=releasefn,
3050 3056 acquirefn=acquirefn,
3051 3057 desc=desc,
3052 3058 signalsafe=signalsafe,
3053 3059 )
3054 3060 return l
3055 3061
3056 3062 def _afterlock(self, callback):
3057 3063 """add a callback to be run when the repository is fully unlocked
3058 3064
3059 3065 The callback will be executed when the outermost lock is released
3060 3066 (with wlock being higher level than 'lock')."""
3061 3067 for ref in (self._wlockref, self._lockref):
3062 3068 l = ref and ref()
3063 3069 if l and l.held:
3064 3070 l.postrelease.append(callback)
3065 3071 break
3066 3072 else: # no lock has been found.
3067 3073 callback(True)
3068 3074
3069 3075 def lock(self, wait=True):
3070 3076 """Lock the repository store (.hg/store) and return a weak reference
3071 3077 to the lock. Use this before modifying the store (e.g. committing or
3072 3078 stripping). If you are opening a transaction, get a lock as well.
3073 3079
3074 3080 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3075 3081 'wlock' first to avoid a dead-lock hazard."""
3076 3082 l = self._currentlock(self._lockref)
3077 3083 if l is not None:
3078 3084 l.lock()
3079 3085 return l
3080 3086
3081 3087 l = self._lock(
3082 3088 vfs=self.svfs,
3083 3089 lockname=b"lock",
3084 3090 wait=wait,
3085 3091 releasefn=None,
3086 3092 acquirefn=self.invalidate,
3087 3093 desc=_(b'repository %s') % self.origroot,
3088 3094 )
3089 3095 self._lockref = weakref.ref(l)
3090 3096 return l
3091 3097
3092 3098 def wlock(self, wait=True):
3093 3099 """Lock the non-store parts of the repository (everything under
3094 3100 .hg except .hg/store) and return a weak reference to the lock.
3095 3101
3096 3102 Use this before modifying files in .hg.
3097 3103
3098 3104 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3099 3105 'wlock' first to avoid a dead-lock hazard."""
3100 3106 l = self._wlockref() if self._wlockref else None
3101 3107 if l is not None and l.held:
3102 3108 l.lock()
3103 3109 return l
3104 3110
3105 3111 # We do not need to check for non-waiting lock acquisition. Such
3106 3112 # acquisition would not cause dead-lock as they would just fail.
3107 3113 if wait and (
3108 3114 self.ui.configbool(b'devel', b'all-warnings')
3109 3115 or self.ui.configbool(b'devel', b'check-locks')
3110 3116 ):
3111 3117 if self._currentlock(self._lockref) is not None:
3112 3118 self.ui.develwarn(b'"wlock" acquired after "lock"')
3113 3119
3114 3120 def unlock():
3115 3121 if self.dirstate.is_changing_any:
3116 3122 msg = b"wlock release in the middle of a changing parents"
3117 3123 self.ui.develwarn(msg)
3118 3124 self.dirstate.invalidate()
3119 3125 else:
3120 3126 if self.dirstate._dirty:
3121 3127 msg = b"dirty dirstate on wlock release"
3122 3128 self.ui.develwarn(msg)
3123 3129 self.dirstate.write(None)
3124 3130
3125 3131 unfi = self.unfiltered()
3126 3132 if 'dirstate' in unfi.__dict__:
3127 3133 del unfi.__dict__['dirstate']
3128 3134
3129 3135 l = self._lock(
3130 3136 self.vfs,
3131 3137 b"wlock",
3132 3138 wait,
3133 3139 unlock,
3134 3140 self.invalidatedirstate,
3135 3141 _(b'working directory of %s') % self.origroot,
3136 3142 )
3137 3143 self._wlockref = weakref.ref(l)
3138 3144 return l
3139 3145
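# Editor's note: a minimal usage sketch (not part of this module) showing the
# lock ordering documented above; the transaction name is illustrative.
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction(b'example-change'):
#             pass  # mutate the store while both locks are held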
3140 3146 def _currentlock(self, lockref):
3141 3147 """Returns the lock if it's held, or None if it's not."""
3142 3148 if lockref is None:
3143 3149 return None
3144 3150 l = lockref()
3145 3151 if l is None or not l.held:
3146 3152 return None
3147 3153 return l
3148 3154
3149 3155 def currentwlock(self):
3150 3156 """Returns the wlock if it's held, or None if it's not."""
3151 3157 return self._currentlock(self._wlockref)
3152 3158
3153 3159 def checkcommitpatterns(self, wctx, match, status, fail):
3154 3160 """check for commit arguments that aren't committable"""
3155 3161 if match.isexact() or match.prefix():
3156 3162 matched = set(status.modified + status.added + status.removed)
3157 3163
3158 3164 for f in match.files():
3159 3165 f = self.dirstate.normalize(f)
3160 3166 if f == b'.' or f in matched or f in wctx.substate:
3161 3167 continue
3162 3168 if f in status.deleted:
3163 3169 fail(f, _(b'file not found!'))
3164 3170 # Is it a directory that exists or used to exist?
3165 3171 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3166 3172 d = f + b'/'
3167 3173 for mf in matched:
3168 3174 if mf.startswith(d):
3169 3175 break
3170 3176 else:
3171 3177 fail(f, _(b"no match under directory!"))
3172 3178 elif f not in self.dirstate:
3173 3179 fail(f, _(b"file not tracked!"))
3174 3180
3175 3181 @unfilteredmethod
3176 3182 def commit(
3177 3183 self,
3178 3184 text=b"",
3179 3185 user=None,
3180 3186 date=None,
3181 3187 match=None,
3182 3188 force=False,
3183 3189 editor=None,
3184 3190 extra=None,
3185 3191 ):
3186 3192 """Add a new revision to current repository.
3187 3193
3188 3194 Revision information is gathered from the working directory,
3189 3195 match can be used to filter the committed files. If editor is
3190 3196 supplied, it is called to get a commit message.
3191 3197 """
3192 3198 if extra is None:
3193 3199 extra = {}
3194 3200
3195 3201 def fail(f, msg):
3196 3202 raise error.InputError(b'%s: %s' % (f, msg))
3197 3203
3198 3204 if not match:
3199 3205 match = matchmod.always()
3200 3206
3201 3207 if not force:
3202 3208 match.bad = fail
3203 3209
3204 3210 # lock() for recent changelog (see issue4368)
3205 3211 with self.wlock(), self.lock():
3206 3212 wctx = self[None]
3207 3213 merge = len(wctx.parents()) > 1
3208 3214
3209 3215 if not force and merge and not match.always():
3210 3216 raise error.Abort(
3211 3217 _(
3212 3218 b'cannot partially commit a merge '
3213 3219 b'(do not specify files or patterns)'
3214 3220 )
3215 3221 )
3216 3222
3217 3223 status = self.status(match=match, clean=force)
3218 3224 if force:
3219 3225 status.modified.extend(
3220 3226 status.clean
3221 3227 ) # mq may commit clean files
3222 3228
3223 3229 # check subrepos
3224 3230 subs, commitsubs, newstate = subrepoutil.precommit(
3225 3231 self.ui, wctx, status, match, force=force
3226 3232 )
3227 3233
3228 3234 # make sure all explicit patterns are matched
3229 3235 if not force:
3230 3236 self.checkcommitpatterns(wctx, match, status, fail)
3231 3237
3232 3238 cctx = context.workingcommitctx(
3233 3239 self, status, text, user, date, extra
3234 3240 )
3235 3241
3236 3242 ms = mergestatemod.mergestate.read(self)
3237 3243 mergeutil.checkunresolved(ms)
3238 3244
3239 3245 # internal config: ui.allowemptycommit
3240 3246 if cctx.isempty() and not self.ui.configbool(
3241 3247 b'ui', b'allowemptycommit'
3242 3248 ):
3243 3249 self.ui.debug(b'nothing to commit, clearing merge state\n')
3244 3250 ms.reset()
3245 3251 return None
3246 3252
3247 3253 if merge and cctx.deleted():
3248 3254 raise error.Abort(_(b"cannot commit merge with missing files"))
3249 3255
3250 3256 if editor:
3251 3257 cctx._text = editor(self, cctx, subs)
3252 3258 edited = text != cctx._text
3253 3259
3254 3260 # Save commit message in case this transaction gets rolled back
3255 3261 # (e.g. by a pretxncommit hook). Leave the content alone on
3256 3262 # the assumption that the user will use the same editor again.
3257 3263 msg_path = self.savecommitmessage(cctx._text)
3258 3264
3259 3265 # commit subs and write new state
3260 3266 if subs:
3261 3267 uipathfn = scmutil.getuipathfn(self)
3262 3268 for s in sorted(commitsubs):
3263 3269 sub = wctx.sub(s)
3264 3270 self.ui.status(
3265 3271 _(b'committing subrepository %s\n')
3266 3272 % uipathfn(subrepoutil.subrelpath(sub))
3267 3273 )
3268 3274 sr = sub.commit(cctx._text, user, date)
3269 3275 newstate[s] = (newstate[s][0], sr)
3270 3276 subrepoutil.writestate(self, newstate)
3271 3277
3272 3278 p1, p2 = self.dirstate.parents()
3273 3279 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3274 3280 try:
3275 3281 self.hook(
3276 3282 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3277 3283 )
3278 3284 with self.transaction(b'commit'):
3279 3285 ret = self.commitctx(cctx, True)
3280 3286 # update bookmarks, dirstate and mergestate
3281 3287 bookmarks.update(self, [p1, p2], ret)
3282 3288 cctx.markcommitted(ret)
3283 3289 ms.reset()
3284 3290 except: # re-raises
3285 3291 if edited:
3286 3292 self.ui.write(
3287 3293 _(b'note: commit message saved in %s\n') % msg_path
3288 3294 )
3289 3295 self.ui.write(
3290 3296 _(
3291 3297 b"note: use 'hg commit --logfile "
3292 3298 b"%s --edit' to reuse it\n"
3293 3299 )
3294 3300 % msg_path
3295 3301 )
3296 3302 raise
3297 3303
3298 3304 def commithook(unused_success):
3299 3305 # hack for command that use a temporary commit (eg: histedit)
3300 3306 # temporary commit got stripped before hook release
3301 3307 if self.changelog.hasnode(ret):
3302 3308 self.hook(
3303 3309 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3304 3310 )
3305 3311
3306 3312 self._afterlock(commithook)
3307 3313 return ret
3308 3314
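# Editor's note: a hedged sketch of driving commit() programmatically; the
# file names, user and message below are illustrative only.
#
#     m = matchmod.exact([b'a.txt', b'b.txt'])
#     node = repo.commit(
#         text=b'update a and b', user=b'alice <a@example.com>', match=m
#     )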
3309 3315 @unfilteredmethod
3310 3316 def commitctx(self, ctx, error=False, origctx=None):
3311 3317 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3312 3318
3313 3319 @unfilteredmethod
3314 3320 def destroying(self):
3315 3321 """Inform the repository that nodes are about to be destroyed.
3316 3322 Intended for use by strip and rollback, so there's a common
3317 3323 place for anything that has to be done before destroying history.
3318 3324
3319 3325 This is mostly useful for saving state that is in memory and waiting
3320 3326 to be flushed when the current lock is released. Because a call to
3321 3327 destroyed is imminent, the repo will be invalidated causing those
3322 3328 changes to stay in memory (waiting for the next unlock), or vanish
3323 3329 completely.
3324 3330 """
3325 3331 # When using the same lock to commit and strip, the phasecache is left
3326 3332 # dirty after committing. Then when we strip, the repo is invalidated,
3327 3333 # causing those changes to disappear.
3328 3334 if '_phasecache' in vars(self):
3329 3335 self._phasecache.write()
3330 3336
3331 3337 @unfilteredmethod
3332 3338 def destroyed(self):
3333 3339 """Inform the repository that nodes have been destroyed.
3334 3340 Intended for use by strip and rollback, so there's a common
3335 3341 place for anything that has to be done after destroying history.
3336 3342 """
3337 3343 # When one tries to:
3338 3344 # 1) destroy nodes thus calling this method (e.g. strip)
3339 3345 # 2) use phasecache somewhere (e.g. commit)
3340 3346 #
3341 3347 # then 2) will fail because the phasecache contains nodes that were
3342 3348 # removed. We can either remove phasecache from the filecache,
3343 3349 # causing it to reload next time it is accessed, or simply filter
3344 3350 # the removed nodes now and write the updated cache.
3345 3351 self._phasecache.filterunknown(self)
3346 3352 self._phasecache.write()
3347 3353
3348 3354 # refresh all repository caches
3349 3355 self.updatecaches()
3350 3356
3351 3357 # Ensure the persistent tag cache is updated. Doing it now
3352 3358 # means that the tag cache only has to worry about destroyed
3353 3359 # heads immediately after a strip/rollback. That in turn
3354 3360 # guarantees that "cachetip == currenttip" (comparing both rev
3355 3361 # and node) always means no nodes have been added or destroyed.
3356 3362
3357 3363 # XXX this is suboptimal when qrefresh'ing: we strip the current
3358 3364 # head, refresh the tag cache, then immediately add a new head.
3359 3365 # But I think doing it this way is necessary for the "instant
3360 3366 # tag cache retrieval" case to work.
3361 3367 self.invalidate()
3362 3368
3363 3369 def status(
3364 3370 self,
3365 3371 node1=b'.',
3366 3372 node2=None,
3367 3373 match=None,
3368 3374 ignored=False,
3369 3375 clean=False,
3370 3376 unknown=False,
3371 3377 listsubrepos=False,
3372 3378 ):
3373 3379 '''a convenience method that calls node1.status(node2)'''
3374 3380 return self[node1].status(
3375 3381 node2, match, ignored, clean, unknown, listsubrepos
3376 3382 )
3377 3383
3378 3384 def addpostdsstatus(self, ps):
3379 3385 """Add a callback to run within the wlock, at the point at which status
3380 3386 fixups happen.
3381 3387
3382 3388 On status completion, callback(wctx, status) will be called with the
3383 3389 wlock held, unless the dirstate has changed from underneath or the wlock
3384 3390 couldn't be grabbed.
3385 3391
3386 3392 Callbacks should not capture and use a cached copy of the dirstate --
3387 3393 it might change in the meanwhile. Instead, they should access the
3388 3394 dirstate via wctx.repo().dirstate.
3389 3395
3390 3396 This list is emptied out after each status run -- extensions should
3391 3397 make sure they add to this list each time dirstate.status is called.
3392 3398 Extensions should also make sure they don't call this for statuses
3393 3399 that don't involve the dirstate.
3394 3400 """
3395 3401
3396 3402 # The list is located here for uniqueness reasons -- it is actually
3397 3403 # managed by the workingctx, but that isn't unique per-repo.
3398 3404 self._postdsstatus.append(ps)
3399 3405
3400 3406 def postdsstatus(self):
3401 3407 """Used by workingctx to get the list of post-dirstate-status hooks."""
3402 3408 return self._postdsstatus
3403 3409
3404 3410 def clearpostdsstatus(self):
3405 3411 """Used by workingctx to clear post-dirstate-status hooks."""
3406 3412 del self._postdsstatus[:]
3407 3413
3408 3414 def heads(self, start=None):
3409 3415 if start is None:
3410 3416 cl = self.changelog
3411 3417 headrevs = reversed(cl.headrevs())
3412 3418 return [cl.node(rev) for rev in headrevs]
3413 3419
3414 3420 heads = self.changelog.heads(start)
3415 3421 # sort the output in rev descending order
3416 3422 return sorted(heads, key=self.changelog.rev, reverse=True)
3417 3423
3418 3424 def branchheads(self, branch=None, start=None, closed=False):
3419 3425 """return a (possibly filtered) list of heads for the given branch
3420 3426
3421 3427 Heads are returned in topological order, from newest to oldest.
3422 3428 If branch is None, use the dirstate branch.
3423 3429 If start is not None, return only heads reachable from start.
3424 3430 If closed is True, return heads that are marked as closed as well.
3425 3431 """
3426 3432 if branch is None:
3427 3433 branch = self[None].branch()
3428 3434 branches = self.branchmap()
3429 3435 if not branches.hasbranch(branch):
3430 3436 return []
3431 3437 # the cache returns heads ordered lowest to highest
3432 3438 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3433 3439 if start is not None:
3434 3440 # filter out the heads that cannot be reached from startrev
3435 3441 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3436 3442 bheads = [h for h in bheads if h in fbheads]
3437 3443 return bheads
3438 3444
3439 3445 def branches(self, nodes):
3440 3446 if not nodes:
3441 3447 nodes = [self.changelog.tip()]
3442 3448 b = []
3443 3449 for n in nodes:
3444 3450 t = n
3445 3451 while True:
3446 3452 p = self.changelog.parents(n)
3447 3453 if p[1] != self.nullid or p[0] == self.nullid:
3448 3454 b.append((t, n, p[0], p[1]))
3449 3455 break
3450 3456 n = p[0]
3451 3457 return b
3452 3458
3453 3459 def between(self, pairs):
3454 3460 r = []
3455 3461
3456 3462 for top, bottom in pairs:
3457 3463 n, l, i = top, [], 0
3458 3464 f = 1
3459 3465
3460 3466 while n != bottom and n != self.nullid:
3461 3467 p = self.changelog.parents(n)[0]
3462 3468 if i == f:
3463 3469 l.append(n)
3464 3470 f = f * 2
3465 3471 n = p
3466 3472 i += 1
3467 3473
3468 3474 r.append(l)
3469 3475
3470 3476 return r
3471 3477
3472 3478 def checkpush(self, pushop):
3473 3479 """Extensions can override this function if additional checks have
3474 3480 to be performed before pushing, or call it if they override push
3475 3481 command.
3476 3482 """
3477 3483
3478 3484 @unfilteredpropertycache
3479 3485 def prepushoutgoinghooks(self):
3480 3486 """Return util.hooks consists of a pushop with repo, remote, outgoing
3481 3487 methods, which are called before pushing changesets.
3482 3488 """
3483 3489 return util.hooks()
3484 3490
3485 3491 def pushkey(self, namespace, key, old, new):
3486 3492 try:
3487 3493 tr = self.currenttransaction()
3488 3494 hookargs = {}
3489 3495 if tr is not None:
3490 3496 hookargs.update(tr.hookargs)
3491 3497 hookargs = pycompat.strkwargs(hookargs)
3492 3498 hookargs['namespace'] = namespace
3493 3499 hookargs['key'] = key
3494 3500 hookargs['old'] = old
3495 3501 hookargs['new'] = new
3496 3502 self.hook(b'prepushkey', throw=True, **hookargs)
3497 3503 except error.HookAbort as exc:
3498 3504 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3499 3505 if exc.hint:
3500 3506 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3501 3507 return False
3502 3508 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3503 3509 ret = pushkey.push(self, namespace, key, old, new)
3504 3510
3505 3511 def runhook(unused_success):
3506 3512 self.hook(
3507 3513 b'pushkey',
3508 3514 namespace=namespace,
3509 3515 key=key,
3510 3516 old=old,
3511 3517 new=new,
3512 3518 ret=ret,
3513 3519 )
3514 3520
3515 3521 self._afterlock(runhook)
3516 3522 return ret
3517 3523
3518 3524 def listkeys(self, namespace):
3519 3525 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3520 3526 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3521 3527 values = pushkey.list(self, namespace)
3522 3528 self.hook(b'listkeys', namespace=namespace, values=values)
3523 3529 return values
3524 3530
3525 3531 def debugwireargs(self, one, two, three=None, four=None, five=None):
3526 3532 '''used to test argument passing over the wire'''
3527 3533 return b"%s %s %s %s %s" % (
3528 3534 one,
3529 3535 two,
3530 3536 pycompat.bytestr(three),
3531 3537 pycompat.bytestr(four),
3532 3538 pycompat.bytestr(five),
3533 3539 )
3534 3540
3535 3541 def savecommitmessage(self, text):
3536 3542 fp = self.vfs(b'last-message.txt', b'wb')
3537 3543 try:
3538 3544 fp.write(text)
3539 3545 finally:
3540 3546 fp.close()
3541 3547 return self.pathto(fp.name[len(self.root) + 1 :])
3542 3548
3543 3549 def register_wanted_sidedata(self, category):
3544 3550 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3545 3551 # Only revlogv2 repos can want sidedata.
3546 3552 return
3547 3553 self._wanted_sidedata.add(pycompat.bytestr(category))
3548 3554
3549 3555 def register_sidedata_computer(
3550 3556 self, kind, category, keys, computer, flags, replace=False
3551 3557 ):
3552 3558 if kind not in revlogconst.ALL_KINDS:
3553 3559 msg = _(b"unexpected revlog kind '%s'.")
3554 3560 raise error.ProgrammingError(msg % kind)
3555 3561 category = pycompat.bytestr(category)
3556 3562 already_registered = category in self._sidedata_computers.get(kind, [])
3557 3563 if already_registered and not replace:
3558 3564 msg = _(
3559 3565 b"cannot register a sidedata computer twice for category '%s'."
3560 3566 )
3561 3567 raise error.ProgrammingError(msg % category)
3562 3568 if replace and not already_registered:
3563 3569 msg = _(
3564 3570 b"cannot replace a sidedata computer that isn't registered "
3565 3571 b"for category '%s'."
3566 3572 )
3567 3573 raise error.ProgrammingError(msg % category)
3568 3574 self._sidedata_computers.setdefault(kind, {})
3569 3575 self._sidedata_computers[kind][category] = (keys, computer, flags)
3570 3576
3571 3577
3572 3578 # used to avoid circular references so destructors work
3573 3579 def aftertrans(files):
3574 3580 renamefiles = [tuple(t) for t in files]
3575 3581
3576 3582 def a():
3577 3583 for vfs, src, dest in renamefiles:
3578 3584 # if src and dest refer to a same file, vfs.rename is a no-op,
3579 3585 # leaving both src and dest on disk. delete dest to make sure
3580 3586 # the rename couldn't be such a no-op.
3581 3587 vfs.tryunlink(dest)
3582 3588 try:
3583 3589 vfs.rename(src, dest)
3584 3590 except FileNotFoundError: # journal file does not yet exist
3585 3591 pass
3586 3592
3587 3593 return a
3588 3594
3589 3595
3590 3596 def undoname(fn: bytes) -> bytes:
3591 3597 base, name = os.path.split(fn)
3592 3598 assert name.startswith(b'journal')
3593 3599 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3594 3600
3595 3601
3596 3602 def instance(ui, path: bytes, create, intents=None, createopts=None):
3597 3603 # prevent cyclic import localrepo -> upgrade -> localrepo
3598 3604 from . import upgrade
3599 3605
3600 3606 localpath = urlutil.urllocalpath(path)
3601 3607 if create:
3602 3608 createrepository(ui, localpath, createopts=createopts)
3603 3609
3604 3610 def repo_maker():
3605 3611 return makelocalrepository(ui, localpath, intents=intents)
3606 3612
3607 3613 repo = repo_maker()
3608 3614 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3609 3615 return repo
3610 3616
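# Editor's note: a hedged sketch of creating and opening a repository through
# this entry point; the path and the narrowfiles option are illustrative.
#
#     from mercurial import ui as uimod
#     repo = instance(
#         uimod.ui.load(), b'/tmp/example-repo', create=True,
#         createopts={b'narrowfiles': True},
#     )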
3611 3617
3612 3618 def islocal(path: bytes) -> bool:
3613 3619 return True
3614 3620
3615 3621
3616 3622 def defaultcreateopts(ui, createopts=None):
3617 3623 """Populate the default creation options for a repository.
3618 3624
3619 3625 A dictionary of explicitly requested creation options can be passed
3620 3626 in. Missing keys will be populated.
3621 3627 """
3622 3628 createopts = dict(createopts or {})
3623 3629
3624 3630 if b'backend' not in createopts:
3625 3631 # experimental config: storage.new-repo-backend
3626 3632 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3627 3633
3628 3634 return createopts
3629 3635
3630 3636
3631 3637 def clone_requirements(ui, createopts, srcrepo):
3632 3638 """clone the requirements of a local repo for a local clone
3633 3639
3634 3640 The store requirements are unchanged while the working copy requirements
3635 3641 depend on the configuration
3636 3642 """
3637 3643 target_requirements = set()
3638 3644 if not srcrepo.requirements:
3639 3645 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3640 3646 # with it.
3641 3647 return target_requirements
3642 3648 createopts = defaultcreateopts(ui, createopts=createopts)
3643 3649 for r in newreporequirements(ui, createopts):
3644 3650 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3645 3651 target_requirements.add(r)
3646 3652
3647 3653 for r in srcrepo.requirements:
3648 3654 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3649 3655 target_requirements.add(r)
3650 3656 return target_requirements
3651 3657
3652 3658
3653 3659 def newreporequirements(ui, createopts):
3654 3660 """Determine the set of requirements for a new local repository.
3655 3661
3656 3662 Extensions can wrap this function to specify custom requirements for
3657 3663 new repositories.
3658 3664 """
3659 3665
3660 3666 if b'backend' not in createopts:
3661 3667 raise error.ProgrammingError(
3662 3668 b'backend key not present in createopts; '
3663 3669 b'was defaultcreateopts() called?'
3664 3670 )
3665 3671
3666 3672 if createopts[b'backend'] != b'revlogv1':
3667 3673 raise error.Abort(
3668 3674 _(
3669 3675 b'unable to determine repository requirements for '
3670 3676 b'storage backend: %s'
3671 3677 )
3672 3678 % createopts[b'backend']
3673 3679 )
3674 3680
3675 3681 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3676 3682 if ui.configbool(b'format', b'usestore'):
3677 3683 requirements.add(requirementsmod.STORE_REQUIREMENT)
3678 3684 if ui.configbool(b'format', b'usefncache'):
3679 3685 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3680 3686 if ui.configbool(b'format', b'dotencode'):
3681 3687 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3682 3688
3683 3689 compengines = ui.configlist(b'format', b'revlog-compression')
3684 3690 for compengine in compengines:
3685 3691 if compengine in util.compengines:
3686 3692 engine = util.compengines[compengine]
3687 3693 if engine.available() and engine.revlogheader():
3688 3694 break
3689 3695 else:
3690 3696 raise error.Abort(
3691 3697 _(
3692 3698 b'compression engines %s defined by '
3693 3699 b'format.revlog-compression not available'
3694 3700 )
3695 3701 % b', '.join(b'"%s"' % e for e in compengines),
3696 3702 hint=_(
3697 3703 b'run "hg debuginstall" to list available '
3698 3704 b'compression engines'
3699 3705 ),
3700 3706 )
3701 3707
3702 3708 # zlib is the historical default and doesn't need an explicit requirement.
3703 3709 if compengine == b'zstd':
3704 3710 requirements.add(b'revlog-compression-zstd')
3705 3711 elif compengine != b'zlib':
3706 3712 requirements.add(b'exp-compression-%s' % compengine)
3707 3713
3708 3714 if scmutil.gdinitconfig(ui):
3709 3715 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3710 3716 if ui.configbool(b'format', b'sparse-revlog'):
3711 3717 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3712 3718
3713 3719 # experimental config: format.use-dirstate-v2
3714 3720 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3715 3721 if ui.configbool(b'format', b'use-dirstate-v2'):
3716 3722 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3717 3723
3718 3724 # experimental config: format.exp-use-copies-side-data-changeset
3719 3725 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3720 3726 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3721 3727 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3722 3728 if ui.configbool(b'experimental', b'treemanifest'):
3723 3729 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3724 3730
3725 3731 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3726 3732 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3727 3733 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3728 3734
3729 3735 revlogv2 = ui.config(b'experimental', b'revlogv2')
3730 3736 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3731 3737 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3732 3738 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3733 3739 # experimental config: format.internal-phase
3734 3740 if ui.configbool(b'format', b'use-internal-phase'):
3735 3741 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3736 3742
3737 3743 # experimental config: format.exp-archived-phase
3738 3744 if ui.configbool(b'format', b'exp-archived-phase'):
3739 3745 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3740 3746
3741 3747 if createopts.get(b'narrowfiles'):
3742 3748 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3743 3749
3744 3750 if createopts.get(b'lfs'):
3745 3751 requirements.add(b'lfs')
3746 3752
3747 3753 if ui.configbool(b'format', b'bookmarks-in-store'):
3748 3754 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3749 3755
3750 3756 if ui.configbool(b'format', b'use-persistent-nodemap'):
3751 3757 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3752 3758
3753 3759 # if share-safe is enabled, let's create the new repository with the new
3754 3760 # requirement
3755 3761 if ui.configbool(b'format', b'use-share-safe'):
3756 3762 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3757 3763
3758 3764 # if we are creating a share-repo¹ we have to handle requirement
3759 3765 # differently.
3760 3766 #
3761 3767 # [1] (i.e. reusing the store from another repository, just having a
3762 3768 # working copy)
3763 3769 if b'sharedrepo' in createopts:
3764 3770 source_requirements = set(createopts[b'sharedrepo'].requirements)
3765 3771
3766 3772 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3767 3773 # share to an old school repository, we have to copy the
3768 3774 # requirements and hope for the best.
3769 3775 requirements = source_requirements
3770 3776 else:
3771 3777 # We have control on the working copy only, so "copy" the non
3772 3778 # working copy part over, ignoring previous logic.
3773 3779 to_drop = set()
3774 3780 for req in requirements:
3775 3781 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3776 3782 continue
3777 3783 if req in source_requirements:
3778 3784 continue
3779 3785 to_drop.add(req)
3780 3786 requirements -= to_drop
3781 3787 requirements |= source_requirements
3782 3788
3783 3789 if createopts.get(b'sharedrelative'):
3784 3790 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3785 3791 else:
3786 3792 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3787 3793
3788 3794 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3789 3795 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3790 3796 msg = _(b"ignoring unknown tracked key version: %d\n")
3791 3797 hint = _(
3792 3798 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3793 3799 )
3794 3800 if version != 1:
3795 3801 ui.warn(msg % version, hint=hint)
3796 3802 else:
3797 3803 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3798 3804
3799 3805 return requirements
3800 3806
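# Editor's note: a hedged sketch of how an extension might wrap
# newreporequirements(), as the docstring suggests; the requirement name and
# wrapper function are illustrative.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myextension-requirement')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )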
3801 3807
3802 3808 def checkrequirementscompat(ui, requirements):
3803 3809 """Checks compatibility of repository requirements enabled and disabled.
3804 3810
3805 3811 Returns a set of requirements which need to be dropped because dependent
3806 3812 requirements are not enabled. Also warns users about it.
3807 3813
3808 3814 dropped = set()
3809 3815
3810 3816 if requirementsmod.STORE_REQUIREMENT not in requirements:
3811 3817 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3812 3818 ui.warn(
3813 3819 _(
3814 3820 b'ignoring enabled \'format.bookmarks-in-store\' config '
3815 3821 b'because it is incompatible with disabled '
3816 3822 b'\'format.usestore\' config\n'
3817 3823 )
3818 3824 )
3819 3825 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3820 3826
3821 3827 if (
3822 3828 requirementsmod.SHARED_REQUIREMENT in requirements
3823 3829 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3824 3830 ):
3825 3831 raise error.Abort(
3826 3832 _(
3827 3833 b"cannot create shared repository as source was created"
3828 3834 b" with 'format.usestore' config disabled"
3829 3835 )
3830 3836 )
3831 3837
3832 3838 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3833 3839 if ui.hasconfig(b'format', b'use-share-safe'):
3834 3840 msg = _(
3835 3841 b"ignoring enabled 'format.use-share-safe' config because "
3836 3842 b"it is incompatible with disabled 'format.usestore'"
3837 3843 b" config\n"
3838 3844 )
3839 3845 ui.warn(msg)
3840 3846 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3841 3847
3842 3848 return dropped
3843 3849
3844 3850
3845 3851 def filterknowncreateopts(ui, createopts):
3846 3852 """Filters a dict of repo creation options against options that are known.
3847 3853
3848 3854 Receives a dict of repo creation options and returns a dict of those
3849 3855 options that we don't know how to handle.
3850 3856
3851 3857 This function is called as part of repository creation. If the
3852 3858 returned dict contains any items, repository creation will not
3853 3859 be allowed, as it means there was a request to create a repository
3854 3860 with options not recognized by loaded code.
3855 3861
3856 3862 Extensions can wrap this function to filter out creation options
3857 3863 they know how to handle.
3858 3864 """
3859 3865 known = {
3860 3866 b'backend',
3861 3867 b'lfs',
3862 3868 b'narrowfiles',
3863 3869 b'sharedrepo',
3864 3870 b'sharedrelative',
3865 3871 b'shareditems',
3866 3872 b'shallowfilestore',
3867 3873 }
3868 3874
3869 3875 return {k: v for k, v in createopts.items() if k not in known}
3870 3876
3871 3877
3872 3878 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3873 3879 """Create a new repository in a vfs.
3874 3880
3875 3881 ``path`` path to the new repo's working directory.
3876 3882 ``createopts`` options for the new repository.
3877 3883 ``requirements`` predefined set of requirements.
3878 3884 (incompatible with ``createopts``)
3879 3885
3880 3886 The following keys for ``createopts`` are recognized:
3881 3887
3882 3888 backend
3883 3889 The storage backend to use.
3884 3890 lfs
3885 3891 Repository will be created with ``lfs`` requirement. The lfs extension
3886 3892 will automatically be loaded when the repository is accessed.
3887 3893 narrowfiles
3888 3894 Set up repository to support narrow file storage.
3889 3895 sharedrepo
3890 3896 Repository object from which storage should be shared.
3891 3897 sharedrelative
3892 3898 Boolean indicating if the path to the shared repo should be
3893 3899 stored as relative. By default, the pointer to the "parent" repo
3894 3900 is stored as an absolute path.
3895 3901 shareditems
3896 3902 Set of items to share to the new repository (in addition to storage).
3897 3903 shallowfilestore
3898 3904 Indicates that storage for files should be shallow (not all ancestor
3899 3905 revisions are known).
3900 3906 """
3901 3907
3902 3908 if requirements is not None:
3903 3909 if createopts is not None:
3904 3910 msg = b'cannot specify both createopts and requirements'
3905 3911 raise error.ProgrammingError(msg)
3906 3912 createopts = {}
3907 3913 else:
3908 3914 createopts = defaultcreateopts(ui, createopts=createopts)
3909 3915
3910 3916 unknownopts = filterknowncreateopts(ui, createopts)
3911 3917
3912 3918 if not isinstance(unknownopts, dict):
3913 3919 raise error.ProgrammingError(
3914 3920 b'filterknowncreateopts() did not return a dict'
3915 3921 )
3916 3922
3917 3923 if unknownopts:
3918 3924 raise error.Abort(
3919 3925 _(
3920 3926 b'unable to create repository because of unknown '
3921 3927 b'creation option: %s'
3922 3928 )
3923 3929 % b', '.join(sorted(unknownopts)),
3924 3930 hint=_(b'is a required extension not loaded?'),
3925 3931 )
3926 3932
3927 3933 requirements = newreporequirements(ui, createopts=createopts)
3928 3934 requirements -= checkrequirementscompat(ui, requirements)
3929 3935
3930 3936 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3931 3937
3932 3938 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3933 3939 if hgvfs.exists():
3934 3940 raise error.RepoError(_(b'repository %s already exists') % path)
3935 3941
3936 3942 if b'sharedrepo' in createopts:
3937 3943 sharedpath = createopts[b'sharedrepo'].sharedpath
3938 3944
3939 3945 if createopts.get(b'sharedrelative'):
3940 3946 try:
3941 3947 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3942 3948 sharedpath = util.pconvert(sharedpath)
3943 3949 except (IOError, ValueError) as e:
3944 3950 # ValueError is raised on Windows if the drive letters differ
3945 3951 # on each path.
3946 3952 raise error.Abort(
3947 3953 _(b'cannot calculate relative path'),
3948 3954 hint=stringutil.forcebytestr(e),
3949 3955 )
3950 3956
3951 3957 if not wdirvfs.exists():
3952 3958 wdirvfs.makedirs()
3953 3959
3954 3960 hgvfs.makedir(notindexed=True)
3955 3961 if b'sharedrepo' not in createopts:
3956 3962 hgvfs.mkdir(b'cache')
3957 3963 hgvfs.mkdir(b'wcache')
3958 3964
3959 3965 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3960 3966 if has_store and b'sharedrepo' not in createopts:
3961 3967 hgvfs.mkdir(b'store')
3962 3968
3963 3969 # We create an invalid changelog outside the store so very old
3964 3970 # Mercurial versions (which didn't know about the requirements
3965 3971 # file) encounter an error on reading the changelog. This
3966 3972 # effectively locks out old clients and prevents them from
3967 3973 # mucking with a repo in an unknown format.
3968 3974 #
3969 3975 # The revlog header has version 65535, which won't be recognized by
3970 3976 # such old clients.
3971 3977 hgvfs.append(
3972 3978 b'00changelog.i',
3973 3979 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3974 3980 b'layout',
3975 3981 )
3976 3982
3977 3983 # Filter the requirements into working copy and store ones
3978 3984 wcreq, storereq = scmutil.filterrequirements(requirements)
3979 3985 # write working copy ones
3980 3986 scmutil.writerequires(hgvfs, wcreq)
3981 3987 # If there are store requirements and the current repository
3982 3988 # is not a shared one, write stored requirements
3983 3989 # For new shared repository, we don't need to write the store
3984 3990 # requirements as they are already present in store requires
3985 3991 if storereq and b'sharedrepo' not in createopts:
3986 3992 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3987 3993 scmutil.writerequires(storevfs, storereq)
3988 3994
3989 3995 # Write out file telling readers where to find the shared store.
3990 3996 if b'sharedrepo' in createopts:
3991 3997 hgvfs.write(b'sharedpath', sharedpath)
3992 3998
3993 3999 if createopts.get(b'shareditems'):
3994 4000 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3995 4001 hgvfs.write(b'shared', shared)
3996 4002
3997 4003
3998 4004 def poisonrepository(repo):
3999 4005 """Poison a repository instance so it can no longer be used."""
4000 4006 # Perform any cleanup on the instance.
4001 4007 repo.close()
4002 4008
4003 4009 # Our strategy is to replace the type of the object with one that
4004 4010 # has all attribute lookups result in error.
4005 4011 #
4006 4012 # But we have to allow the close() method because some constructors
4007 4013 # of repos call close() on repo references.
4008 4014 class poisonedrepository:
4009 4015 def __getattribute__(self, item):
4010 4016 if item == 'close':
4011 4017 return object.__getattribute__(self, item)
4012 4018
4013 4019 raise error.ProgrammingError(
4014 4020 b'repo instances should not be used after unshare'
4015 4021 )
4016 4022
4017 4023 def close(self):
4018 4024 pass
4019 4025
4020 4026 # We may have a repoview, which intercepts __setattr__. So be sure
4021 4027 # we operate at the lowest level possible.
4022 4028 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,362 +1,393 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 import weakref
8 9
9 10 from .i18n import _
10 11 from .pycompat import getattr
11 12 from . import (
12 13 error,
13 14 match as matchmod,
14 15 merge,
15 16 mergestate as mergestatemod,
16 17 requirements,
17 18 scmutil,
18 19 sparse,
19 20 util,
20 21 )
21 22
22 23 # The file in .hg/store/ that indicates which paths exist in the store
23 24 FILENAME = b'narrowspec'
24 25 # The file in .hg/ that indicates which paths exist in the dirstate
25 26 DIRSTATE_FILENAME = b'narrowspec.dirstate'
26 27
27 28 # Pattern prefixes that are allowed in narrow patterns. This list MUST
28 29 # only contain patterns that are fast and safe to evaluate. Keep in mind
29 30 # that patterns are supplied by clients and executed on remote servers
30 31 # as part of wire protocol commands. That means that changes to this
31 32 # data structure influence the wire protocol and should not be taken
32 33 # lightly - especially removals.
33 34 VALID_PREFIXES = (
34 35 b'path:',
35 36 b'rootfilesin:',
36 37 )
37 38
38 39
39 40 def normalizesplitpattern(kind, pat):
40 41 """Returns the normalized version of a pattern and kind.
41 42
42 43 Returns a tuple with the normalized kind and normalized pattern.
43 44 """
44 45 pat = pat.rstrip(b'/')
45 46 _validatepattern(pat)
46 47 return kind, pat
47 48
48 49
49 50 def _numlines(s):
50 51 """Returns the number of lines in s, including ending empty lines."""
51 52 # We use splitlines because it is Unicode-friendly and thus Python 3
52 53 # compatible. However, it does not count empty lines at the end, so trick
53 54 # it by adding a character at the end.
54 55 return len((s + b'x').splitlines())
55 56
56 57
57 58 def _validatepattern(pat):
58 59 """Validates the pattern and aborts if it is invalid.
59 60
60 61 Patterns are stored in the narrowspec as newline-separated
61 62 POSIX-style bytestring paths. There's no escaping.
62 63 """
63 64
64 65 # We use newlines as separators in the narrowspec file, so don't allow them
65 66 # in patterns.
66 67 if _numlines(pat) > 1:
67 68 raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))
68 69
69 70 components = pat.split(b'/')
70 71 if b'.' in components or b'..' in components:
71 72 raise error.Abort(
72 73 _(b'"." and ".." are not allowed in narrowspec paths')
73 74 )
74 75
75 76
76 77 def normalizepattern(pattern, defaultkind=b'path'):
77 78 """Returns the normalized version of a text-format pattern.
78 79
79 80 If the pattern has no kind, the default will be added.
80 81 """
81 82 kind, pat = matchmod._patsplit(pattern, defaultkind)
82 83 return b'%s:%s' % normalizesplitpattern(kind, pat)
83 84
84 85
85 86 def parsepatterns(pats):
86 87 """Parses an iterable of patterns into a typed pattern set.
87 88
88 89 Patterns are assumed to be ``path:`` if no prefix is present.
89 90 For safety and performance reasons, only some prefixes are allowed.
90 91 See ``validatepatterns()``.
91 92
92 93 This function should be used on patterns that come from the user to
93 94 normalize and validate them to the internal data structure used for
94 95 representing patterns.
95 96 """
96 97 res = {normalizepattern(orig) for orig in pats}
97 98 validatepatterns(res)
98 99 return res
99 100
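# Editor's note: an illustrative example of the normalization performed above;
# patterns without a prefix default to 'path:'.
#
#     parsepatterns([b'foo/bar', b'rootfilesin:baz'])
#     # -> {b'path:foo/bar', b'rootfilesin:baz'}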
100 101
101 102 def validatepatterns(pats):
102 103 """Validate that patterns are in the expected data structure and format.
103 104
104 105 And that is a set of normalized patterns beginning with ``path:`` or
105 106 ``rootfilesin:``.
106 107
107 108 This function should be used to validate internal data structures
108 109 and patterns that are loaded from sources that use the internal,
109 110 prefixed pattern representation (but can't necessarily be fully trusted).
110 111 """
111 112 with util.timedcm('narrowspec.validatepatterns(pats size=%d)', len(pats)):
112 113 if not isinstance(pats, set):
113 114 raise error.ProgrammingError(
114 115 b'narrow patterns should be a set; got %r' % pats
115 116 )
116 117
117 118 for pat in pats:
118 119 if not pat.startswith(VALID_PREFIXES):
119 120 # Use a Mercurial exception because this can happen due to user
120 121 # bugs (e.g. manually updating spec file).
121 122 raise error.Abort(
122 123 _(b'invalid prefix on narrow pattern: %s') % pat,
123 124 hint=_(
124 125 b'narrow patterns must begin with one of '
125 126 b'the following: %s'
126 127 )
127 128 % b', '.join(VALID_PREFIXES),
128 129 )
129 130
130 131
131 132 def format(includes, excludes):
132 133 output = b'[include]\n'
133 134 for i in sorted(includes - excludes):
134 135 output += i + b'\n'
135 136 output += b'[exclude]\n'
136 137 for e in sorted(excludes):
137 138 output += e + b'\n'
138 139 return output
139 140
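# Editor's note: an illustrative example of the on-disk format produced above,
# for includes {b'path:foo', b'path:foo/bar'} and excludes {b'path:foo/bar'}:
#
#     [include]
#     path:foo
#     [exclude]
#     path:foo/bar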
140 141
141 142 def match(root, include=None, exclude=None):
142 143 if not include:
143 144 # Passing empty include and empty exclude to matchmod.match()
144 145 # gives a matcher that matches everything, so explicitly use
145 146 # the nevermatcher.
146 147 return matchmod.never()
147 148 return matchmod.match(
148 149 root, b'', [], include=include or [], exclude=exclude or []
149 150 )
150 151
151 152
152 153 def parseconfig(ui, spec):
153 154 # maybe we should care about the profiles returned too
154 155 includepats, excludepats, profiles = sparse.parseconfig(ui, spec, b'narrow')
155 156 if profiles:
156 157 raise error.Abort(
157 158 _(
158 159 b"including other spec files using '%include' is not"
159 160 b" supported in narrowspec"
160 161 )
161 162 )
162 163
163 164 validatepatterns(includepats)
164 165 validatepatterns(excludepats)
165 166
166 167 return includepats, excludepats
167 168
168 169
169 170 def load(repo):
170 171 # Treat "narrowspec does not exist" the same as "narrowspec file exists
171 172 # and is empty".
172 173 spec = repo.svfs.tryread(FILENAME)
173 174 return parseconfig(repo.ui, spec)
174 175
175 176
176 177 def save(repo, includepats, excludepats):
178 repo = repo.unfiltered()
179
177 180 validatepatterns(includepats)
178 181 validatepatterns(excludepats)
179 182 spec = format(includepats, excludepats)
183
184 tr = repo.currenttransaction()
185 if tr is None:
180 186 repo.svfs.write(FILENAME, spec)
187 else:
188 # the roundtrip is sometimes different
189 # not taking any chances for now
190 value = parseconfig(repo.ui, spec)
191 reporef = weakref.ref(repo)
192
193 def clean_pending(tr):
194 r = reporef()
195 if r is not None:
196 r._pending_narrow_pats = None
197
198 tr.addpostclose(b'narrow-spec', clean_pending)
199 tr.addabort(b'narrow-spec', clean_pending)
200 repo._pending_narrow_pats = value
201
202 def write_spec(f):
203 f.write(spec)
204
205 tr.addfilegenerator(
206 # XXX think about order at some point
207 b"narrow-spec",
208 (FILENAME,),
209 write_spec,
210 location=b'store',
211 )
181 212
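# Editor's note: a hedged sketch of the intended calling pattern for save();
# with an open transaction the spec is written by the transaction's file
# generator, otherwise it is written immediately. Names are illustrative.
#
#     with repo.wlock(), repo.lock(), repo.transaction(b'narrow'):
#         save(repo, new_includes, new_excludes)
#         # FILENAME is materialized in the store when the transaction closes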
182 213
183 214 def copytoworkingcopy(repo):
184 215 spec = format(*repo.narrowpats)
185 216 repo.vfs.write(DIRSTATE_FILENAME, spec)
186 217
187 218
188 219 def savebackup(repo, backupname):
189 220 if requirements.NARROW_REQUIREMENT not in repo.requirements:
190 221 return
191 222 svfs = repo.svfs
192 223 svfs.tryunlink(backupname)
193 224 util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
194 225
195 226
196 227 def restorebackup(repo, backupname):
197 228 if requirements.NARROW_REQUIREMENT not in repo.requirements:
198 229 return
199 230 util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
200 231
201 232
202 233 def savewcbackup(repo, backupname):
203 234 if requirements.NARROW_REQUIREMENT not in repo.requirements:
204 235 return
205 236 vfs = repo.vfs
206 237 vfs.tryunlink(backupname)
207 238 # It may not exist in old repos
208 239 if vfs.exists(DIRSTATE_FILENAME):
209 240 util.copyfile(
210 241 vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), hardlink=True
211 242 )
212 243
213 244
214 245 def restorewcbackup(repo, backupname):
215 246 if requirements.NARROW_REQUIREMENT not in repo.requirements:
216 247 return
217 248 # It may not exist in old repos
218 249 if repo.vfs.exists(backupname):
219 250 util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
220 251
221 252
222 253 def clearwcbackup(repo, backupname):
223 254 if requirements.NARROW_REQUIREMENT not in repo.requirements:
224 255 return
225 256 repo.vfs.tryunlink(backupname)
226 257
227 258
228 259 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
229 260 r"""Restricts the patterns according to repo settings,
230 261 resulting in a logical AND operation
231 262
232 263 :param req_includes: requested includes
233 264 :param req_excludes: requested excludes
234 265 :param repo_includes: repo includes
235 266 :param repo_excludes: repo excludes
236 267 :return: include patterns, exclude patterns, and invalid include patterns.
237 268 """
238 269 res_excludes = set(req_excludes)
239 270 res_excludes.update(repo_excludes)
240 271 invalid_includes = []
241 272 if not req_includes:
242 273 res_includes = set(repo_includes)
243 274 elif b'path:.' not in repo_includes:
244 275 res_includes = []
245 276 for req_include in req_includes:
246 277 req_include = util.expandpath(util.normpath(req_include))
247 278 if req_include in repo_includes:
248 279 res_includes.append(req_include)
249 280 continue
250 281 valid = False
251 282 for repo_include in repo_includes:
252 283 if req_include.startswith(repo_include + b'/'):
253 284 valid = True
254 285 res_includes.append(req_include)
255 286 break
256 287 if not valid:
257 288 invalid_includes.append(req_include)
258 289 if len(res_includes) == 0:
259 290 res_excludes = {b'path:.'}
260 291 else:
261 292 res_includes = set(res_includes)
262 293 else:
263 294 res_includes = set(req_includes)
264 295 return res_includes, res_excludes, invalid_includes
265 296
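# Editor's note: an illustrative trace of restrictpatterns(); a requested
# include nested under a repo include is kept, everything else is rejected.
#
#     restrictpatterns([b'path:foo/bar'], [], [b'path:foo'], [])
#     # -> ({b'path:foo/bar'}, set(), [])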
266 297
267 298 # These two are extracted for extensions (specifically for Google's CitC file
268 299 # system)
269 300 def _deletecleanfiles(repo, files):
270 301 for f in files:
271 302 repo.wvfs.unlinkpath(f)
272 303
273 304
274 305 def _writeaddedfiles(repo, pctx, files):
275 306 mresult = merge.mergeresult()
276 307 mf = repo[b'.'].manifest()
277 308 for f in files:
278 309 if not repo.wvfs.exists(f):
279 310 mresult.addfile(
280 311 f,
281 312 mergestatemod.ACTION_GET,
282 313 (mf.flags(f), False),
283 314 b"narrowspec updated",
284 315 )
285 316 merge.applyupdates(
286 317 repo,
287 318 mresult,
288 319 wctx=repo[None],
289 320 mctx=repo[b'.'],
290 321 overwrite=False,
291 322 wantfiledata=False,
292 323 )
293 324
294 325
295 326 def checkworkingcopynarrowspec(repo):
296 327 # Avoid infinite recursion when updating the working copy
297 328 if getattr(repo, '_updatingnarrowspec', False):
298 329 return
299 330 storespec = repo.narrowpats
300 331 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
301 332 wcspec = parseconfig(repo.ui, wcspec)
302 333 if wcspec != storespec:
303 334 raise error.StateError(
304 335 _(b"working copy's narrowspec is stale"),
305 336 hint=_(b"run 'hg tracked --update-working-copy'"),
306 337 )
307 338
308 339
309 340 def updateworkingcopy(repo, assumeclean=False):
310 341 """updates the working copy and dirstate from the store narrowspec
311 342
312 343 When assumeclean=True, files that are not known to be clean will also
313 344 be deleted. It is then up to the caller to make sure they are clean.
314 345 """
315 346 oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
316 347 newincludes, newexcludes = repo.narrowpats
317 348 repo._updatingnarrowspec = True
318 349
319 350 oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
320 351 oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
321 352 newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
322 353 addedmatch = matchmod.differencematcher(newmatch, oldmatch)
323 354 removedmatch = matchmod.differencematcher(oldmatch, newmatch)
324 355
325 356 assert repo.currentwlock() is not None
326 357 ds = repo.dirstate
327 358 with ds.running_status(repo):
328 359 lookup, status, _mtime_boundary = ds.status(
329 360 removedmatch,
330 361 subrepos=[],
331 362 ignored=True,
332 363 clean=True,
333 364 unknown=True,
334 365 )
335 366 trackeddirty = status.modified + status.added
336 367 clean = status.clean
337 368 if assumeclean:
338 369 clean.extend(lookup)
339 370 else:
340 371 trackeddirty.extend(lookup)
341 372 _deletecleanfiles(repo, clean)
342 373 uipathfn = scmutil.getuipathfn(repo)
343 374 for f in sorted(trackeddirty):
344 375 repo.ui.status(
345 376 _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
346 377 )
347 378 for f in sorted(status.unknown):
348 379 repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
349 380 for f in sorted(status.ignored):
350 381 repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
351 382 for f in clean + trackeddirty:
352 383 ds.update_file(f, p1_tracked=False, wc_tracked=False)
353 384
354 385 pctx = repo[b'.']
355 386
356 387 # only update added files that are in the sparse checkout
357 388 addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
358 389 newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
359 390 for f in newfiles:
360 391 ds.update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
361 392 _writeaddedfiles(repo, pctx, newfiles)
362 393 repo._updatingnarrowspec = False