localrepo: purge filecache attribute using their unicode name...
marmoute
r51812:b3174be5 default
@@ -1,4043 +1,4047 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from .pycompat import (
32 32 delattr,
33 33 getattr,
34 34 )
35 35 from . import (
36 36 bookmarks,
37 37 branchmap,
38 38 bundle2,
39 39 bundlecaches,
40 40 changegroup,
41 41 color,
42 42 commit,
43 43 context,
44 44 dirstate,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 61 policy,
62 62 pushkey,
63 63 pycompat,
64 64 rcutil,
65 65 repoview,
66 66 requirements as requirementsmod,
67 67 revlog,
68 68 revset,
69 69 revsetlang,
70 70 scmutil,
71 71 sparse,
72 72 store as storemod,
73 73 subrepoutil,
74 74 tags as tagsmod,
75 75 transaction,
76 76 txnutil,
77 77 util,
78 78 vfs as vfsmod,
79 79 wireprototypes,
80 80 )
81 81
82 82 from .interfaces import (
83 83 repository,
84 84 util as interfaceutil,
85 85 )
86 86
87 87 from .utils import (
88 88 hashutil,
89 89 procutil,
90 90 stringutil,
91 91 urlutil,
92 92 )
93 93
94 94 from .revlogutils import (
95 95 concurrency_checker as revlogchecker,
96 96 constants as revlogconst,
97 97 sidedata as sidedatamod,
98 98 )
99 99
100 100 release = lockmod.release
101 101 urlerr = util.urlerr
102 102 urlreq = util.urlreq
103 103
104 104 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
105 105 b"^((dirstate|narrowspec.dirstate).*|branch$)"
106 106 )
107 107
108 108 # set of (path, vfs-location) tuples. vfs-location is:
109 109 # - 'plain' for vfs relative paths
110 110 # - '' for svfs relative paths
111 111 _cachedfiles = set()
112 112
113 113
114 114 class _basefilecache(scmutil.filecache):
115 115 """All filecache usage on a repo is done for logic that should be unfiltered"""
116 116
117 117 def __get__(self, repo, type=None):
118 118 if repo is None:
119 119 return self
120 120 # proxy to unfiltered __dict__ since filtered repo has no entry
121 121 unfi = repo.unfiltered()
122 122 try:
123 123 return unfi.__dict__[self.sname]
124 124 except KeyError:
125 125 pass
126 126 return super(_basefilecache, self).__get__(unfi, type)
127 127
128 128 def set(self, repo, value):
129 129 return super(_basefilecache, self).set(repo.unfiltered(), value)
130 130
131 131
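# Illustrative sketch, not part of the upstream file: because _basefilecache
# proxies reads and writes to the *unfiltered* repository's __dict__, a
# filtered view and its underlying repo always observe the same cached
# object. '_bookmarks' is just an example of a filecache-ed property.
def _demo_shared_filecache(repo):
    filtered = repo.filtered(b'visible')
    return filtered._bookmarks is repo.unfiltered()._bookmarks  # True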
132 132 class repofilecache(_basefilecache):
133 133 """filecache for files in .hg but outside of .hg/store"""
134 134
135 135 def __init__(self, *paths):
136 136 super(repofilecache, self).__init__(*paths)
137 137 for path in paths:
138 138 _cachedfiles.add((path, b'plain'))
139 139
140 140 def join(self, obj, fname):
141 141 return obj.vfs.join(fname)
142 142
143 143
144 144 class storecache(_basefilecache):
145 145 """filecache for files in the store"""
146 146
147 147 def __init__(self, *paths):
148 148 super(storecache, self).__init__(*paths)
149 149 for path in paths:
150 150 _cachedfiles.add((path, b''))
151 151
152 152 def join(self, obj, fname):
153 153 return obj.sjoin(fname)
154 154
155 155
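# Illustrative sketch, not part of the upstream file: repofilecache and
# storecache are used as decorators on localrepository properties; the
# computed value is cached and invalidated when the backing file's stat
# information changes. The class and property below are hypothetical
# (note that merely defining them registers b'bookmarks' in _cachedfiles).
class _demo_cached_repo:
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        # recomputed only when .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)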
156 156 class changelogcache(storecache):
157 157 """filecache for the changelog"""
158 158
159 159 def __init__(self):
160 160 super(changelogcache, self).__init__()
161 161 _cachedfiles.add((b'00changelog.i', b''))
162 162 _cachedfiles.add((b'00changelog.n', b''))
163 163
164 164 def tracked_paths(self, obj):
165 165 paths = [self.join(obj, b'00changelog.i')]
166 166 if obj.store.opener.options.get(b'persistent-nodemap', False):
167 167 paths.append(self.join(obj, b'00changelog.n'))
168 168 return paths
169 169
170 170
171 171 class manifestlogcache(storecache):
172 172 """filecache for the manifestlog"""
173 173
174 174 def __init__(self):
175 175 super(manifestlogcache, self).__init__()
176 176 _cachedfiles.add((b'00manifest.i', b''))
177 177 _cachedfiles.add((b'00manifest.n', b''))
178 178
179 179 def tracked_paths(self, obj):
180 180 paths = [self.join(obj, b'00manifest.i')]
181 181 if obj.store.opener.options.get(b'persistent-nodemap', False):
182 182 paths.append(self.join(obj, b'00manifest.n'))
183 183 return paths
184 184
185 185
186 186 class mixedrepostorecache(_basefilecache):
187 187 """filecache for a mix of files in .hg/store and outside"""
188 188
189 189 def __init__(self, *pathsandlocations):
190 190 # scmutil.filecache only uses the path for passing back into our
191 191 # join(), so we can safely pass a list of paths and locations
192 192 super(mixedrepostorecache, self).__init__(*pathsandlocations)
193 193 _cachedfiles.update(pathsandlocations)
194 194
195 195 def join(self, obj, fnameandlocation):
196 196 fname, location = fnameandlocation
197 197 if location == b'plain':
198 198 return obj.vfs.join(fname)
199 199 else:
200 200 if location != b'':
201 201 raise error.ProgrammingError(
202 202 b'unexpected location: %s' % location
203 203 )
204 204 return obj.sjoin(fname)
205 205
206 206
207 207 def isfilecached(repo, name):
208 208 """check if a repo has already cached the "name" filecache-ed property
209 209
210 210 This returns (cachedobj-or-None, iscached) tuple.
211 211 """
212 212 cacheentry = repo.unfiltered()._filecache.get(name, None)
213 213 if not cacheentry:
214 214 return None, False
215 215 return cacheentry.obj, True
216 216
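# Illustrative sketch, not part of the upstream file: isfilecached() lets a
# caller peek at a filecache-ed property without forcing an expensive load.
# The property name '_bookmarks' is only an example.
def _demo_peek_bookmarks(repo):
    cached, iscached = isfilecached(repo, '_bookmarks')
    if iscached:
        return cached  # reuse the already-loaded object
    return None  # not loaded yet; avoid triggering a load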
217 217
218 218 class unfilteredpropertycache(util.propertycache):
219 219 """propertycache that applies to the unfiltered repo only"""
220 220
221 221 def __get__(self, repo, type=None):
222 222 unfi = repo.unfiltered()
223 223 if unfi is repo:
224 224 return super(unfilteredpropertycache, self).__get__(unfi)
225 225 return getattr(unfi, self.name)
226 226
227 227
228 228 class filteredpropertycache(util.propertycache):
229 229 """propertycache that must take filtering into account"""
230 230
231 231 def cachevalue(self, obj, value):
232 232 object.__setattr__(obj, self.name, value)
233 233
234 234
235 235 def hasunfilteredcache(repo, name):
236 236 """check if a repo has an unfilteredpropertycache value for <name>"""
237 237 return name in vars(repo.unfiltered())
238 238
239 239
240 240 def unfilteredmethod(orig):
241 241 """decorate a method that always needs to be run on the unfiltered version"""
242 242
243 243 @functools.wraps(orig)
244 244 def wrapper(repo, *args, **kwargs):
245 245 return orig(repo.unfiltered(), *args, **kwargs)
246 246
247 247 return wrapper
248 248
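# Illustrative sketch, not part of the upstream file: @unfilteredmethod is
# applied to methods whose logic must see every revision, including hidden
# ones (upstream, methods such as localrepository.destroyed() use it).
class _demo_unfiltered_user:
    @unfilteredmethod
    def _demo_destroyed(self):
        # `self` is guaranteed to be the unfiltered repository here
        return self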
249 249
250 250 moderncaps = {
251 251 b'lookup',
252 252 b'branchmap',
253 253 b'pushkey',
254 254 b'known',
255 255 b'getbundle',
256 256 b'unbundle',
257 257 }
258 258 legacycaps = moderncaps.union({b'changegroupsubset'})
259 259
260 260
261 261 @interfaceutil.implementer(repository.ipeercommandexecutor)
262 262 class localcommandexecutor:
263 263 def __init__(self, peer):
264 264 self._peer = peer
265 265 self._sent = False
266 266 self._closed = False
267 267
268 268 def __enter__(self):
269 269 return self
270 270
271 271 def __exit__(self, exctype, excvalue, exctb):
272 272 self.close()
273 273
274 274 def callcommand(self, command, args):
275 275 if self._sent:
276 276 raise error.ProgrammingError(
277 277 b'callcommand() cannot be used after sendcommands()'
278 278 )
279 279
280 280 if self._closed:
281 281 raise error.ProgrammingError(
282 282 b'callcommand() cannot be used after close()'
283 283 )
284 284
285 285 # We don't need to support anything fancy. Just call the named
286 286 # method on the peer and return a resolved future.
287 287 fn = getattr(self._peer, pycompat.sysstr(command))
288 288
289 289 f = futures.Future()
290 290
291 291 try:
292 292 result = fn(**pycompat.strkwargs(args))
293 293 except Exception:
294 294 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
295 295 else:
296 296 f.set_result(result)
297 297
298 298 return f
299 299
300 300 def sendcommands(self):
301 301 self._sent = True
302 302
303 303 def close(self):
304 304 self._closed = True
305 305
306 306
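# Illustrative sketch, not part of the upstream file: the executor protocol
# implemented by localcommandexecutor above. Callers queue commands, send
# them, then resolve the returned futures.
def _demo_peer_lookup(peer, key):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': key})
        e.sendcommands()
        return f.result()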
307 307 @interfaceutil.implementer(repository.ipeercommands)
308 308 class localpeer(repository.peer):
309 309 '''peer for a local repo; reflects only the most recent API'''
310 310
311 311 def __init__(self, repo, caps=None, path=None, remotehidden=False):
312 312 super(localpeer, self).__init__(
313 313 repo.ui, path=path, remotehidden=remotehidden
314 314 )
315 315
316 316 if caps is None:
317 317 caps = moderncaps.copy()
318 318 if remotehidden:
319 319 self._repo = repo.filtered(b'served.hidden')
320 320 else:
321 321 self._repo = repo.filtered(b'served')
322 322 if repo._wanted_sidedata:
323 323 formatted = bundle2.format_remote_wanted_sidedata(repo)
324 324 caps.add(b'exp-wanted-sidedata=' + formatted)
325 325
326 326 self._caps = repo._restrictcapabilities(caps)
327 327
328 328 # Begin of _basepeer interface.
329 329
330 330 def url(self):
331 331 return self._repo.url()
332 332
333 333 def local(self):
334 334 return self._repo
335 335
336 336 def canpush(self):
337 337 return True
338 338
339 339 def close(self):
340 340 self._repo.close()
341 341
342 342 # End of _basepeer interface.
343 343
344 344 # Begin of _basewirecommands interface.
345 345
346 346 def branchmap(self):
347 347 return self._repo.branchmap()
348 348
349 349 def capabilities(self):
350 350 return self._caps
351 351
352 352 def get_cached_bundle_inline(self, path):
353 353 # not needed with local peer
354 354 raise NotImplementedError
355 355
356 356 def clonebundles(self):
357 357 return bundlecaches.get_manifest(self._repo)
358 358
359 359 def debugwireargs(self, one, two, three=None, four=None, five=None):
360 360 """Used to test argument passing over the wire"""
361 361 return b"%s %s %s %s %s" % (
362 362 one,
363 363 two,
364 364 pycompat.bytestr(three),
365 365 pycompat.bytestr(four),
366 366 pycompat.bytestr(five),
367 367 )
368 368
369 369 def getbundle(
370 370 self,
371 371 source,
372 372 heads=None,
373 373 common=None,
374 374 bundlecaps=None,
375 375 remote_sidedata=None,
376 376 **kwargs
377 377 ):
378 378 chunks = exchange.getbundlechunks(
379 379 self._repo,
380 380 source,
381 381 heads=heads,
382 382 common=common,
383 383 bundlecaps=bundlecaps,
384 384 remote_sidedata=remote_sidedata,
385 385 **kwargs
386 386 )[1]
387 387 cb = util.chunkbuffer(chunks)
388 388
389 389 if exchange.bundle2requested(bundlecaps):
390 390 # When requesting a bundle2, getbundle returns a stream to make the
391 391 # wire level function happier. We need to build a proper object
392 392 # from it in local peer.
393 393 return bundle2.getunbundler(self.ui, cb)
394 394 else:
395 395 return changegroup.getunbundler(b'01', cb, None)
396 396
397 397 def heads(self):
398 398 return self._repo.heads()
399 399
400 400 def known(self, nodes):
401 401 return self._repo.known(nodes)
402 402
403 403 def listkeys(self, namespace):
404 404 return self._repo.listkeys(namespace)
405 405
406 406 def lookup(self, key):
407 407 return self._repo.lookup(key)
408 408
409 409 def pushkey(self, namespace, key, old, new):
410 410 return self._repo.pushkey(namespace, key, old, new)
411 411
412 412 def stream_out(self):
413 413 raise error.Abort(_(b'cannot perform stream clone against local peer'))
414 414
415 415 def unbundle(self, bundle, heads, url):
416 416 """apply a bundle on a repo
417 417
418 418 This function handles the repo locking itself."""
419 419 try:
420 420 try:
421 421 bundle = exchange.readbundle(self.ui, bundle, None)
422 422 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
423 423 if util.safehasattr(ret, 'getchunks'):
424 424 # This is a bundle20 object, turn it into an unbundler.
425 425 # This little dance should be dropped eventually when the
426 426 # API is finally improved.
427 427 stream = util.chunkbuffer(ret.getchunks())
428 428 ret = bundle2.getunbundler(self.ui, stream)
429 429 return ret
430 430 except Exception as exc:
431 431 # If the exception contains output salvaged from a bundle2
432 432 # reply, we need to make sure it is printed before continuing
433 433 # to fail. So we build a bundle2 with such output and consume
434 434 # it directly.
435 435 #
436 436 # This is not very elegant but allows a "simple" solution for
437 437 # issue4594
438 438 output = getattr(exc, '_bundle2salvagedoutput', ())
439 439 if output:
440 440 bundler = bundle2.bundle20(self._repo.ui)
441 441 for out in output:
442 442 bundler.addpart(out)
443 443 stream = util.chunkbuffer(bundler.getchunks())
444 444 b = bundle2.getunbundler(self.ui, stream)
445 445 bundle2.processbundle(self._repo, b)
446 446 raise
447 447 except error.PushRaced as exc:
448 448 raise error.ResponseError(
449 449 _(b'push failed:'), stringutil.forcebytestr(exc)
450 450 )
451 451
452 452 # End of _basewirecommands interface.
453 453
454 454 # Begin of peer interface.
455 455
456 456 def commandexecutor(self):
457 457 return localcommandexecutor(self)
458 458
459 459 # End of peer interface.
460 460
461 461
462 462 @interfaceutil.implementer(repository.ipeerlegacycommands)
463 463 class locallegacypeer(localpeer):
464 464 """peer extension which implements legacy methods too; used for tests with
465 465 restricted capabilities"""
466 466
467 467 def __init__(self, repo, path=None, remotehidden=False):
468 468 super(locallegacypeer, self).__init__(
469 469 repo, caps=legacycaps, path=path, remotehidden=remotehidden
470 470 )
471 471
472 472 # Begin of baselegacywirecommands interface.
473 473
474 474 def between(self, pairs):
475 475 return self._repo.between(pairs)
476 476
477 477 def branches(self, nodes):
478 478 return self._repo.branches(nodes)
479 479
480 480 def changegroup(self, nodes, source):
481 481 outgoing = discovery.outgoing(
482 482 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
483 483 )
484 484 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
485 485
486 486 def changegroupsubset(self, bases, heads, source):
487 487 outgoing = discovery.outgoing(
488 488 self._repo, missingroots=bases, ancestorsof=heads
489 489 )
490 490 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
491 491
492 492 # End of baselegacywirecommands interface.
493 493
494 494
495 495 # Functions receiving (ui, features) that extensions can register to impact
496 496 # the ability to load repositories with custom requirements. Only
497 497 # functions defined in loaded extensions are called.
498 498 #
499 499 # The function receives a set of requirement strings that the repository
500 500 # is capable of opening. Functions will typically add elements to the
501 501 # set to reflect that the extension knows how to handle those requirements.
502 502 featuresetupfuncs = set()
503 503
504 504
505 505 def _getsharedvfs(hgvfs, requirements):
506 506 """returns the vfs object pointing to the root of the shared source
507 507 repo for a shared repository
508 508
509 509 hgvfs is vfs pointing at .hg/ of current repo (shared one)
510 510 requirements is a set of requirements of current repo (shared one)
511 511 """
512 512 # The ``shared`` or ``relshared`` requirements indicate the
513 513 # store lives in the path contained in the ``.hg/sharedpath`` file.
514 514 # This is an absolute path for ``shared`` and relative to
515 515 # ``.hg/`` for ``relshared``.
516 516 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
517 517 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
518 518 sharedpath = util.normpath(hgvfs.join(sharedpath))
519 519
520 520 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
521 521
522 522 if not sharedvfs.exists():
523 523 raise error.RepoError(
524 524 _(b'.hg/sharedpath points to nonexistent directory %s')
525 525 % sharedvfs.base
526 526 )
527 527 return sharedvfs
528 528
529 529
530 530 def _readrequires(vfs, allowmissing):
531 531 """reads the require file present at root of this vfs
532 532 and return a set of requirements
533 533
534 534 If allowmissing is True, we suppress FileNotFoundError if raised"""
535 535 # requires file contains a newline-delimited list of
536 536 # features/capabilities the opener (us) must have in order to use
537 537 # the repository. This file was introduced in Mercurial 0.9.2,
538 538 # which means very old repositories may not have one. We assume
539 539 # a missing file translates to no requirements.
540 540 read = vfs.tryread if allowmissing else vfs.read
541 541 return set(read(b'requires').splitlines())
542 542
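# Illustrative sketch, not part of the upstream file: reading a repository's
# requirements the way the opener does, tolerating pre-0.9.2 repositories
# that have no `requires` file at all.
def _demo_read_requires(repo_root):
    hgvfs = vfsmod.vfs(repo_root + b'/.hg')
    return _readrequires(hgvfs, allowmissing=True)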
543 543
544 544 def makelocalrepository(baseui, path: bytes, intents=None):
545 545 """Create a local repository object.
546 546
547 547 Given arguments needed to construct a local repository, this function
548 548 performs various early repository loading functionality (such as
549 549 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
550 550 the repository can be opened, derives a type suitable for representing
551 551 that repository, and returns an instance of it.
552 552
553 553 The returned object conforms to the ``repository.completelocalrepository``
554 554 interface.
555 555
556 556 The repository type is derived by calling a series of factory functions
557 557 for each aspect/interface of the final repository. These are defined by
558 558 ``REPO_INTERFACES``.
559 559
560 560 Each factory function is called to produce a type implementing a specific
561 561 interface. The cumulative list of returned types will be combined into a
562 562 new type and that type will be instantiated to represent the local
563 563 repository.
564 564
565 565 The factory functions each receive various state that may be consulted
566 566 as part of deriving a type.
567 567
568 568 Extensions should wrap these factory functions to customize repository type
569 569 creation. Note that an extension's wrapped function may be called even if
570 570 that extension is not loaded for the repo being constructed. Extensions
571 571 should check if their ``__name__`` appears in the
572 572 ``extensionmodulenames`` set passed to the factory function and no-op if
573 573 not.
574 574 """
575 575 ui = baseui.copy()
576 576 # Prevent copying repo configuration.
577 577 ui.copy = baseui.copy
578 578
579 579 # Working directory VFS rooted at repository root.
580 580 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
581 581
582 582 # Main VFS for .hg/ directory.
583 583 hgpath = wdirvfs.join(b'.hg')
584 584 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
585 585 # Whether this repository is a shared one or not
586 586 shared = False
587 587 # If this repository is shared, the vfs pointing to the shared repo
588 588 sharedvfs = None
589 589
590 590 # The .hg/ path should exist and should be a directory. All other
591 591 # cases are errors.
592 592 if not hgvfs.isdir():
593 593 try:
594 594 hgvfs.stat()
595 595 except FileNotFoundError:
596 596 pass
597 597 except ValueError as e:
598 598 # Can be raised on Python 3.8 when path is invalid.
599 599 raise error.Abort(
600 600 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
601 601 )
602 602
603 603 raise error.RepoError(_(b'repository %s not found') % path)
604 604
605 605 requirements = _readrequires(hgvfs, True)
606 606 shared = (
607 607 requirementsmod.SHARED_REQUIREMENT in requirements
608 608 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
609 609 )
610 610 storevfs = None
611 611 if shared:
612 612 # This is a shared repo
613 613 sharedvfs = _getsharedvfs(hgvfs, requirements)
614 614 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
615 615 else:
616 616 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
617 617
618 618 # if .hg/requires contains the sharesafe requirement, it means
619 619 # there exists a `.hg/store/requires` too and we should read it
620 620 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
621 621 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
622 622 # is not present; refer to checkrequirementscompat() for that
623 623 #
624 624 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
625 625 # repository was shared the old way. We check the share source .hg/requires
626 626 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
627 627 # to be reshared
628 628 hint = _(b"see `hg help config.format.use-share-safe` for more information")
629 629 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
630 630 if (
631 631 shared
632 632 and requirementsmod.SHARESAFE_REQUIREMENT
633 633 not in _readrequires(sharedvfs, True)
634 634 ):
635 635 mismatch_warn = ui.configbool(
636 636 b'share', b'safe-mismatch.source-not-safe.warn'
637 637 )
638 638 mismatch_config = ui.config(
639 639 b'share', b'safe-mismatch.source-not-safe'
640 640 )
641 641 mismatch_verbose_upgrade = ui.configbool(
642 642 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
643 643 )
644 644 if mismatch_config in (
645 645 b'downgrade-allow',
646 646 b'allow',
647 647 b'downgrade-abort',
648 648 ):
649 649 # prevent cyclic import localrepo -> upgrade -> localrepo
650 650 from . import upgrade
651 651
652 652 upgrade.downgrade_share_to_non_safe(
653 653 ui,
654 654 hgvfs,
655 655 sharedvfs,
656 656 requirements,
657 657 mismatch_config,
658 658 mismatch_warn,
659 659 mismatch_verbose_upgrade,
660 660 )
661 661 elif mismatch_config == b'abort':
662 662 raise error.Abort(
663 663 _(b"share source does not support share-safe requirement"),
664 664 hint=hint,
665 665 )
666 666 else:
667 667 raise error.Abort(
668 668 _(
669 669 b"share-safe mismatch with source.\nUnrecognized"
670 670 b" value '%s' of `share.safe-mismatch.source-not-safe`"
671 671 b" set."
672 672 )
673 673 % mismatch_config,
674 674 hint=hint,
675 675 )
676 676 else:
677 677 requirements |= _readrequires(storevfs, False)
678 678 elif shared:
679 679 sourcerequires = _readrequires(sharedvfs, False)
680 680 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
681 681 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
682 682 mismatch_warn = ui.configbool(
683 683 b'share', b'safe-mismatch.source-safe.warn'
684 684 )
685 685 mismatch_verbose_upgrade = ui.configbool(
686 686 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
687 687 )
688 688 if mismatch_config in (
689 689 b'upgrade-allow',
690 690 b'allow',
691 691 b'upgrade-abort',
692 692 ):
693 693 # prevent cyclic import localrepo -> upgrade -> localrepo
694 694 from . import upgrade
695 695
696 696 upgrade.upgrade_share_to_safe(
697 697 ui,
698 698 hgvfs,
699 699 storevfs,
700 700 requirements,
701 701 mismatch_config,
702 702 mismatch_warn,
703 703 mismatch_verbose_upgrade,
704 704 )
705 705 elif mismatch_config == b'abort':
706 706 raise error.Abort(
707 707 _(
708 708 b'version mismatch: source uses share-safe'
709 709 b' functionality while the current share does not'
710 710 ),
711 711 hint=hint,
712 712 )
713 713 else:
714 714 raise error.Abort(
715 715 _(
716 716 b"share-safe mismatch with source.\nUnrecognized"
717 717 b" value '%s' of `share.safe-mismatch.source-safe` set."
718 718 )
719 719 % mismatch_config,
720 720 hint=hint,
721 721 )
722 722
723 723 # The .hg/hgrc file may load extensions or contain config options
724 724 # that influence repository construction. Attempt to load it and
725 725 # process any new extensions that it may have pulled in.
726 726 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
727 727 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
728 728 extensions.loadall(ui)
729 729 extensions.populateui(ui)
730 730
731 731 # Set of module names of extensions loaded for this repository.
732 732 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
733 733
734 734 supportedrequirements = gathersupportedrequirements(ui)
735 735
736 736 # We first validate the requirements are known.
737 737 ensurerequirementsrecognized(requirements, supportedrequirements)
738 738
739 739 # Then we validate that the known set is reasonable to use together.
740 740 ensurerequirementscompatible(ui, requirements)
741 741
742 742 # TODO there are unhandled edge cases related to opening repositories with
743 743 # shared storage. If storage is shared, we should also test for requirements
744 744 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
745 745 # that repo, as that repo may load extensions needed to open it. This is a
746 746 # bit complicated because we don't want the other hgrc to overwrite settings
747 747 # in this hgrc.
748 748 #
749 749 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
750 750 # file when sharing repos. But if a requirement is added after the share is
751 751 # performed, thereby introducing a new requirement for the opener, we may
752 752 # not see that and could encounter a run-time error interacting with
753 753 # that shared store since it has an unknown-to-us requirement.
754 754
755 755 # At this point, we know we should be capable of opening the repository.
756 756 # Now get on with doing that.
757 757
758 758 features = set()
759 759
760 760 # The "store" part of the repository holds versioned data. How it is
761 761 # accessed is determined by various requirements. If `shared` or
762 762 # `relshared` requirements are present, this indicates current repository
763 763 # is a share and store exists in path mentioned in `.hg/sharedpath`
764 764 if shared:
765 765 storebasepath = sharedvfs.base
766 766 cachepath = sharedvfs.join(b'cache')
767 767 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
768 768 else:
769 769 storebasepath = hgvfs.base
770 770 cachepath = hgvfs.join(b'cache')
771 771 wcachepath = hgvfs.join(b'wcache')
772 772
773 773 # The store has changed over time and the exact layout is dictated by
774 774 # requirements. The store interface abstracts differences across all
775 775 # of them.
776 776 store = makestore(
777 777 requirements,
778 778 storebasepath,
779 779 lambda base: vfsmod.vfs(base, cacheaudited=True),
780 780 )
781 781 hgvfs.createmode = store.createmode
782 782
783 783 storevfs = store.vfs
784 784 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
785 785
786 786 if (
787 787 requirementsmod.REVLOGV2_REQUIREMENT in requirements
788 788 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
789 789 ):
790 790 features.add(repository.REPO_FEATURE_SIDE_DATA)
791 791 # the revlogv2 docket introduced a race condition that we need to fix
792 792 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
793 793
794 794 # The cache vfs is used to manage cache files.
795 795 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
796 796 cachevfs.createmode = store.createmode
797 797 # The cache vfs is used to manage cache files related to the working copy
798 798 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
799 799 wcachevfs.createmode = store.createmode
800 800
801 801 # Now resolve the type for the repository object. We do this by repeatedly
802 802 # calling a factory function to produce types for specific aspects of the
803 803 # repo's operation. The aggregate returned types are used as base classes
804 804 # for a dynamically-derived type, which will represent our new repository.
805 805
806 806 bases = []
807 807 extrastate = {}
808 808
809 809 for iface, fn in REPO_INTERFACES:
810 810 # We pass all potentially useful state to give extensions tons of
811 811 # flexibility.
812 812 typ = fn()(
813 813 ui=ui,
814 814 intents=intents,
815 815 requirements=requirements,
816 816 features=features,
817 817 wdirvfs=wdirvfs,
818 818 hgvfs=hgvfs,
819 819 store=store,
820 820 storevfs=storevfs,
821 821 storeoptions=storevfs.options,
822 822 cachevfs=cachevfs,
823 823 wcachevfs=wcachevfs,
824 824 extensionmodulenames=extensionmodulenames,
825 825 extrastate=extrastate,
826 826 baseclasses=bases,
827 827 )
828 828
829 829 if not isinstance(typ, type):
830 830 raise error.ProgrammingError(
831 831 b'unable to construct type for %s' % iface
832 832 )
833 833
834 834 bases.append(typ)
835 835
836 836 # type() allows you to use characters in type names that wouldn't be
837 837 # recognized as Python symbols in source code. We abuse that to add
838 838 # rich information about our constructed repo.
839 839 name = pycompat.sysstr(
840 840 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
841 841 )
842 842
843 843 cls = type(name, tuple(bases), {})
844 844
845 845 return cls(
846 846 baseui=baseui,
847 847 ui=ui,
848 848 origroot=path,
849 849 wdirvfs=wdirvfs,
850 850 hgvfs=hgvfs,
851 851 requirements=requirements,
852 852 supportedrequirements=supportedrequirements,
853 853 sharedpath=storebasepath,
854 854 store=store,
855 855 cachevfs=cachevfs,
856 856 wcachevfs=wcachevfs,
857 857 features=features,
858 858 intents=intents,
859 859 )
860 860
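# Illustrative sketch, not part of the upstream file: the factory-wrapping
# contract described in the makelocalrepository() docstring. A wrapper must
# no-op when its extension is not enabled for the repo being opened.
def _demo_wrapped_makemain(orig, **kwargs):
    cls = orig(**kwargs)
    if __name__ not in kwargs['extensionmodulenames']:
        return cls  # our extension is not loaded here: change nothing

    class demorepo(cls):  # a customized repository type would go here
        pass

    return demorepo
# installed from an extension with:
# extensions.wrapfunction(localrepo, 'makemain', _demo_wrapped_makemain)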
861 861
862 862 def loadhgrc(
863 863 ui,
864 864 wdirvfs: vfsmod.vfs,
865 865 hgvfs: vfsmod.vfs,
866 866 requirements,
867 867 sharedvfs: Optional[vfsmod.vfs] = None,
868 868 ):
869 869 """Load hgrc files/content into a ui instance.
870 870
871 871 This is called during repository opening to load any additional
872 872 config files or settings relevant to the current repository.
873 873
874 874 Returns a bool indicating whether any additional configs were loaded.
875 875
876 876 Extensions should monkeypatch this function to modify how per-repo
877 877 configs are loaded. For example, an extension may wish to pull in
878 878 configs from alternate files or sources.
879 879
880 880 sharedvfs is a vfs object pointing to the source repo if the current one is a
881 881 shared one
882 882 """
883 883 if not rcutil.use_repo_hgrc():
884 884 return False
885 885
886 886 ret = False
887 887 # first load config from shared source if we have to
888 888 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
889 889 try:
890 890 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
891 891 ret = True
892 892 except IOError:
893 893 pass
894 894
895 895 try:
896 896 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
897 897 ret = True
898 898 except IOError:
899 899 pass
900 900
901 901 try:
902 902 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
903 903 ret = True
904 904 except IOError:
905 905 pass
906 906
907 907 return ret
908 908
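# Illustrative sketch, not part of the upstream file: an extension can wrap
# loadhgrc() to pull per-repo config from an additional, hypothetical file.
def _demo_wrapped_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret
# installed from an extension with:
# extensions.wrapfunction(localrepo, 'loadhgrc', _demo_wrapped_loadhgrc)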
909 909
910 910 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
911 911 """Perform additional actions after .hg/hgrc is loaded.
912 912
913 913 This function is called during repository loading immediately after
914 914 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
915 915
916 916 The function can be used to validate configs, automatically add
917 917 options (including extensions) based on requirements, etc.
918 918 """
919 919
920 920 # Map of requirements to list of extensions to load automatically when
921 921 # requirement is present.
922 922 autoextensions = {
923 923 b'git': [b'git'],
924 924 b'largefiles': [b'largefiles'],
925 925 b'lfs': [b'lfs'],
926 926 }
927 927
928 928 for requirement, names in sorted(autoextensions.items()):
929 929 if requirement not in requirements:
930 930 continue
931 931
932 932 for name in names:
933 933 if not ui.hasconfig(b'extensions', name):
934 934 ui.setconfig(b'extensions', name, b'', source=b'autoload')
935 935
936 936
937 937 def gathersupportedrequirements(ui):
938 938 """Determine the complete set of recognized requirements."""
939 939 # Start with all requirements supported by this file.
940 940 supported = set(localrepository._basesupported)
941 941
942 942 # Execute ``featuresetupfuncs`` entries if they belong to an extension
943 943 # relevant to this ui instance.
944 944 modules = {m.__name__ for n, m in extensions.extensions(ui)}
945 945
946 946 for fn in featuresetupfuncs:
947 947 if fn.__module__ in modules:
948 948 fn(ui, supported)
949 949
950 950 # Add derived requirements from registered compression engines.
951 951 for name in util.compengines:
952 952 engine = util.compengines[name]
953 953 if engine.available() and engine.revlogheader():
954 954 supported.add(b'exp-compression-%s' % name)
955 955 if engine.name() == b'zstd':
956 956 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
957 957
958 958 return supported
959 959
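# Illustrative sketch, not part of the upstream file: an extension advertises
# support for a custom requirement by registering a callback here; it only
# fires when the registering extension is loaded for the ui instance. The
# requirement name is hypothetical.
def _demo_featuresetup(ui, supported):
    supported.add(b'exp-demo-requirement')
# featuresetupfuncs.add(_demo_featuresetup)  # done from an extension's uisetup()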
960 960
961 961 def ensurerequirementsrecognized(requirements, supported):
962 962 """Validate that a set of local requirements is recognized.
963 963
964 964 Receives a set of requirements. Raises an ``error.RepoError`` if there
965 965 exists any requirement in that set that currently loaded code doesn't
966 966 recognize.
967 967
968 968 Returns a set of supported requirements.
969 969 """
970 970 missing = set()
971 971
972 972 for requirement in requirements:
973 973 if requirement in supported:
974 974 continue
975 975
976 976 if not requirement or not requirement[0:1].isalnum():
977 977 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
978 978
979 979 missing.add(requirement)
980 980
981 981 if missing:
982 982 raise error.RequirementError(
983 983 _(b'repository requires features unknown to this Mercurial: %s')
984 984 % b' '.join(sorted(missing)),
985 985 hint=_(
986 986 b'see https://mercurial-scm.org/wiki/MissingRequirement '
987 987 b'for more information'
988 988 ),
989 989 )
990 990
991 991
992 992 def ensurerequirementscompatible(ui, requirements):
993 993 """Validates that a set of recognized requirements is mutually compatible.
994 994
995 995 Some requirements may not be compatible with others or require
996 996 config options that aren't enabled. This function is called during
997 997 repository opening to ensure that the set of requirements needed
998 998 to open a repository is sane and compatible with config options.
999 999
1000 1000 Extensions can monkeypatch this function to perform additional
1001 1001 checking.
1002 1002
1003 1003 ``error.RepoError`` should be raised on failure.
1004 1004 """
1005 1005 if (
1006 1006 requirementsmod.SPARSE_REQUIREMENT in requirements
1007 1007 and not sparse.enabled
1008 1008 ):
1009 1009 raise error.RepoError(
1010 1010 _(
1011 1011 b'repository is using sparse feature but '
1012 1012 b'sparse is not enabled; enable the '
1013 1013 b'"sparse" extension to access'
1014 1014 )
1015 1015 )
1016 1016
1017 1017
1018 1018 def makestore(requirements, path, vfstype):
1019 1019 """Construct a storage object for a repository."""
1020 1020 if requirementsmod.STORE_REQUIREMENT in requirements:
1021 1021 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1022 1022 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1023 1023 return storemod.fncachestore(path, vfstype, dotencode)
1024 1024
1025 1025 return storemod.encodedstore(path, vfstype)
1026 1026
1027 1027 return storemod.basicstore(path, vfstype)
1028 1028
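# Illustrative sketch, not part of the upstream file: how makestore() picks
# the store implementation for a typical modern repository (the store,
# fncache, and dotencode requirements together select fncachestore).
def _demo_make_modern_store(path):
    reqs = {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
    }
    return makestore(reqs, path, lambda base: vfsmod.vfs(base, cacheaudited=True))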
1029 1029
1030 1030 def resolvestorevfsoptions(ui, requirements, features):
1031 1031 """Resolve the options to pass to the store vfs opener.
1032 1032
1033 1033 The returned dict is used to influence behavior of the storage layer.
1034 1034 """
1035 1035 options = {}
1036 1036
1037 1037 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1038 1038 options[b'treemanifest'] = True
1039 1039
1040 1040 # experimental config: format.manifestcachesize
1041 1041 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1042 1042 if manifestcachesize is not None:
1043 1043 options[b'manifestcachesize'] = manifestcachesize
1044 1044
1045 1045 # In the absence of another requirement superseding a revlog-related
1046 1046 # requirement, we have to assume the repo is using revlog version 0.
1047 1047 # This revlog format is super old and we don't bother trying to parse
1048 1048 # opener options for it because those options wouldn't do anything
1049 1049 # meaningful on such old repos.
1050 1050 if (
1051 1051 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1052 1052 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1053 1053 ):
1054 1054 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1055 1055 else: # explicitly mark repo as using revlogv0
1056 1056 options[b'revlogv0'] = True
1057 1057
1058 1058 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1059 1059 options[b'copies-storage'] = b'changeset-sidedata'
1060 1060 else:
1061 1061 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1062 1062 copiesextramode = (b'changeset-only', b'compatibility')
1063 1063 if writecopiesto in copiesextramode:
1064 1064 options[b'copies-storage'] = b'extra'
1065 1065
1066 1066 return options
1067 1067
1068 1068
1069 1069 def resolverevlogstorevfsoptions(ui, requirements, features):
1070 1070 """Resolve opener options specific to revlogs."""
1071 1071
1072 1072 options = {}
1073 1073 options[b'flagprocessors'] = {}
1074 1074
1075 1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1076 1076 options[b'revlogv1'] = True
1077 1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1078 1078 options[b'revlogv2'] = True
1079 1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1080 1080 options[b'changelogv2'] = True
1081 1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1082 1082 options[b'changelogv2.compute-rank'] = cmp_rank
1083 1083
1084 1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1085 1085 options[b'generaldelta'] = True
1086 1086
1087 1087 # experimental config: format.chunkcachesize
1088 1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1089 1089 if chunkcachesize is not None:
1090 1090 options[b'chunkcachesize'] = chunkcachesize
1091 1091
1092 1092 deltabothparents = ui.configbool(
1093 1093 b'storage', b'revlog.optimize-delta-parent-choice'
1094 1094 )
1095 1095 options[b'deltabothparents'] = deltabothparents
1096 1096 dps_cgds = ui.configint(
1097 1097 b'storage',
1098 1098 b'revlog.delta-parent-search.candidate-group-chunk-size',
1099 1099 )
1100 1100 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1101 1101 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1102 1102
1103 1103 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1104 1104 options[b'issue6528.fix-incoming'] = issue6528
1105 1105
1106 1106 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1107 1107 lazydeltabase = False
1108 1108 if lazydelta:
1109 1109 lazydeltabase = ui.configbool(
1110 1110 b'storage', b'revlog.reuse-external-delta-parent'
1111 1111 )
1112 1112 if lazydeltabase is None:
1113 1113 lazydeltabase = not scmutil.gddeltaconfig(ui)
1114 1114 options[b'lazydelta'] = lazydelta
1115 1115 options[b'lazydeltabase'] = lazydeltabase
1116 1116
1117 1117 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1118 1118 if 0 <= chainspan:
1119 1119 options[b'maxdeltachainspan'] = chainspan
1120 1120
1121 1121 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1122 1122 if mmapindexthreshold is not None:
1123 1123 options[b'mmapindexthreshold'] = mmapindexthreshold
1124 1124
1125 1125 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1126 1126 srdensitythres = float(
1127 1127 ui.config(b'experimental', b'sparse-read.density-threshold')
1128 1128 )
1129 1129 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1130 1130 options[b'with-sparse-read'] = withsparseread
1131 1131 options[b'sparse-read-density-threshold'] = srdensitythres
1132 1132 options[b'sparse-read-min-gap-size'] = srmingapsize
1133 1133
1134 1134 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1135 1135 options[b'sparse-revlog'] = sparserevlog
1136 1136 if sparserevlog:
1137 1137 options[b'generaldelta'] = True
1138 1138
1139 1139 maxchainlen = None
1140 1140 if sparserevlog:
1141 1141 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1142 1142 # experimental config: format.maxchainlen
1143 1143 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1144 1144 if maxchainlen is not None:
1145 1145 options[b'maxchainlen'] = maxchainlen
1146 1146
1147 1147 for r in requirements:
1148 1148 # we allow multiple compression engine requirements to co-exist because,
1149 1149 # strictly speaking, revlogs seem to support mixed compression styles.
1150 1150 #
1151 1151 # The compression used for new entries will be "the last one"
1152 1152 prefix = r.startswith
1153 1153 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1154 1154 options[b'compengine'] = r.split(b'-', 2)[2]
1155 1155
1156 1156 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1157 1157 if options[b'zlib.level'] is not None:
1158 1158 if not (0 <= options[b'zlib.level'] <= 9):
1159 1159 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1160 1160 raise error.Abort(msg % options[b'zlib.level'])
1161 1161 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1162 1162 if options[b'zstd.level'] is not None:
1163 1163 if not (0 <= options[b'zstd.level'] <= 22):
1164 1164 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1165 1165 raise error.Abort(msg % options[b'zstd.level'])
1166 1166
1167 1167 if requirementsmod.NARROW_REQUIREMENT in requirements:
1168 1168 options[b'enableellipsis'] = True
1169 1169
1170 1170 if ui.configbool(b'experimental', b'rust.index'):
1171 1171 options[b'rust.index'] = True
1172 1172 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1173 1173 slow_path = ui.config(
1174 1174 b'storage', b'revlog.persistent-nodemap.slow-path'
1175 1175 )
1176 1176 if slow_path not in (b'allow', b'warn', b'abort'):
1177 1177 default = ui.config_default(
1178 1178 b'storage', b'revlog.persistent-nodemap.slow-path'
1179 1179 )
1180 1180 msg = _(
1181 1181 b'unknown value for config '
1182 1182 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1183 1183 )
1184 1184 ui.warn(msg % slow_path)
1185 1185 if not ui.quiet:
1186 1186 ui.warn(_(b'falling back to default value: %s\n') % default)
1187 1187 slow_path = default
1188 1188
1189 1189 msg = _(
1190 1190 b"accessing `persistent-nodemap` repository without associated "
1191 1191 b"fast implementation."
1192 1192 )
1193 1193 hint = _(
1194 1194 b"check `hg help config.format.use-persistent-nodemap` "
1195 1195 b"for details"
1196 1196 )
1197 1197 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1198 1198 if slow_path == b'warn':
1199 1199 msg = b"warning: " + msg + b'\n'
1200 1200 ui.warn(msg)
1201 1201 if not ui.quiet:
1202 1202 hint = b'(' + hint + b')\n'
1203 1203 ui.warn(hint)
1204 1204 if slow_path == b'abort':
1205 1205 raise error.Abort(msg, hint=hint)
1206 1206 options[b'persistent-nodemap'] = True
1207 1207 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1208 1208 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1209 1209 if slow_path not in (b'allow', b'warn', b'abort'):
1210 1210 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1211 1211 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1212 1212 ui.warn(msg % slow_path)
1213 1213 if not ui.quiet:
1214 1214 ui.warn(_(b'falling back to default value: %s\n') % default)
1215 1215 slow_path = default
1216 1216
1217 1217 msg = _(
1218 1218 b"accessing `dirstate-v2` repository without associated "
1219 1219 b"fast implementation."
1220 1220 )
1221 1221 hint = _(
1222 1222 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1223 1223 )
1224 1224 if not dirstate.HAS_FAST_DIRSTATE_V2:
1225 1225 if slow_path == b'warn':
1226 1226 msg = b"warning: " + msg + b'\n'
1227 1227 ui.warn(msg)
1228 1228 if not ui.quiet:
1229 1229 hint = b'(' + hint + b')\n'
1230 1230 ui.warn(hint)
1231 1231 if slow_path == b'abort':
1232 1232 raise error.Abort(msg, hint=hint)
1233 1233 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1234 1234 options[b'persistent-nodemap.mmap'] = True
1235 1235 if ui.configbool(b'devel', b'persistent-nodemap'):
1236 1236 options[b'devel-force-nodemap'] = True
1237 1237
1238 1238 return options
1239 1239
1240 1240
1241 1241 def makemain(**kwargs):
1242 1242 """Produce a type conforming to ``ilocalrepositorymain``."""
1243 1243 return localrepository
1244 1244
1245 1245
1246 1246 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1247 1247 class revlogfilestorage:
1248 1248 """File storage when using revlogs."""
1249 1249
1250 1250 def file(self, path):
1251 1251 if path.startswith(b'/'):
1252 1252 path = path[1:]
1253 1253
1254 1254 try_split = (
1255 1255 self.currenttransaction() is not None
1256 1256 or txnutil.mayhavepending(self.root)
1257 1257 )
1258 1258
1259 1259 return filelog.filelog(self.svfs, path, try_split=try_split)
1260 1260
1261 1261
1262 1262 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1263 1263 class revlognarrowfilestorage:
1264 1264 """File storage when using revlogs and narrow files."""
1265 1265
1266 1266 def file(self, path):
1267 1267 if path.startswith(b'/'):
1268 1268 path = path[1:]
1269 1269
1270 1270 try_split = (
1271 1271 self.currenttransaction() is not None
1272 1272 or txnutil.mayhavepending(self.root)
1273 1273 )
1274 1274 return filelog.narrowfilelog(
1275 1275 self.svfs, path, self._storenarrowmatch, try_split=try_split
1276 1276 )
1277 1277
1278 1278
1279 1279 def makefilestorage(requirements, features, **kwargs):
1280 1280 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1281 1281 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1282 1282 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1283 1283
1284 1284 if requirementsmod.NARROW_REQUIREMENT in requirements:
1285 1285 return revlognarrowfilestorage
1286 1286 else:
1287 1287 return revlogfilestorage
1288 1288
1289 1289
1290 1290 # List of repository interfaces and factory functions for them. Each
1291 1291 # will be called in order during ``makelocalrepository()`` to iteratively
1292 1292 # derive the final type for a local repository instance. We capture the
1293 1293 # function as a lambda so we don't hold a reference and the module-level
1294 1294 # functions can be wrapped.
1295 1295 REPO_INTERFACES = [
1296 1296 (repository.ilocalrepositorymain, lambda: makemain),
1297 1297 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1298 1298 ]
1299 1299
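# Illustrative sketch, not part of the upstream file: because REPO_INTERFACES
# captures the factories through lambdas, replacing the module-level function
# (e.g. with extensions.wrapfunction) takes effect on the next repository
# open. The requirement checked below is hypothetical.
def _demo_wrapped_makefilestorage(orig, **kwargs):
    cls = orig(**kwargs)
    if b'exp-demo-requirement' not in kwargs['requirements']:
        return cls

    class demofilestorage(cls):  # custom file storage would go here
        pass

    return demofilestorage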
1300 1300
1301 1301 @interfaceutil.implementer(repository.ilocalrepositorymain)
1302 1302 class localrepository:
1303 1303 """Main class for representing local repositories.
1304 1304
1305 1305 All local repositories are instances of this class.
1306 1306
1307 1307 Constructed on its own, instances of this class are not usable as
1308 1308 repository objects. To obtain a usable repository object, call
1309 1309 ``hg.repository()``, ``localrepo.instance()``, or
1310 1310 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1311 1311 ``instance()`` adds support for creating new repositories.
1312 1312 ``hg.repository()`` adds more extension integration, including calling
1313 1313 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1314 1314 used.
1315 1315 """
1316 1316
1317 1317 _basesupported = {
1318 1318 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1319 1319 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1320 1320 requirementsmod.CHANGELOGV2_REQUIREMENT,
1321 1321 requirementsmod.COPIESSDC_REQUIREMENT,
1322 1322 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1323 1323 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1324 1324 requirementsmod.DOTENCODE_REQUIREMENT,
1325 1325 requirementsmod.FNCACHE_REQUIREMENT,
1326 1326 requirementsmod.GENERALDELTA_REQUIREMENT,
1327 1327 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1328 1328 requirementsmod.NODEMAP_REQUIREMENT,
1329 1329 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1330 1330 requirementsmod.REVLOGV1_REQUIREMENT,
1331 1331 requirementsmod.REVLOGV2_REQUIREMENT,
1332 1332 requirementsmod.SHARED_REQUIREMENT,
1333 1333 requirementsmod.SHARESAFE_REQUIREMENT,
1334 1334 requirementsmod.SPARSE_REQUIREMENT,
1335 1335 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1336 1336 requirementsmod.STORE_REQUIREMENT,
1337 1337 requirementsmod.TREEMANIFEST_REQUIREMENT,
1338 1338 }
1339 1339
1340 1340 # list of prefixes for files which can be written without 'wlock'
1341 1341 # Extensions should extend this list when needed
1342 1342 _wlockfreeprefix = {
1343 1343 # We might consider requiring 'wlock' for the next
1344 1344 # two, but pretty much all the existing code assumes
1345 1345 # wlock is not needed so we keep them excluded for
1346 1346 # now.
1347 1347 b'hgrc',
1348 1348 b'requires',
1349 1349 # XXX cache is a complicated business; someone
1350 1350 # should investigate this in depth at some point
1351 1351 b'cache/',
1352 1352 # XXX bisect was still a bit too messy at the time
1353 1353 # this changeset was introduced. Someone should fix
1354 1354 # the remaining bit and drop this line
1355 1355 b'bisect.state',
1356 1356 }
1357 1357
1358 1358 def __init__(
1359 1359 self,
1360 1360 baseui,
1361 1361 ui,
1362 1362 origroot: bytes,
1363 1363 wdirvfs: vfsmod.vfs,
1364 1364 hgvfs: vfsmod.vfs,
1365 1365 requirements,
1366 1366 supportedrequirements,
1367 1367 sharedpath: bytes,
1368 1368 store,
1369 1369 cachevfs: vfsmod.vfs,
1370 1370 wcachevfs: vfsmod.vfs,
1371 1371 features,
1372 1372 intents=None,
1373 1373 ):
1374 1374 """Create a new local repository instance.
1375 1375
1376 1376 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1377 1377 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1378 1378 object.
1379 1379
1380 1380 Arguments:
1381 1381
1382 1382 baseui
1383 1383 ``ui.ui`` instance that ``ui`` argument was based off of.
1384 1384
1385 1385 ui
1386 1386 ``ui.ui`` instance for use by the repository.
1387 1387
1388 1388 origroot
1389 1389 ``bytes`` path to working directory root of this repository.
1390 1390
1391 1391 wdirvfs
1392 1392 ``vfs.vfs`` rooted at the working directory.
1393 1393
1394 1394 hgvfs
1395 1395 ``vfs.vfs`` rooted at .hg/
1396 1396
1397 1397 requirements
1398 1398 ``set`` of bytestrings representing repository opening requirements.
1399 1399
1400 1400 supportedrequirements
1401 1401 ``set`` of bytestrings representing repository requirements that we
1402 1402 know how to open. May be a superset of ``requirements``.
1403 1403
1404 1404 sharedpath
1405 1405 ``bytes`` defining the path to the storage base directory. Points to a
1406 1406 ``.hg/`` directory somewhere.
1407 1407
1408 1408 store
1409 1409 ``store.basicstore`` (or derived) instance providing access to
1410 1410 versioned storage.
1411 1411
1412 1412 cachevfs
1413 1413 ``vfs.vfs`` used for cache files.
1414 1414
1415 1415 wcachevfs
1416 1416 ``vfs.vfs`` used for cache files related to the working copy.
1417 1417
1418 1418 features
1419 1419 ``set`` of bytestrings defining features/capabilities of this
1420 1420 instance.
1421 1421
1422 1422 intents
1423 1423 ``set`` of system strings indicating what this repo will be used
1424 1424 for.
1425 1425 """
1426 1426 self.baseui = baseui
1427 1427 self.ui = ui
1428 1428 self.origroot = origroot
1429 1429 # vfs rooted at working directory.
1430 1430 self.wvfs = wdirvfs
1431 1431 self.root = wdirvfs.base
1432 1432 # vfs rooted at .hg/. Used to access most non-store paths.
1433 1433 self.vfs = hgvfs
1434 1434 self.path = hgvfs.base
1435 1435 self.requirements = requirements
1436 1436 self.nodeconstants = sha1nodeconstants
1437 1437 self.nullid = self.nodeconstants.nullid
1438 1438 self.supported = supportedrequirements
1439 1439 self.sharedpath = sharedpath
1440 1440 self.store = store
1441 1441 self.cachevfs = cachevfs
1442 1442 self.wcachevfs = wcachevfs
1443 1443 self.features = features
1444 1444
1445 1445 self.filtername = None
1446 1446
1447 1447 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1448 1448 b'devel', b'check-locks'
1449 1449 ):
1450 1450 self.vfs.audit = self._getvfsward(self.vfs.audit)
1451 1451 # A list of callbacks to shape the phase if no data were found.
1452 1452 # Callbacks are in the form: func(repo, roots) --> processed root.
1453 1453 # This list is to be filled by extensions during repo setup
1454 1454 self._phasedefaults = []
1455 1455
1456 1456 color.setup(self.ui)
1457 1457
1458 1458 self.spath = self.store.path
1459 1459 self.svfs = self.store.vfs
1460 1460 self.sjoin = self.store.join
1461 1461 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1462 1462 b'devel', b'check-locks'
1463 1463 ):
1464 1464 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1465 1465 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1466 1466 else: # standard vfs
1467 1467 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1468 1468
1469 1469 self._dirstatevalidatewarned = False
1470 1470
1471 1471 self._branchcaches = branchmap.BranchMapCache()
1472 1472 self._revbranchcache = None
1473 1473 self._filterpats = {}
1474 1474 self._datafilters = {}
1475 1475 self._transref = self._lockref = self._wlockref = None
1476 1476
1477 1477 # A cache for various files under .hg/ that tracks file changes,
1478 1478 # (used by the filecache decorator)
1479 1479 #
1480 1480 # Maps a property name to its util.filecacheentry
1481 1481 self._filecache = {}
1482 1482
1483 1483 # hold sets of revisions to be filtered
1484 1484 # should be cleared when something might have changed the filter value:
1485 1485 # - new changesets,
1486 1486 # - phase change,
1487 1487 # - new obsolescence marker,
1488 1488 # - working directory parent change,
1489 1489 # - bookmark changes
1490 1490 self.filteredrevcache = {}
1491 1491
1492 1492 self._dirstate = None
1493 1493 # post-dirstate-status hooks
1494 1494 self._postdsstatus = []
1495 1495
1496 1496 self._pending_narrow_pats = None
1497 1497 self._pending_narrow_pats_dirstate = None
1498 1498
1499 1499 # generic mapping between names and nodes
1500 1500 self.names = namespaces.namespaces()
1501 1501
1502 1502 # Key to signature value.
1503 1503 self._sparsesignaturecache = {}
1504 1504 # Signature to cached matcher instance.
1505 1505 self._sparsematchercache = {}
1506 1506
1507 1507 self._extrafilterid = repoview.extrafilter(ui)
1508 1508
1509 1509 self.filecopiesmode = None
1510 1510 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1511 1511 self.filecopiesmode = b'changeset-sidedata'
1512 1512
1513 1513 self._wanted_sidedata = set()
1514 1514 self._sidedata_computers = {}
1515 1515 sidedatamod.set_sidedata_spec_for_repo(self)
1516 1516
1517 1517 def _getvfsward(self, origfunc):
1518 1518 """build a ward for self.vfs"""
1519 1519 rref = weakref.ref(self)
1520 1520
1521 1521 def checkvfs(path, mode=None):
1522 1522 ret = origfunc(path, mode=mode)
1523 1523 repo = rref()
1524 1524 if (
1525 1525 repo is None
1526 1526 or not util.safehasattr(repo, '_wlockref')
1527 1527 or not util.safehasattr(repo, '_lockref')
1528 1528 ):
1529 1529 return
1530 1530 if mode in (None, b'r', b'rb'):
1531 1531 return
1532 1532 if path.startswith(repo.path):
1533 1533 # truncate name relative to the repository (.hg)
1534 1534 path = path[len(repo.path) + 1 :]
1535 1535 if path.startswith(b'cache/'):
1536 1536 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1537 1537 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1538 1538 # path prefixes covered by 'lock'
1539 1539 vfs_path_prefixes = (
1540 1540 b'journal.',
1541 1541 b'undo.',
1542 1542 b'strip-backup/',
1543 1543 b'cache/',
1544 1544 )
1545 1545 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1546 1546 if repo._currentlock(repo._lockref) is None:
1547 1547 repo.ui.develwarn(
1548 1548 b'write with no lock: "%s"' % path,
1549 1549 stacklevel=3,
1550 1550 config=b'check-locks',
1551 1551 )
1552 1552 elif repo._currentlock(repo._wlockref) is None:
1553 1553 # rest of vfs files are covered by 'wlock'
1554 1554 #
1555 1555 # exclude special files
1556 1556 for prefix in self._wlockfreeprefix:
1557 1557 if path.startswith(prefix):
1558 1558 return
1559 1559 repo.ui.develwarn(
1560 1560 b'write with no wlock: "%s"' % path,
1561 1561 stacklevel=3,
1562 1562 config=b'check-locks',
1563 1563 )
1564 1564 return ret
1565 1565
1566 1566 return checkvfs
1567 1567
1568 1568 def _getsvfsward(self, origfunc):
1569 1569 """build a ward for self.svfs"""
1570 1570 rref = weakref.ref(self)
1571 1571
1572 1572 def checksvfs(path, mode=None):
1573 1573 ret = origfunc(path, mode=mode)
1574 1574 repo = rref()
1575 1575 if repo is None or not util.safehasattr(repo, '_lockref'):
1576 1576 return
1577 1577 if mode in (None, b'r', b'rb'):
1578 1578 return
1579 1579 if path.startswith(repo.sharedpath):
1580 1580 # truncate name relative to the repository (.hg)
1581 1581 path = path[len(repo.sharedpath) + 1 :]
1582 1582 if repo._currentlock(repo._lockref) is None:
1583 1583 repo.ui.develwarn(
1584 1584 b'write with no lock: "%s"' % path, stacklevel=4
1585 1585 )
1586 1586 return ret
1587 1587
1588 1588 return checksvfs
1589 1589
1590 1590 @property
1591 1591 def vfs_map(self):
1592 1592 return {
1593 1593 b'': self.svfs,
1594 1594 b'plain': self.vfs,
1595 1595 b'store': self.svfs,
1596 1596 }
1597 1597
1598 1598 def close(self):
1599 1599 self._writecaches()
1600 1600
1601 1601 def _writecaches(self):
1602 1602 if self._revbranchcache:
1603 1603 self._revbranchcache.write()
1604 1604
1605 1605 def _restrictcapabilities(self, caps):
1606 1606 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1607 1607 caps = set(caps)
1608 1608 capsblob = bundle2.encodecaps(
1609 1609 bundle2.getrepocaps(self, role=b'client')
1610 1610 )
1611 1611 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1612 1612 if self.ui.configbool(b'experimental', b'narrow'):
1613 1613 caps.add(wireprototypes.NARROWCAP)
1614 1614 return caps
1615 1615
1616 1616 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1617 1617 # self -> auditor -> self._checknested -> self
1618 1618
1619 1619 @property
1620 1620 def auditor(self):
1621 1621 # This is only used by context.workingctx.match in order to
1622 1622 # detect files in subrepos.
1623 1623 return pathutil.pathauditor(self.root, callback=self._checknested)
1624 1624
1625 1625 @property
1626 1626 def nofsauditor(self):
1627 1627 # This is only used by context.basectx.match in order to detect
1628 1628 # files in subrepos.
1629 1629 return pathutil.pathauditor(
1630 1630 self.root, callback=self._checknested, realfs=False, cached=True
1631 1631 )
1632 1632
1633 1633 def _checknested(self, path):
1634 1634 """Determine if path is a legal nested repository."""
1635 1635 if not path.startswith(self.root):
1636 1636 return False
1637 1637 subpath = path[len(self.root) + 1 :]
1638 1638 normsubpath = util.pconvert(subpath)
1639 1639
1640 1640 # XXX: Checking against the current working copy is wrong in
1641 1641 # the sense that it can reject things like
1642 1642 #
1643 1643 # $ hg cat -r 10 sub/x.txt
1644 1644 #
1645 1645 # if sub/ is no longer a subrepository in the working copy
1646 1646 # parent revision.
1647 1647 #
1648 1648 # However, it can of course also allow things that would have
1649 1649 # been rejected before, such as the above cat command if sub/
1650 1650 # is a subrepository now, but was a normal directory before.
1651 1651 # The old path auditor would have rejected by mistake since it
1652 1652 # panics when it sees sub/.hg/.
1653 1653 #
1654 1654 # All in all, checking against the working copy seems sensible
1655 1655 # since we want to prevent access to nested repositories on
1656 1656 # the filesystem *now*.
1657 1657 ctx = self[None]
1658 1658 parts = util.splitpath(subpath)
1659 1659 while parts:
1660 1660 prefix = b'/'.join(parts)
1661 1661 if prefix in ctx.substate:
1662 1662 if prefix == normsubpath:
1663 1663 return True
1664 1664 else:
1665 1665 sub = ctx.sub(prefix)
1666 1666 return sub.checknested(subpath[len(prefix) + 1 :])
1667 1667 else:
1668 1668 parts.pop()
1669 1669 return False
1670 1670
1671 1671 def peer(self, path=None, remotehidden=False):
1672 1672 return localpeer(
1673 1673 self, path=path, remotehidden=remotehidden
1674 1674 ) # not cached to avoid reference cycle
1675 1675
1676 1676 def unfiltered(self):
1677 1677 """Return unfiltered version of the repository
1678 1678
1679 1679 Intended to be overridden by filtered repos."""
1680 1680 return self
1681 1681
1682 1682 def filtered(self, name, visibilityexceptions=None):
1683 1683 """Return a filtered version of a repository
1684 1684
1685 1685 The `name` parameter is the identifier of the requested view. This
1686 1686 will return a repoview object set "exactly" to the specified view.
1687 1687
1688 1688 This function does not apply recursive filtering to a repository. For
1689 1689 example calling `repo.filtered("served")` will return a repoview using
1690 1690 the "served" view, regardless of the initial view used by `repo`.
1691 1691
1692 1692 In other words, there is always only one level of `repoview` "filtering".
1693 1693 """
1694 1694 if self._extrafilterid is not None and b'%' not in name:
1695 1695 name = name + b'%' + self._extrafilterid
1696 1696
1697 1697 cls = repoview.newtype(self.unfiltered().__class__)
1698 1698 return cls(self, name, visibilityexceptions)
1699 1699
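# Editor's note: an illustrative sketch (not part of the original file)
# of requesting views, assuming `repo` is a loaded localrepository:
#
#     served = repo.filtered(b'served')
#     visible = repo.filtered(b'visible')
#     # filtering never stacks: this is still exactly the "served" view
#     served_again = visible.filtered(b'served')
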
1700 1700 @mixedrepostorecache(
1701 1701 (b'bookmarks', b'plain'),
1702 1702 (b'bookmarks.current', b'plain'),
1703 1703 (b'bookmarks', b''),
1704 1704 (b'00changelog.i', b''),
1705 1705 )
1706 1706 def _bookmarks(self):
1707 1707 # Since the multiple files involved in the transaction cannot be
1708 1708 # written atomically (with current repository format), there is a race
1709 1709 # condition here.
1710 1710 #
1711 1711 # 1) changelog content A is read
1712 1712 # 2) outside transaction update changelog to content B
1713 1713 # 3) outside transaction update bookmark file referring to content B
1714 1714 # 4) bookmarks file content is read and filtered against changelog-A
1715 1715 #
1716 1716 # When this happens, bookmarks against nodes missing from A are dropped.
1717 1717 #
1718 1718 # Having this happen during a read is not great, but it becomes worse
1719 1719 # when it happens during a write, because bookmarks pointing to the
1720 1720 # "unknown" nodes will be dropped for good. However, writes happen within
1721 1721 # locks. This locking makes it possible to have a race-free consistent read.
1722 1722 # For this purpose, data read from disk before locking is
1723 1723 # "invalidated" right after the locks are taken. These invalidations are
1724 1724 # "light": the `filecache` mechanism keeps the data in memory and will
1725 1725 # reuse it if the underlying files did not change. Not parsing the
1726 1726 # same data multiple times helps performance.
1727 1727 #
1728 1728 # Unfortunately, in the case described above, the files tracked by the
1729 1729 # bookmarks file cache might not have changed, but the in-memory
1730 1730 # content is still "wrong" because we used an older changelog content
1731 1731 # to process the on-disk data. So after locking, the changelog would be
1732 1732 # refreshed but `_bookmarks` would be preserved.
1733 1733 # Adding `00changelog.i` to the list of tracked files is not
1734 1734 # enough, because at the time we build the content for `_bookmarks` in
1735 1735 # (4), the changelog file has already diverged from the content used
1736 1736 # for loading `changelog` in (1).
1737 1737 #
1738 1738 # To prevent the issue, we force the changelog to be explicitly
1739 1739 # reloaded while computing `_bookmarks`. The data race can still happen
1740 1740 # without the lock (with a narrower window), but it would no longer go
1741 1741 # undetected during the lock time refresh.
1742 1742 #
1743 1743 # The new schedule is as follows:
1744 1744 #
1745 1745 # 1) filecache logic detect that `_bookmarks` needs to be computed
1746 1746 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1747 1747 # 3) We force `changelog` filecache to be tested
1748 1748 # 4) cachestat for `changelog` are captured (for changelog)
1749 1749 # 5) `_bookmarks` is computed and cached
1750 1750 #
1751 1751 # The step in (3) ensures we have a changelog at least as recent as the
1752 1752 # cache stat computed in (1). As a result, at locking time:
1753 1753 # * if the changelog did not change since (1) -> we can reuse the data
1754 1754 # * otherwise -> the bookmarks get refreshed.
1755 1755 self._refreshchangelog()
1756 1756 return bookmarks.bmstore(self)
1757 1757
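# Editor's note: per the schedule above, a hedged usage sketch; a caller
# that needs race-free bookmark data should read it under the store lock
# so the filecache re-validation can kick in:
#
#     with repo.lock():
#         marks = repo._bookmarks  # changelog is force-checked first
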
1758 1758 def _refreshchangelog(self):
1759 1759 """make sure the in memory changelog match the on-disk one"""
1760 1760 if 'changelog' in vars(self) and self.currenttransaction() is None:
1761 1761 del self.changelog
1762 1762
1763 1763 @property
1764 1764 def _activebookmark(self):
1765 1765 return self._bookmarks.active
1766 1766
1767 1767 # _phasesets depend on the changelog. What we need is to call
1768 1768 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1769 1769 # can't be easily expressed in the filecache mechanism.
1770 1770 @storecache(b'phaseroots', b'00changelog.i')
1771 1771 def _phasecache(self):
1772 1772 return phases.phasecache(self, self._phasedefaults)
1773 1773
1774 1774 @storecache(b'obsstore')
1775 1775 def obsstore(self):
1776 1776 return obsolete.makestore(self.ui, self)
1777 1777
1778 1778 @changelogcache()
1779 1779 def changelog(repo):
1780 1780 # load dirstate before changelog to avoid race see issue6303
1781 1781 repo.dirstate.prefetch_parents()
1782 1782 return repo.store.changelog(
1783 1783 txnutil.mayhavepending(repo.root),
1784 1784 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1785 1785 )
1786 1786
1787 1787 @manifestlogcache()
1788 1788 def manifestlog(self):
1789 1789 return self.store.manifestlog(self, self._storenarrowmatch)
1790 1790
1791 1791 @unfilteredpropertycache
1792 1792 def dirstate(self):
1793 1793 if self._dirstate is None:
1794 1794 self._dirstate = self._makedirstate()
1795 1795 else:
1796 1796 self._dirstate.refresh()
1797 1797 return self._dirstate
1798 1798
1799 1799 def _makedirstate(self):
1800 1800 """Extension point for wrapping the dirstate per-repo."""
1801 1801 sparsematchfn = None
1802 1802 if sparse.use_sparse(self):
1803 1803 sparsematchfn = lambda: sparse.matcher(self)
1804 1804 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1805 1805 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1806 1806 use_dirstate_v2 = v2_req in self.requirements
1807 1807 use_tracked_hint = th in self.requirements
1808 1808
1809 1809 return dirstate.dirstate(
1810 1810 self.vfs,
1811 1811 self.ui,
1812 1812 self.root,
1813 1813 self._dirstatevalidate,
1814 1814 sparsematchfn,
1815 1815 self.nodeconstants,
1816 1816 use_dirstate_v2,
1817 1817 use_tracked_hint=use_tracked_hint,
1818 1818 )
1819 1819
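# Editor's note: a hypothetical sketch of using the extension point
# above from an extension's reposetup (names are illustrative only):
#
#     def reposetup(ui, repo):
#         class wrappedrepo(repo.__class__):
#             def _makedirstate(self):
#                 ds = super()._makedirstate()
#                 # observe or decorate the dirstate here
#                 return ds
#         repo.__class__ = wrappedrepo
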
1820 1820 def _dirstatevalidate(self, node):
1821 1821 okay = True
1822 1822 try:
1823 1823 self.changelog.rev(node)
1824 1824 except error.LookupError:
1825 1825 # If the parents are unknown, it might just be because the changelog
1826 1826 # in memory is lagging behind the dirstate in memory. So try to
1827 1827 # refresh the changelog first.
1828 1828 #
1829 1829 # We only do so if we don't hold the lock; if we do hold the lock,
1830 1830 # the invalidation at that time should have taken care of this and
1831 1831 # something is very fishy.
1832 1832 if self.currentlock() is None:
1833 1833 self.invalidate()
1834 1834 try:
1835 1835 self.changelog.rev(node)
1836 1836 except error.LookupError:
1837 1837 okay = False
1838 1838 else:
1839 1839 # XXX we should consider raising an error here.
1840 1840 okay = False
1841 1841 if okay:
1842 1842 return node
1843 1843 else:
1844 1844 if not self._dirstatevalidatewarned:
1845 1845 self._dirstatevalidatewarned = True
1846 1846 self.ui.warn(
1847 1847 _(b"warning: ignoring unknown working parent %s!\n")
1848 1848 % short(node)
1849 1849 )
1850 1850 return self.nullid
1851 1851
1852 1852 @storecache(narrowspec.FILENAME)
1853 1853 def narrowpats(self):
1854 1854 """matcher patterns for this repository's narrowspec
1855 1855
1856 1856 A tuple of (includes, excludes).
1857 1857 """
1858 1858 # the narrow management should probably move into its own object
1859 1859 val = self._pending_narrow_pats
1860 1860 if val is None:
1861 1861 val = narrowspec.load(self)
1862 1862 return val
1863 1863
1864 1864 @storecache(narrowspec.FILENAME)
1865 1865 def _storenarrowmatch(self):
1866 1866 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1867 1867 return matchmod.always()
1868 1868 include, exclude = self.narrowpats
1869 1869 return narrowspec.match(self.root, include=include, exclude=exclude)
1870 1870
1871 1871 @storecache(narrowspec.FILENAME)
1872 1872 def _narrowmatch(self):
1873 1873 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1874 1874 return matchmod.always()
1875 1875 narrowspec.checkworkingcopynarrowspec(self)
1876 1876 include, exclude = self.narrowpats
1877 1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1878 1878
1879 1879 def narrowmatch(self, match=None, includeexact=False):
1880 1880 """matcher corresponding the the repo's narrowspec
1881 1881
1882 1882 If `match` is given, then that will be intersected with the narrow
1883 1883 matcher.
1884 1884
1885 1885 If `includeexact` is True, then any exact matches from `match` will
1886 1886 be included even if they're outside the narrowspec.
1887 1887 """
1888 1888 if match:
1889 1889 if includeexact and not self._narrowmatch.always():
1890 1890 # do not exclude explicitly-specified paths so that they can
1891 1891 # be warned later on
1892 1892 em = matchmod.exact(match.files())
1893 1893 nm = matchmod.unionmatcher([self._narrowmatch, em])
1894 1894 return matchmod.intersectmatchers(match, nm)
1895 1895 return matchmod.intersectmatchers(match, self._narrowmatch)
1896 1896 return self._narrowmatch
1897 1897
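# Editor's note: an illustrative sketch (assumed usage, not from this
# file) of intersecting a caller-supplied matcher with the narrowspec:
#
#     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#     nm = repo.narrowmatch(m)    # matches only inside both restrictions
#     everything_narrow = repo.narrowmatch()  # bare narrowspec matcher
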
1898 1898 def setnarrowpats(self, newincludes, newexcludes):
1899 1899 narrowspec.save(self, newincludes, newexcludes)
1900 1900 self.invalidate(clearfilecache=True)
1901 1901
1902 1902 @unfilteredpropertycache
1903 1903 def _quick_access_changeid_null(self):
1904 1904 return {
1905 1905 b'null': (nullrev, self.nodeconstants.nullid),
1906 1906 nullrev: (nullrev, self.nodeconstants.nullid),
1907 1907 self.nullid: (nullrev, self.nullid),
1908 1908 }
1909 1909
1910 1910 @unfilteredpropertycache
1911 1911 def _quick_access_changeid_wc(self):
1912 1912 # also fast path access to the working copy parents
1913 1913 # however, only do it for filters that ensure the wc is visible.
1914 1914 quick = self._quick_access_changeid_null.copy()
1915 1915 cl = self.unfiltered().changelog
1916 1916 for node in self.dirstate.parents():
1917 1917 if node == self.nullid:
1918 1918 continue
1919 1919 rev = cl.index.get_rev(node)
1920 1920 if rev is None:
1921 1921 # unknown working copy parent case:
1922 1922 #
1923 1923 # skip the fast path and let higher code deal with it
1924 1924 continue
1925 1925 pair = (rev, node)
1926 1926 quick[rev] = pair
1927 1927 quick[node] = pair
1928 1928 # also add the parents of the parents
1929 1929 for r in cl.parentrevs(rev):
1930 1930 if r == nullrev:
1931 1931 continue
1932 1932 n = cl.node(r)
1933 1933 pair = (r, n)
1934 1934 quick[r] = pair
1935 1935 quick[n] = pair
1936 1936 p1node = self.dirstate.p1()
1937 1937 if p1node != self.nullid:
1938 1938 quick[b'.'] = quick[p1node]
1939 1939 return quick
1940 1940
1941 1941 @unfilteredmethod
1942 1942 def _quick_access_changeid_invalidate(self):
1943 1943 if '_quick_access_changeid_wc' in vars(self):
1944 1944 del self.__dict__['_quick_access_changeid_wc']
1945 1945
1946 1946 @property
1947 1947 def _quick_access_changeid(self):
1948 1948 """an helper dictionnary for __getitem__ calls
1949 1949
1950 1950 This contains a list of symbols we can recognise right away without
1951 1951 further processing.
1952 1952 """
1953 1953 if self.filtername in repoview.filter_has_wc:
1954 1954 return self._quick_access_changeid_wc
1955 1955 return self._quick_access_changeid_null
1956 1956
1957 1957 def __getitem__(self, changeid):
1958 1958 # dealing with special cases
1959 1959 if changeid is None:
1960 1960 return context.workingctx(self)
1961 1961 if isinstance(changeid, context.basectx):
1962 1962 return changeid
1963 1963
1964 1964 # dealing with multiple revisions
1965 1965 if isinstance(changeid, slice):
1966 1966 # wdirrev isn't contiguous so the slice shouldn't include it
1967 1967 return [
1968 1968 self[i]
1969 1969 for i in range(*changeid.indices(len(self)))
1970 1970 if i not in self.changelog.filteredrevs
1971 1971 ]
1972 1972
1973 1973 # dealing with some special values
1974 1974 quick_access = self._quick_access_changeid.get(changeid)
1975 1975 if quick_access is not None:
1976 1976 rev, node = quick_access
1977 1977 return context.changectx(self, rev, node, maybe_filtered=False)
1978 1978 if changeid == b'tip':
1979 1979 node = self.changelog.tip()
1980 1980 rev = self.changelog.rev(node)
1981 1981 return context.changectx(self, rev, node)
1982 1982
1983 1983 # dealing with arbitrary values
1984 1984 try:
1985 1985 if isinstance(changeid, int):
1986 1986 node = self.changelog.node(changeid)
1987 1987 rev = changeid
1988 1988 elif changeid == b'.':
1989 1989 # this is a hack to delay/avoid loading obsmarkers
1990 1990 # when we know that '.' won't be hidden
1991 1991 node = self.dirstate.p1()
1992 1992 rev = self.unfiltered().changelog.rev(node)
1993 1993 elif len(changeid) == self.nodeconstants.nodelen:
1994 1994 try:
1995 1995 node = changeid
1996 1996 rev = self.changelog.rev(changeid)
1997 1997 except error.FilteredLookupError:
1998 1998 changeid = hex(changeid) # for the error message
1999 1999 raise
2000 2000 except LookupError:
2001 2001 # check if it might have come from damaged dirstate
2002 2002 #
2003 2003 # XXX we could avoid the unfiltered if we had a recognizable
2004 2004 # exception for filtered changeset access
2005 2005 if (
2006 2006 self.local()
2007 2007 and changeid in self.unfiltered().dirstate.parents()
2008 2008 ):
2009 2009 msg = _(b"working directory has unknown parent '%s'!")
2010 2010 raise error.Abort(msg % short(changeid))
2011 2011 changeid = hex(changeid) # for the error message
2012 2012 raise
2013 2013
2014 2014 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2015 2015 node = bin(changeid)
2016 2016 rev = self.changelog.rev(node)
2017 2017 else:
2018 2018 raise error.ProgrammingError(
2019 2019 b"unsupported changeid '%s' of type %s"
2020 2020 % (changeid, pycompat.bytestr(type(changeid)))
2021 2021 )
2022 2022
2023 2023 return context.changectx(self, rev, node)
2024 2024
2025 2025 except (error.FilteredIndexError, error.FilteredLookupError):
2026 2026 raise error.FilteredRepoLookupError(
2027 2027 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2028 2028 )
2029 2029 except (IndexError, LookupError):
2030 2030 raise error.RepoLookupError(
2031 2031 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2032 2032 )
2033 2033 except error.WdirUnsupported:
2034 2034 return context.workingctx(self)
2035 2035
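# Editor's note: a summary sketch of the lookup forms handled above,
# assuming `repo` is loaded and `node` is a binary node of
# nodeconstants.nodelen bytes:
#
#     repo[None]    # working directory context
#     repo[b'tip']  # symbolic tip
#     repo[b'.']    # first working-copy parent (fast-pathed)
#     repo[5]       # integer revision
#     repo[node]    # binary node
#     repo[0:3]     # slice -> list of changectx, filtered revs skipped
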
2036 2036 def __contains__(self, changeid):
2037 2037 """True if the given changeid exists"""
2038 2038 try:
2039 2039 self[changeid]
2040 2040 return True
2041 2041 except error.RepoLookupError:
2042 2042 return False
2043 2043
2044 2044 def __nonzero__(self):
2045 2045 return True
2046 2046
2047 2047 __bool__ = __nonzero__
2048 2048
2049 2049 def __len__(self):
2050 2050 # no need to pay the cost of repoview.changelog
2051 2051 unfi = self.unfiltered()
2052 2052 return len(unfi.changelog)
2053 2053
2054 2054 def __iter__(self):
2055 2055 return iter(self.changelog)
2056 2056
2057 2057 def revs(self, expr: bytes, *args):
2058 2058 """Find revisions matching a revset.
2059 2059
2060 2060 The revset is specified as a string ``expr`` that may contain
2061 2061 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2062 2062
2063 2063 Revset aliases from the configuration are not expanded. To expand
2064 2064 user aliases, consider calling ``scmutil.revrange()`` or
2065 2065 ``repo.anyrevs([expr], user=True)``.
2066 2066
2067 2067 Returns a smartset.abstractsmartset, which is a list-like interface
2068 2068 that contains integer revisions.
2069 2069 """
2070 2070 tree = revsetlang.spectree(expr, *args)
2071 2071 return revset.makematcher(tree)(self)
2072 2072
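# Editor's note: illustrative revset calls (example expressions only),
# using the %-formatting documented by revsetlang.formatspec:
#
#     repo.revs(b'heads(all())')
#     repo.revs(b'%d::%d', 2, 10)          # %d: integer revision
#     repo.revs(b'branch(%s)', b'stable')  # %s: bytes string
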
2073 2073 def set(self, expr: bytes, *args):
2074 2074 """Find revisions matching a revset and emit changectx instances.
2075 2075
2076 2076 This is a convenience wrapper around ``revs()`` that iterates the
2077 2077 result and is a generator of changectx instances.
2078 2078
2079 2079 Revset aliases from the configuration are not expanded. To expand
2080 2080 user aliases, consider calling ``scmutil.revrange()``.
2081 2081 """
2082 2082 for r in self.revs(expr, *args):
2083 2083 yield self[r]
2084 2084
2085 2085 def anyrevs(self, specs: bytes, user=False, localalias=None):
2086 2086 """Find revisions matching one of the given revsets.
2087 2087
2088 2088 Revset aliases from the configuration are not expanded by default. To
2089 2089 expand user aliases, specify ``user=True``. To provide some local
2090 2090 definitions overriding user aliases, set ``localalias`` to
2091 2091 ``{name: definitionstring}``.
2092 2092 """
2093 2093 if specs == [b'null']:
2094 2094 return revset.baseset([nullrev])
2095 2095 if specs == [b'.']:
2096 2096 quick_data = self._quick_access_changeid.get(b'.')
2097 2097 if quick_data is not None:
2098 2098 return revset.baseset([quick_data[0]])
2099 2099 if user:
2100 2100 m = revset.matchany(
2101 2101 self.ui,
2102 2102 specs,
2103 2103 lookup=revset.lookupfn(self),
2104 2104 localalias=localalias,
2105 2105 )
2106 2106 else:
2107 2107 m = revset.matchany(None, specs, localalias=localalias)
2108 2108 return m(self)
2109 2109
2110 2110 def url(self) -> bytes:
2111 2111 return b'file:' + self.root
2112 2112
2113 2113 def hook(self, name, throw=False, **args):
2114 2114 """Call a hook, passing this repo instance.
2115 2115
2116 2116 This is a convenience method to aid invoking hooks. Extensions likely
2117 2117 won't call this unless they have registered a custom hook or are
2118 2118 replacing code that is expected to call a hook.
2119 2119 """
2120 2120 return hook.hook(self.ui, self, name, throw, **args)
2121 2121
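# Editor's note: for illustration, this mirrors the pretxnopen
# invocation made by transaction() later in this class:
#
#     repo.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
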
2122 2122 @filteredpropertycache
2123 2123 def _tagscache(self):
2124 2124 """Returns a tagscache object that contains various tags related
2125 2125 caches."""
2126 2126
2127 2127 # This simplifies its cache management by having one decorated
2128 2128 # function (this one) and the rest simply fetch things from it.
2129 2129 class tagscache:
2130 2130 def __init__(self):
2131 2131 # These two define the set of tags for this repository. tags
2132 2132 # maps tag name to node; tagtypes maps tag name to 'global' or
2133 2133 # 'local'. (Global tags are defined by .hgtags across all
2134 2134 # heads, and local tags are defined in .hg/localtags.)
2135 2135 # They constitute the in-memory cache of tags.
2136 2136 self.tags = self.tagtypes = None
2137 2137
2138 2138 self.nodetagscache = self.tagslist = None
2139 2139
2140 2140 cache = tagscache()
2141 2141 cache.tags, cache.tagtypes = self._findtags()
2142 2142
2143 2143 return cache
2144 2144
2145 2145 def tags(self):
2146 2146 '''return a mapping of tag to node'''
2147 2147 t = {}
2148 2148 if self.changelog.filteredrevs:
2149 2149 tags, tt = self._findtags()
2150 2150 else:
2151 2151 tags = self._tagscache.tags
2152 2152 rev = self.changelog.rev
2153 2153 for k, v in tags.items():
2154 2154 try:
2155 2155 # ignore tags to unknown nodes
2156 2156 rev(v)
2157 2157 t[k] = v
2158 2158 except (error.LookupError, ValueError):
2159 2159 pass
2160 2160 return t
2161 2161
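# Editor's note: a small consumption sketch (illustrative only):
#
#     for name, node in repo.tags().items():
#         repo.ui.write(b'%s -> %s\n' % (name, hex(node)))
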
2162 2162 def _findtags(self):
2163 2163 """Do the hard work of finding tags. Return a pair of dicts
2164 2164 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2165 2165 maps tag name to a string like \'global\' or \'local\'.
2166 2166 Subclasses or extensions are free to add their own tags, but
2167 2167 should be aware that the returned dicts will be retained for the
2168 2168 duration of the localrepo object."""
2169 2169
2170 2170 # XXX what tagtype should subclasses/extensions use? Currently
2171 2171 # mq and bookmarks add tags, but do not set the tagtype at all.
2172 2172 # Should each extension invent its own tag type? Should there
2173 2173 # be one tagtype for all such "virtual" tags? Or is the status
2174 2174 # quo fine?
2175 2175
2176 2176 # map tag name to (node, hist)
2177 2177 alltags = tagsmod.findglobaltags(self.ui, self)
2178 2178 # map tag name to tag type
2179 2179 tagtypes = {tag: b'global' for tag in alltags}
2180 2180
2181 2181 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2182 2182
2183 2183 # Build the return dicts. Have to re-encode tag names because
2184 2184 # the tags module always uses UTF-8 (in order not to lose info
2185 2185 # writing to the cache), but the rest of Mercurial wants them in
2186 2186 # local encoding.
2187 2187 tags = {}
2188 2188 for name, (node, hist) in alltags.items():
2189 2189 if node != self.nullid:
2190 2190 tags[encoding.tolocal(name)] = node
2191 2191 tags[b'tip'] = self.changelog.tip()
2192 2192 tagtypes = {
2193 2193 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2194 2194 }
2195 2195 return (tags, tagtypes)
2196 2196
2197 2197 def tagtype(self, tagname):
2198 2198 """
2199 2199 return the type of the given tag. result can be:
2200 2200
2201 2201 'local' : a local tag
2202 2202 'global' : a global tag
2203 2203 None : tag does not exist
2204 2204 """
2205 2205
2206 2206 return self._tagscache.tagtypes.get(tagname)
2207 2207
2208 2208 def tagslist(self):
2209 2209 '''return a list of tags ordered by revision'''
2210 2210 if not self._tagscache.tagslist:
2211 2211 l = []
2212 2212 for t, n in self.tags().items():
2213 2213 l.append((self.changelog.rev(n), t, n))
2214 2214 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2215 2215
2216 2216 return self._tagscache.tagslist
2217 2217
2218 2218 def nodetags(self, node):
2219 2219 '''return the tags associated with a node'''
2220 2220 if not self._tagscache.nodetagscache:
2221 2221 nodetagscache = {}
2222 2222 for t, n in self._tagscache.tags.items():
2223 2223 nodetagscache.setdefault(n, []).append(t)
2224 2224 for tags in nodetagscache.values():
2225 2225 tags.sort()
2226 2226 self._tagscache.nodetagscache = nodetagscache
2227 2227 return self._tagscache.nodetagscache.get(node, [])
2228 2228
2229 2229 def nodebookmarks(self, node):
2230 2230 """return the list of bookmarks pointing to the specified node"""
2231 2231 return self._bookmarks.names(node)
2232 2232
2233 2233 def branchmap(self):
2234 2234 """returns a dictionary {branch: [branchheads]} with branchheads
2235 2235 ordered by increasing revision number"""
2236 2236 return self._branchcaches[self]
2237 2237
2238 2238 @unfilteredmethod
2239 2239 def revbranchcache(self):
2240 2240 if not self._revbranchcache:
2241 2241 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2242 2242 return self._revbranchcache
2243 2243
2244 2244 def register_changeset(self, rev, changelogrevision):
2245 2245 self.revbranchcache().setdata(rev, changelogrevision)
2246 2246
2247 2247 def branchtip(self, branch, ignoremissing=False):
2248 2248 """return the tip node for a given branch
2249 2249
2250 2250 If ignoremissing is True, then this method will not raise an error.
2251 2251 This is helpful for callers that only expect None for a missing branch
2252 2252 (e.g. namespace).
2253 2253
2254 2254 """
2255 2255 try:
2256 2256 return self.branchmap().branchtip(branch)
2257 2257 except KeyError:
2258 2258 if not ignoremissing:
2259 2259 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2260 2260 else:
2261 2261 pass
2262 2262
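# Editor's note: illustrative use of the tolerant lookup above; with
# ignoremissing=True a missing branch yields None:
#
#     node = repo.branchtip(b'default', ignoremissing=True)
#     if node is None:
#         pass  # branch does not exist
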
2263 2263 def lookup(self, key):
2264 2264 node = scmutil.revsymbol(self, key).node()
2265 2265 if node is None:
2266 2266 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2267 2267 return node
2268 2268
2269 2269 def lookupbranch(self, key):
2270 2270 if self.branchmap().hasbranch(key):
2271 2271 return key
2272 2272
2273 2273 return scmutil.revsymbol(self, key).branch()
2274 2274
2275 2275 def known(self, nodes):
2276 2276 cl = self.changelog
2277 2277 get_rev = cl.index.get_rev
2278 2278 filtered = cl.filteredrevs
2279 2279 result = []
2280 2280 for n in nodes:
2281 2281 r = get_rev(n)
2282 2282 resp = not (r is None or r in filtered)
2283 2283 result.append(resp)
2284 2284 return result
2285 2285
2286 2286 def local(self):
2287 2287 return self
2288 2288
2289 2289 def publishing(self):
2290 2290 # it's safe (and desirable) to trust the publish flag unconditionally
2291 2291 # so that we don't finalize changes shared between users via ssh or nfs
2292 2292 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2293 2293
2294 2294 def cancopy(self):
2295 2295 # so statichttprepo's override of local() works
2296 2296 if not self.local():
2297 2297 return False
2298 2298 if not self.publishing():
2299 2299 return True
2300 2300 # if publishing we can't copy if there is filtered content
2301 2301 return not self.filtered(b'visible').changelog.filteredrevs
2302 2302
2303 2303 def shared(self):
2304 2304 '''the type of shared repository (None if not shared)'''
2305 2305 if self.sharedpath != self.path:
2306 2306 return b'store'
2307 2307 return None
2308 2308
2309 2309 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2310 2310 return self.vfs.reljoin(self.root, f, *insidef)
2311 2311
2312 2312 def setparents(self, p1, p2=None):
2313 2313 if p2 is None:
2314 2314 p2 = self.nullid
2315 2315 self[None].setparents(p1, p2)
2316 2316 self._quick_access_changeid_invalidate()
2317 2317
2318 2318 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2319 2319 """changeid must be a changeset revision, if specified.
2320 2320 fileid can be a file revision or node."""
2321 2321 return context.filectx(
2322 2322 self, path, changeid, fileid, changectx=changectx
2323 2323 )
2324 2324
2325 2325 def getcwd(self) -> bytes:
2326 2326 return self.dirstate.getcwd()
2327 2327
2328 2328 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2329 2329 return self.dirstate.pathto(f, cwd)
2330 2330
2331 2331 def _loadfilter(self, filter):
2332 2332 if filter not in self._filterpats:
2333 2333 l = []
2334 2334 for pat, cmd in self.ui.configitems(filter):
2335 2335 if cmd == b'!':
2336 2336 continue
2337 2337 mf = matchmod.match(self.root, b'', [pat])
2338 2338 fn = None
2339 2339 params = cmd
2340 2340 for name, filterfn in self._datafilters.items():
2341 2341 if cmd.startswith(name):
2342 2342 fn = filterfn
2343 2343 params = cmd[len(name) :].lstrip()
2344 2344 break
2345 2345 if not fn:
2346 2346 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2347 2347 fn.__name__ = 'commandfilter'
2348 2348 # Wrap old filters not supporting keyword arguments
2349 2349 if not pycompat.getargspec(fn)[2]:
2350 2350 oldfn = fn
2351 2351 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2352 2352 fn.__name__ = 'compat-' + oldfn.__name__
2353 2353 l.append((mf, fn, params))
2354 2354 self._filterpats[filter] = l
2355 2355 return self._filterpats[filter]
2356 2356
2357 2357 def _filter(self, filterpats, filename, data):
2358 2358 for mf, fn, cmd in filterpats:
2359 2359 if mf(filename):
2360 2360 self.ui.debug(
2361 2361 b"filtering %s through %s\n"
2362 2362 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2363 2363 )
2364 2364 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2365 2365 break
2366 2366
2367 2367 return data
2368 2368
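# Editor's note: the filter patterns above come from hgrc sections named
# after the filter; a hypothetical [encode] section piping matching
# files through a shell command (run via procutil.filter when no named
# data filter matches):
#
#     [encode]
#     *.gz = pipe: gunzip
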
2369 2369 @unfilteredpropertycache
2370 2370 def _encodefilterpats(self):
2371 2371 return self._loadfilter(b'encode')
2372 2372
2373 2373 @unfilteredpropertycache
2374 2374 def _decodefilterpats(self):
2375 2375 return self._loadfilter(b'decode')
2376 2376
2377 2377 def adddatafilter(self, name, filter):
2378 2378 self._datafilters[name] = filter
2379 2379
2380 2380 def wread(self, filename: bytes) -> bytes:
2381 2381 if self.wvfs.islink(filename):
2382 2382 data = self.wvfs.readlink(filename)
2383 2383 else:
2384 2384 data = self.wvfs.read(filename)
2385 2385 return self._filter(self._encodefilterpats, filename, data)
2386 2386
2387 2387 def wwrite(
2388 2388 self,
2389 2389 filename: bytes,
2390 2390 data: bytes,
2391 2391 flags: bytes,
2392 2392 backgroundclose=False,
2393 2393 **kwargs
2394 2394 ) -> int:
2395 2395 """write ``data`` into ``filename`` in the working directory
2396 2396
2397 2397 This returns the length of the written (maybe decoded) data.
2398 2398 """
2399 2399 data = self._filter(self._decodefilterpats, filename, data)
2400 2400 if b'l' in flags:
2401 2401 self.wvfs.symlink(data, filename)
2402 2402 else:
2403 2403 self.wvfs.write(
2404 2404 filename, data, backgroundclose=backgroundclose, **kwargs
2405 2405 )
2406 2406 if b'x' in flags:
2407 2407 self.wvfs.setflags(filename, False, True)
2408 2408 else:
2409 2409 self.wvfs.setflags(filename, False, False)
2410 2410 return len(data)
2411 2411
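# Editor's note: the flag bytes accepted above, in an illustrative
# sketch (paths and data are made up):
#
#     repo.wwrite(b'plain.txt', b'data', b'')       # regular file
#     repo.wwrite(b'hook.sh', b'#!/bin/sh\n', b'x') # executable bit set
#     repo.wwrite(b'alink', b'target', b'l')        # symlink to "target"
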
2412 2412 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2413 2413 return self._filter(self._decodefilterpats, filename, data)
2414 2414
2415 2415 def currenttransaction(self):
2416 2416 """return the current transaction or None if non exists"""
2417 2417 if self._transref:
2418 2418 tr = self._transref()
2419 2419 else:
2420 2420 tr = None
2421 2421
2422 2422 if tr and tr.running():
2423 2423 return tr
2424 2424 return None
2425 2425
2426 2426 def transaction(self, desc, report=None):
2427 2427 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2428 2428 b'devel', b'check-locks'
2429 2429 ):
2430 2430 if self._currentlock(self._lockref) is None:
2431 2431 raise error.ProgrammingError(b'transaction requires locking')
2432 2432 tr = self.currenttransaction()
2433 2433 if tr is not None:
2434 2434 return tr.nest(name=desc)
2435 2435
2436 2436 # abort here if the journal already exists
2437 2437 if self.svfs.exists(b"journal"):
2438 2438 raise error.RepoError(
2439 2439 _(b"abandoned transaction found"),
2440 2440 hint=_(b"run 'hg recover' to clean up transaction"),
2441 2441 )
2442 2442
2443 2443 # At that point your dirstate should be clean:
2444 2444 #
2445 2445 # - If you don't have the wlock, why would you still have a dirty
2446 2446 # dirstate?
2447 2447 #
2448 2448 # - If you hold the wlock, you should not be opening a transaction in
2449 2449 # the middle of a `dirstate.changing_*` block. The transaction needs to
2450 2450 # be open before that and wrap the change-context.
2451 2451 #
2452 2452 # - If you are not within a `dirstate.changing_*` context, why is our
2453 2453 # dirstate dirty?
2454 2454 if self.dirstate._dirty:
2455 2455 m = "cannot open a transaction with a dirty dirstate"
2456 2456 raise error.ProgrammingError(m)
2457 2457
2458 2458 idbase = b"%.40f#%f" % (random.random(), time.time())
2459 2459 ha = hex(hashutil.sha1(idbase).digest())
2460 2460 txnid = b'TXN:' + ha
2461 2461 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2462 2462
2463 2463 self._writejournal(desc)
2464 2464 if report:
2465 2465 rp = report
2466 2466 else:
2467 2467 rp = self.ui.warn
2468 2468 vfsmap = self.vfs_map
2469 2469 # we must avoid cyclic reference between repo and transaction.
2470 2470 reporef = weakref.ref(self)
2471 2471 # Code to track tag movement
2472 2472 #
2473 2473 # Since tags are all handled as file content, it is actually quite hard
2474 2474 # to track these movements from a code perspective. So we fall back to
2475 2475 # tracking at the repository level. One could envision tracking changes
2476 2476 # to the '.hgtags' file through changegroup application, but that fails
2477 2477 # to cope with cases where a transaction exposes new heads without a
2478 2478 # changegroup being involved (e.g. phase movement).
2479 2479 #
2480 2480 # For now, we gate the feature behind a flag since it likely comes
2481 2481 # with performance impacts. The current code runs more often than needed
2482 2482 # and does not use caches as much as it could. The current focus is on
2483 2483 # the behavior of the feature so we disable it by default. The flag
2484 2484 # will be removed when we are happy with the performance impact.
2485 2485 #
2486 2486 # Once this feature is no longer experimental move the following
2487 2487 # documentation to the appropriate help section:
2488 2488 #
2489 2489 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2490 2490 # tags (new or changed or deleted tags). In addition the details of
2491 2491 # these changes are made available in a file at:
2492 2492 # ``REPOROOT/.hg/changes/tags.changes``.
2493 2493 # Make sure you check for HG_TAG_MOVED before reading that file as it
2494 2494 # might exist from a previous transaction even if no tags were touched
2495 2495 # in this one. Changes are recorded in a line-based format::
2496 2496 #
2497 2497 # <action> <hex-node> <tag-name>\n
2498 2498 #
2499 2499 # Actions are defined as follows:
2500 2500 # "-R": tag is removed,
2501 2501 # "+A": tag is added,
2502 2502 # "-M": tag is moved (old value),
2503 2503 # "+M": tag is moved (new value),
2504 2504 tracktags = lambda x: None
2505 2505 # experimental config: experimental.hook-track-tags
2506 2506 shouldtracktags = self.ui.configbool(
2507 2507 b'experimental', b'hook-track-tags'
2508 2508 )
2509 2509 if desc != b'strip' and shouldtracktags:
2510 2510 oldheads = self.changelog.headrevs()
2511 2511
2512 2512 def tracktags(tr2):
2513 2513 repo = reporef()
2514 2514 assert repo is not None # help pytype
2515 2515 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2516 2516 newheads = repo.changelog.headrevs()
2517 2517 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2518 2518 # note: we compare lists here.
2519 2519 # As we do it only once, building sets would not be cheaper.
2520 2520 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2521 2521 if changes:
2522 2522 tr2.hookargs[b'tag_moved'] = b'1'
2523 2523 with repo.vfs(
2524 2524 b'changes/tags.changes', b'w', atomictemp=True
2525 2525 ) as changesfile:
2526 2526 # note: we do not register the file with the transaction
2527 2527 # because we need it to still exist when the transaction
2528 2528 # is closed (for txnclose hooks)
2529 2529 tagsmod.writediff(changesfile, changes)
2530 2530
2531 2531 def validate(tr2):
2532 2532 """will run pre-closing hooks"""
2533 2533 # XXX the transaction API is a bit lacking here so we take a hacky
2534 2534 # path for now
2535 2535 #
2536 2536 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2537 2537 # dict is copied before these run. In addition we need the data
2538 2538 # available to in-memory hooks too.
2539 2539 #
2540 2540 # Moreover, we also need to make sure this runs before txnclose
2541 2541 # hooks and there is no "pending" mechanism that would execute
2542 2542 # logic only if hooks are about to run.
2543 2543 #
2544 2544 # Fixing this limitation of the transaction is also needed to track
2545 2545 # other families of changes (bookmarks, phases, obsolescence).
2546 2546 #
2547 2547 # This will have to be fixed before we remove the experimental
2548 2548 # gating.
2549 2549 tracktags(tr2)
2550 2550 repo = reporef()
2551 2551 assert repo is not None # help pytype
2552 2552
2553 2553 singleheadopt = (b'experimental', b'single-head-per-branch')
2554 2554 singlehead = repo.ui.configbool(*singleheadopt)
2555 2555 if singlehead:
2556 2556 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2557 2557 accountclosed = singleheadsub.get(
2558 2558 b"account-closed-heads", False
2559 2559 )
2560 2560 if singleheadsub.get(b"public-changes-only", False):
2561 2561 filtername = b"immutable"
2562 2562 else:
2563 2563 filtername = b"visible"
2564 2564 scmutil.enforcesinglehead(
2565 2565 repo, tr2, desc, accountclosed, filtername
2566 2566 )
2567 2567 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2568 2568 for name, (old, new) in sorted(
2569 2569 tr.changes[b'bookmarks'].items()
2570 2570 ):
2571 2571 args = tr.hookargs.copy()
2572 2572 args.update(bookmarks.preparehookargs(name, old, new))
2573 2573 repo.hook(
2574 2574 b'pretxnclose-bookmark',
2575 2575 throw=True,
2576 2576 **pycompat.strkwargs(args)
2577 2577 )
2578 2578 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2579 2579 cl = repo.unfiltered().changelog
2580 2580 for revs, (old, new) in tr.changes[b'phases']:
2581 2581 for rev in revs:
2582 2582 args = tr.hookargs.copy()
2583 2583 node = hex(cl.node(rev))
2584 2584 args.update(phases.preparehookargs(node, old, new))
2585 2585 repo.hook(
2586 2586 b'pretxnclose-phase',
2587 2587 throw=True,
2588 2588 **pycompat.strkwargs(args)
2589 2589 )
2590 2590
2591 2591 repo.hook(
2592 2592 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2593 2593 )
2594 2594
2595 2595 def releasefn(tr, success):
2596 2596 repo = reporef()
2597 2597 if repo is None:
2598 2598 # If the repo has been GC'd (and this release function is being
2599 2599 # called from transaction.__del__), there's not much we can do,
2600 2600 # so just leave the unfinished transaction there and let the
2601 2601 # user run `hg recover`.
2602 2602 return
2603 2603 if success:
2604 2604 # this should be explicitly invoked here, because
2605 2605 # in-memory changes aren't written out when closing the
2606 2606 # transaction, if tr.addfilegenerator (via
2607 2607 # dirstate.write or so) isn't invoked while the
2608 2608 # transaction is running
2609 2609 repo.dirstate.write(None)
2610 2610 else:
2611 2611 # discard all changes (including ones already written
2612 2612 # out) in this transaction
2613 2613 repo.invalidate(clearfilecache=True)
2614 2614
2615 2615 tr = transaction.transaction(
2616 2616 rp,
2617 2617 self.svfs,
2618 2618 vfsmap,
2619 2619 b"journal",
2620 2620 b"undo",
2621 2621 lambda: None,
2622 2622 self.store.createmode,
2623 2623 validator=validate,
2624 2624 releasefn=releasefn,
2625 2625 checkambigfiles=_cachedfiles,
2626 2626 name=desc,
2627 2627 )
2628 2628 for vfs_id, path in self._journalfiles():
2629 2629 tr.add_journal(vfs_id, path)
2630 2630 tr.changes[b'origrepolen'] = len(self)
2631 2631 tr.changes[b'obsmarkers'] = set()
2632 2632 tr.changes[b'phases'] = []
2633 2633 tr.changes[b'bookmarks'] = {}
2634 2634
2635 2635 tr.hookargs[b'txnid'] = txnid
2636 2636 tr.hookargs[b'txnname'] = desc
2637 2637 tr.hookargs[b'changes'] = tr.changes
2638 2638 # note: writing the fncache only during finalize means that the file is
2639 2639 # outdated when running hooks. As the fncache is used for streaming clones,
2640 2640 # this is not expected to break anything that happens during the hooks.
2641 2641 tr.addfinalize(b'flush-fncache', self.store.write)
2642 2642
2643 2643 def txnclosehook(tr2):
2644 2644 """To be run if transaction is successful, will schedule a hook run"""
2645 2645 # Don't reference tr2 in hook() so we don't hold a reference.
2646 2646 # This reduces memory consumption when there are multiple
2647 2647 # transactions per lock. This can likely go away if issue5045
2648 2648 # fixes the function accumulation.
2649 2649 hookargs = tr2.hookargs
2650 2650
2651 2651 def hookfunc(unused_success):
2652 2652 repo = reporef()
2653 2653 assert repo is not None # help pytype
2654 2654
2655 2655 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2656 2656 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2657 2657 for name, (old, new) in bmchanges:
2658 2658 args = tr.hookargs.copy()
2659 2659 args.update(bookmarks.preparehookargs(name, old, new))
2660 2660 repo.hook(
2661 2661 b'txnclose-bookmark',
2662 2662 throw=False,
2663 2663 **pycompat.strkwargs(args)
2664 2664 )
2665 2665
2666 2666 if hook.hashook(repo.ui, b'txnclose-phase'):
2667 2667 cl = repo.unfiltered().changelog
2668 2668 phasemv = sorted(
2669 2669 tr.changes[b'phases'], key=lambda r: r[0][0]
2670 2670 )
2671 2671 for revs, (old, new) in phasemv:
2672 2672 for rev in revs:
2673 2673 args = tr.hookargs.copy()
2674 2674 node = hex(cl.node(rev))
2675 2675 args.update(phases.preparehookargs(node, old, new))
2676 2676 repo.hook(
2677 2677 b'txnclose-phase',
2678 2678 throw=False,
2679 2679 **pycompat.strkwargs(args)
2680 2680 )
2681 2681
2682 2682 repo.hook(
2683 2683 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2684 2684 )
2685 2685
2686 2686 repo = reporef()
2687 2687 assert repo is not None # help pytype
2688 2688 repo._afterlock(hookfunc)
2689 2689
2690 2690 tr.addfinalize(b'txnclose-hook', txnclosehook)
2691 2691 # Include a leading "-" to make it happen before the transaction summary
2692 2692 # reports registered via scmutil.registersummarycallback() whose names
2693 2693 # are 00-txnreport etc. That way, the caches will be warm when the
2694 2694 # callbacks run.
2695 2695 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2696 2696
2697 2697 def txnaborthook(tr2):
2698 2698 """To be run if transaction is aborted"""
2699 2699 repo = reporef()
2700 2700 assert repo is not None # help pytype
2701 2701 repo.hook(
2702 2702 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2703 2703 )
2704 2704
2705 2705 tr.addabort(b'txnabort-hook', txnaborthook)
2706 2706 # avoid eager cache invalidation. in-memory data should be identical
2707 2707 # to stored data if transaction has no error.
2708 2708 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2709 2709 self._transref = weakref.ref(tr)
2710 2710 scmutil.registersummarycallback(self, tr, desc)
2711 2711 # This only exists to deal with rollback's need to have viable
2712 2712 # parents at the end of the operation. So back up viable parents at the
2713 2713 # time of this operation.
2714 2714 #
2715 2715 # We only do it when the `wlock` is taken, otherwise others might be
2716 2716 # altering the dirstate under us.
2717 2717 #
2718 2718 # This is really not a great way to do this (first, because we cannot
2719 2719 # always do it). More viable alternatives exist:
2720 2720 #
2721 2721 # - backing up only the working copy parents in dedicated files and doing
2722 2722 # a clean "keep-update" to them on `hg rollback`.
2723 2723 #
2724 2724 # - slightly changing the behavior and applying logic similar to "hg
2725 2725 # strip" to pick a working copy destination on `hg rollback`
2726 2726 if self.currentwlock() is not None:
2727 2727 ds = self.dirstate
2728 2728 if not self.vfs.exists(b'branch'):
2729 2729 # force a file to be written if none exists
2730 2730 ds.setbranch(b'default', None)
2731 2731
2732 2732 def backup_dirstate(tr):
2733 2733 for f in ds.all_file_names():
2734 2734 # hardlink backup is okay because `dirstate` is always
2735 2735 # atomically written and possible data files are append-only
2736 2736 # and resistant to trailing data.
2737 2737 tr.addbackup(f, hardlink=True, location=b'plain')
2738 2738
2739 2739 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2740 2740 return tr
2741 2741
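# Editor's note: the expected calling pattern, sketched under the
# locking requirement enforced above (illustrative, not part of the
# original file):
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction(b'my-operation') as tr:
#             ...  # journaled writes; an abort rolls them back
#     # a nested call simply returns tr.nest(name=desc)
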
2742 2742 def _journalfiles(self):
2743 2743 return (
2744 2744 (self.svfs, b'journal'),
2745 2745 (self.vfs, b'journal.desc'),
2746 2746 )
2747 2747
2748 2748 def undofiles(self):
2749 2749 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2750 2750
2751 2751 @unfilteredmethod
2752 2752 def _writejournal(self, desc):
2753 2753 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2754 2754
2755 2755 def recover(self):
2756 2756 with self.lock():
2757 2757 if self.svfs.exists(b"journal"):
2758 2758 self.ui.status(_(b"rolling back interrupted transaction\n"))
2759 2759 vfsmap = self.vfs_map
2760 2760 transaction.rollback(
2761 2761 self.svfs,
2762 2762 vfsmap,
2763 2763 b"journal",
2764 2764 self.ui.warn,
2765 2765 checkambigfiles=_cachedfiles,
2766 2766 )
2767 2767 self.invalidate()
2768 2768 return True
2769 2769 else:
2770 2770 self.ui.warn(_(b"no interrupted transaction available\n"))
2771 2771 return False
2772 2772
2773 2773 def rollback(self, dryrun=False, force=False):
2774 2774 wlock = lock = None
2775 2775 try:
2776 2776 wlock = self.wlock()
2777 2777 lock = self.lock()
2778 2778 if self.svfs.exists(b"undo"):
2779 2779 return self._rollback(dryrun, force)
2780 2780 else:
2781 2781 self.ui.warn(_(b"no rollback information available\n"))
2782 2782 return 1
2783 2783 finally:
2784 2784 release(lock, wlock)
2785 2785
2786 2786 @unfilteredmethod # Until we get smarter cache management
2787 2787 def _rollback(self, dryrun, force):
2788 2788 ui = self.ui
2789 2789
2790 2790 parents = self.dirstate.parents()
2791 2791 try:
2792 2792 args = self.vfs.read(b'undo.desc').splitlines()
2793 2793 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2794 2794 if len(args) >= 3:
2795 2795 detail = args[2]
2796 2796 oldtip = oldlen - 1
2797 2797
2798 2798 if detail and ui.verbose:
2799 2799 msg = _(
2800 2800 b'repository tip rolled back to revision %d'
2801 2801 b' (undo %s: %s)\n'
2802 2802 ) % (oldtip, desc, detail)
2803 2803 else:
2804 2804 msg = _(
2805 2805 b'repository tip rolled back to revision %d (undo %s)\n'
2806 2806 ) % (oldtip, desc)
2807 2807 parentgone = any(self[p].rev() > oldtip for p in parents)
2808 2808 except IOError:
2809 2809 msg = _(b'rolling back unknown transaction\n')
2810 2810 desc = None
2811 2811 parentgone = True
2812 2812
2813 2813 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2814 2814 raise error.Abort(
2815 2815 _(
2816 2816 b'rollback of last commit while not checked out '
2817 2817 b'may lose data'
2818 2818 ),
2819 2819 hint=_(b'use -f to force'),
2820 2820 )
2821 2821
2822 2822 ui.status(msg)
2823 2823 if dryrun:
2824 2824 return 0
2825 2825
2826 2826 self.destroying()
2827 2827 vfsmap = self.vfs_map
2828 2828 skip_journal_pattern = None
2829 2829 if not parentgone:
2830 2830 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2831 2831 transaction.rollback(
2832 2832 self.svfs,
2833 2833 vfsmap,
2834 2834 b'undo',
2835 2835 ui.warn,
2836 2836 checkambigfiles=_cachedfiles,
2837 2837 skip_journal_pattern=skip_journal_pattern,
2838 2838 )
2839 2839 self.invalidate()
2840 2840 self.dirstate.invalidate()
2841 2841
2842 2842 if parentgone:
2843 2843 # replace this with some explicit parent update in the future.
2844 2844 has_node = self.changelog.index.has_node
2845 2845 if not all(has_node(p) for p in self.dirstate._pl):
2846 2846 # There was no dirstate to back up initially; we need to drop
2847 2847 # the existing one.
2848 2848 with self.dirstate.changing_parents(self):
2849 2849 self.dirstate.setparents(self.nullid)
2850 2850 self.dirstate.clear()
2851 2851
2852 2852 parents = tuple([p.rev() for p in self[None].parents()])
2853 2853 if len(parents) > 1:
2854 2854 ui.status(
2855 2855 _(
2856 2856 b'working directory now based on '
2857 2857 b'revisions %d and %d\n'
2858 2858 )
2859 2859 % parents
2860 2860 )
2861 2861 else:
2862 2862 ui.status(
2863 2863 _(b'working directory now based on revision %d\n') % parents
2864 2864 )
2865 2865 mergestatemod.mergestate.clean(self)
2866 2866
2867 2867 # TODO: if we know which new heads may result from this rollback, pass
2868 2868 # them to destroy(), which will prevent the branchhead cache from being
2869 2869 # invalidated.
2870 2870 self.destroyed()
2871 2871 return 0
2872 2872
2873 2873 def _buildcacheupdater(self, newtransaction):
2874 2874 """called during transaction to build the callback updating cache
2875 2875
2876 2876 Lives on the repository to help extension who might want to augment
2877 2877 this logic. For this purpose, the created transaction is passed to the
2878 2878 method.
2879 2879 """
2880 2880 # we must avoid cyclic reference between repo and transaction.
2881 2881 reporef = weakref.ref(self)
2882 2882
2883 2883 def updater(tr):
2884 2884 repo = reporef()
2885 2885 assert repo is not None # help pytype
2886 2886 repo.updatecaches(tr)
2887 2887
2888 2888 return updater
2889 2889
2890 2890 @unfilteredmethod
2891 2891 def updatecaches(self, tr=None, full=False, caches=None):
2892 2892 """warm appropriate caches
2893 2893
2894 2894 If this function is called after a transaction closed, the transaction
2895 2895 will be available in the 'tr' argument. This can be used to selectively
2896 2896 update caches relevant to the changes in that transaction.
2897 2897
2898 2898 If 'full' is set, make sure all caches the function knows about have
2899 2899 up-to-date data. Even the ones usually loaded more lazily.
2900 2900
2901 2901 The `full` argument can take a special "post-clone" value. In this case
2902 2902 the cache warming happens after a clone and some of the slower caches might
2903 2903 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2904 2904 as we plan for a cleaner way to deal with this for 5.9.
2905 2905 """
2906 2906 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2907 2907 # During strip, many caches are invalid but
2908 2908 # later call to `destroyed` will refresh them.
2909 2909 return
2910 2910
2911 2911 unfi = self.unfiltered()
2912 2912
2913 2913 if full:
2914 2914 msg = (
2915 2915 "`full` argument for `repo.updatecaches` is deprecated\n"
2916 2916 "(use `caches=repository.CACHE_ALL` instead)"
2917 2917 )
2918 2918 self.ui.deprecwarn(msg, b"5.9")
2919 2919 caches = repository.CACHES_ALL
2920 2920 if full == b"post-clone":
2921 2921 caches = repository.CACHES_POST_CLONE
2923 2923 elif caches is None:
2924 2924 caches = repository.CACHES_DEFAULT
2925 2925
2926 2926 if repository.CACHE_BRANCHMAP_SERVED in caches:
2927 2927 if tr is None or tr.changes[b'origrepolen'] < len(self):
2928 2928 # accessing the 'served' branchmap should refresh all the others,
2929 2929 self.ui.debug(b'updating the branch cache\n')
2930 2930 self.filtered(b'served').branchmap()
2931 2931 self.filtered(b'served.hidden').branchmap()
2932 2932 # flush all possibly delayed write.
2933 2933 self._branchcaches.write_delayed(self)
2934 2934
2935 2935 if repository.CACHE_CHANGELOG_CACHE in caches:
2936 2936 self.changelog.update_caches(transaction=tr)
2937 2937
2938 2938 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2939 2939 self.manifestlog.update_caches(transaction=tr)
2940 2940 for entry in self.store.walk():
2941 2941 if not entry.is_revlog:
2942 2942 continue
2943 2943 if not entry.is_manifestlog:
2944 2944 continue
2945 2945 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2946 2946 if manifestrevlog is not None:
2947 2947 manifestrevlog.update_caches(transaction=tr)
2948 2948
2949 2949 if repository.CACHE_REV_BRANCH in caches:
2950 2950 rbc = unfi.revbranchcache()
2951 2951 for r in unfi.changelog:
2952 2952 rbc.branchinfo(r)
2953 2953 rbc.write()
2954 2954
2955 2955 if repository.CACHE_FULL_MANIFEST in caches:
2956 2956 # ensure the working copy parents are in the manifestfulltextcache
2957 2957 for ctx in self[b'.'].parents():
2958 2958 ctx.manifest() # accessing the manifest is enough
2959 2959
2960 2960 if repository.CACHE_FILE_NODE_TAGS in caches:
2961 2961 # accessing fnode cache warms the cache
2962 2962 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2963 2963
2964 2964 if repository.CACHE_TAGS_DEFAULT in caches:
2965 2965 # accessing tags warms the cache
2966 2966 self.tags()
2967 2967 if repository.CACHE_TAGS_SERVED in caches:
2968 2968 self.filtered(b'served').tags()
2969 2969
2970 2970 if repository.CACHE_BRANCHMAP_ALL in caches:
2971 2971 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2972 2972 # so we're forcing a write to cause these caches to be warmed up
2973 2973 # even if they haven't explicitly been requested yet (if they've
2974 2974 # never been used by hg, they won't ever have been written, even if
2975 2975 # they're a subset of another kind of cache that *has* been used).
2976 2976 for filt in repoview.filtertable.keys():
2977 2977 filtered = self.filtered(filt)
2978 2978 filtered.branchmap().write(filtered)
2979 2979
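# Editor's note: illustrative calls selecting which caches to warm,
# using the constants referenced above:
#
#     repo.updatecaches(caches=repository.CACHES_DEFAULT)
#     repo.updatecaches(caches=repository.CACHES_POST_CLONE)
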
2980 2980 def invalidatecaches(self):
2981 2981 if '_tagscache' in vars(self):
2982 2982 # can't use delattr on proxy
2983 2983 del self.__dict__['_tagscache']
2984 2984
2985 2985 self._branchcaches.clear()
2986 2986 self.invalidatevolatilesets()
2987 2987 self._sparsesignaturecache.clear()
2988 2988
2989 2989 def invalidatevolatilesets(self):
2990 2990 self.filteredrevcache.clear()
2991 2991 obsolete.clearobscaches(self)
2992 2992 self._quick_access_changeid_invalidate()
2993 2993
2994 2994 def invalidatedirstate(self):
2995 2995 """Invalidates the dirstate, causing the next call to dirstate
2996 2996 to check if it was modified since the last time it was read,
2997 2997 rereading it if it has.
2998 2998
2999 2999 This differs from dirstate.invalidate() in that it doesn't always
3000 3000 reread the dirstate. Use dirstate.invalidate() if you want to
3001 3001 explicitly read the dirstate again (i.e. restoring it to a previous
3002 3002 known good state)."""
3003 3003 unfi = self.unfiltered()
3004 3004 if 'dirstate' in unfi.__dict__:
3005 3005 assert not self.dirstate.is_changing_any
3006 3006 del unfi.__dict__['dirstate']
3007 3007
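# A minimal sketch of the distinction drawn in the docstring above,
# assuming `repo` is an open localrepository: invalidatedirstate() only
# drops the cached object so the next access re-checks the on-disk file,
# while dirstate.invalidate() forces an unconditional reread.
def refresh_dirstate(repo, force=False):
    if force:
        repo.dirstate.invalidate()  # always reread from disk
    else:
        repo.invalidatedirstate()  # reread only if the file changed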
3008 3008 def invalidate(self, clearfilecache=False):
3009 3009 """Invalidates both store and non-store parts other than dirstate
3010 3010
3011 3011 If a transaction is running, invalidation of store is omitted,
3012 3012 because discarding in-memory changes might cause inconsistency
3013 3013 (e.g. incomplete fncache causes unintentional failure, but
3014 3014 redundant one doesn't).
3015 3015 """
3016 3016 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3017 3017 for k in list(self._filecache.keys()):
3018 3018 if (
3019 3019 k == b'changelog'
3020 3020 and self.currenttransaction()
3021 3021 and self.changelog._delayed
3022 3022 ):
3023 3023 # The changelog object may store unwritten revisions. We don't
3024 3024 # want to lose them.
3025 3025 # TODO: Solve the problem instead of working around it.
3026 3026 continue
3027 3027
3028 3028 if clearfilecache:
3029 3029 del self._filecache[k]
3030 3030 try:
3031 delattr(unfiltered, k)
3031 # XXX ideally, the key would be a unicode string to match the
3032 # fact it refers to an attribute name. However, changing this was
3033 # a bit of scope creep compared to the series cleaning up
3034 # del/set/getattr, so we kept things simple here.
3035 delattr(unfiltered, pycompat.sysstr(k))
3032 3036 except AttributeError:
3033 3037 pass
3034 3038 self.invalidatecaches()
3035 3039 if not self.currenttransaction():
3036 3040 # TODO: Changing contents of store outside transaction
3037 3041 # causes inconsistency. We should make in-memory store
3038 3042 # changes detectable, and abort if changed.
3039 3043 self.store.invalidatecaches()
3040 3044
3041 3045 def invalidateall(self):
3042 3046 """Fully invalidates both store and non-store parts, causing the
3043 3047 subsequent operation to reread any outside changes."""
3044 3048 # extension should hook this to invalidate its caches
3045 3049 self.invalidate()
3046 3050 self.invalidatedirstate()
3047 3051
3048 3052 @unfilteredmethod
3049 3053 def _refreshfilecachestats(self, tr):
3050 3054 """Reload stats of cached files so that they are flagged as valid"""
3051 3055 for k, ce in self._filecache.items():
3052 3056 k = pycompat.sysstr(k)
3053 3057 if k == 'dirstate' or k not in self.__dict__:
3054 3058 continue
3055 3059 ce.refresh()
3056 3060
3057 3061 def _lock(
3058 3062 self,
3059 3063 vfs,
3060 3064 lockname,
3061 3065 wait,
3062 3066 releasefn,
3063 3067 acquirefn,
3064 3068 desc,
3065 3069 ):
3066 3070 timeout = 0
3067 3071 warntimeout = 0
3068 3072 if wait:
3069 3073 timeout = self.ui.configint(b"ui", b"timeout")
3070 3074 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3071 3075 # internal config: ui.signal-safe-lock
3072 3076 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3073 3077
3074 3078 l = lockmod.trylock(
3075 3079 self.ui,
3076 3080 vfs,
3077 3081 lockname,
3078 3082 timeout,
3079 3083 warntimeout,
3080 3084 releasefn=releasefn,
3081 3085 acquirefn=acquirefn,
3082 3086 desc=desc,
3083 3087 signalsafe=signalsafe,
3084 3088 )
3085 3089 return l
3086 3090
3087 3091 def _afterlock(self, callback):
3088 3092 """add a callback to be run when the repository is fully unlocked
3089 3093
3090 3094 The callback will be executed when the outermost lock is released
3091 3095 (with wlock being higher level than 'lock')."""
3092 3096 for ref in (self._wlockref, self._lockref):
3093 3097 l = ref and ref()
3094 3098 if l and l.held:
3095 3099 l.postrelease.append(callback)
3096 3100 break
3097 3101 else: # no lock has been found.
3098 3102 callback(True)
3099 3103
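# A minimal usage sketch for _afterlock(), mirroring how commit() and
# pushkey() register their hooks further down; assumes `repo` is an open
# localrepository. The callback receives a success flag and runs once
# the outermost lock is released (or immediately if no lock is held).
def _announce(success):
    repo.ui.debug(b'all repository locks released\n')

repo._afterlock(_announce)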
3100 3104 def lock(self, wait=True):
3101 3105 """Lock the repository store (.hg/store) and return a weak reference
3102 3106 to the lock. Use this before modifying the store (e.g. committing or
3103 3107 stripping). If you are opening a transaction, get a lock as well.
3104 3108
3105 3109 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3106 3110 'wlock' first to avoid a deadlock hazard."""
3107 3111 l = self._currentlock(self._lockref)
3108 3112 if l is not None:
3109 3113 l.lock()
3110 3114 return l
3111 3115
3112 3116 l = self._lock(
3113 3117 vfs=self.svfs,
3114 3118 lockname=b"lock",
3115 3119 wait=wait,
3116 3120 releasefn=None,
3117 3121 acquirefn=self.invalidate,
3118 3122 desc=_(b'repository %s') % self.origroot,
3119 3123 )
3120 3124 self._lockref = weakref.ref(l)
3121 3125 return l
3122 3126
3123 3127 def wlock(self, wait=True):
3124 3128 """Lock the non-store parts of the repository (everything under
3125 3129 .hg except .hg/store) and return a weak reference to the lock.
3126 3130
3127 3131 Use this before modifying files in .hg.
3128 3132
3129 3133 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3130 3134 'wlock' first to avoid a deadlock hazard."""
3131 3135 l = self._wlockref() if self._wlockref else None
3132 3136 if l is not None and l.held:
3133 3137 l.lock()
3134 3138 return l
3135 3139
3136 3140 # We do not need to check for non-waiting lock acquisition. Such
3137 3141 # an acquisition would not cause a deadlock, as it would just fail.
3138 3142 if wait and (
3139 3143 self.ui.configbool(b'devel', b'all-warnings')
3140 3144 or self.ui.configbool(b'devel', b'check-locks')
3141 3145 ):
3142 3146 if self._currentlock(self._lockref) is not None:
3143 3147 self.ui.develwarn(b'"wlock" acquired after "lock"')
3144 3148
3145 3149 def unlock():
3146 3150 if self.dirstate.is_changing_any:
3147 3151 msg = b"wlock release in the middle of a changing parents"
3148 3152 self.ui.develwarn(msg)
3149 3153 self.dirstate.invalidate()
3150 3154 else:
3151 3155 if self.dirstate._dirty:
3152 3156 msg = b"dirty dirstate on wlock release"
3153 3157 self.ui.develwarn(msg)
3154 3158 self.dirstate.write(None)
3155 3159
3156 3160 unfi = self.unfiltered()
3157 3161 if 'dirstate' in unfi.__dict__:
3158 3162 del unfi.__dict__['dirstate']
3159 3163
3160 3164 l = self._lock(
3161 3165 self.vfs,
3162 3166 b"wlock",
3163 3167 wait,
3164 3168 unlock,
3165 3169 self.invalidatedirstate,
3166 3170 _(b'working directory of %s') % self.origroot,
3167 3171 )
3168 3172 self._wlockref = weakref.ref(l)
3169 3173 return l
3170 3174
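# A minimal sketch of the lock-ordering contract documented above:
# when both locks are needed, take 'wlock' before 'lock', exactly as
# commit() does below; acquiring them in the other order trips the
# devel.check-locks warning. Assumes `repo` is an open repository.
with repo.wlock(), repo.lock():
    pass  # mutate the working copy and the store here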
3171 3175 def _currentlock(self, lockref):
3172 3176 """Returns the lock if it's held, or None if it's not."""
3173 3177 if lockref is None:
3174 3178 return None
3175 3179 l = lockref()
3176 3180 if l is None or not l.held:
3177 3181 return None
3178 3182 return l
3179 3183
3180 3184 def currentwlock(self):
3181 3185 """Returns the wlock if it's held, or None if it's not."""
3182 3186 return self._currentlock(self._wlockref)
3183 3187
3184 3188 def currentlock(self):
3185 3189 """Returns the lock if it's held, or None if it's not."""
3186 3190 return self._currentlock(self._lockref)
3187 3191
3188 3192 def checkcommitpatterns(self, wctx, match, status, fail):
3189 3193 """check for commit arguments that aren't committable"""
3190 3194 if match.isexact() or match.prefix():
3191 3195 matched = set(status.modified + status.added + status.removed)
3192 3196
3193 3197 for f in match.files():
3194 3198 f = self.dirstate.normalize(f)
3195 3199 if f == b'.' or f in matched or f in wctx.substate:
3196 3200 continue
3197 3201 if f in status.deleted:
3198 3202 fail(f, _(b'file not found!'))
3199 3203 # Is it a directory that exists or used to exist?
3200 3204 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3201 3205 d = f + b'/'
3202 3206 for mf in matched:
3203 3207 if mf.startswith(d):
3204 3208 break
3205 3209 else:
3206 3210 fail(f, _(b"no match under directory!"))
3207 3211 elif f not in self.dirstate:
3208 3212 fail(f, _(b"file not tracked!"))
3209 3213
3210 3214 @unfilteredmethod
3211 3215 def commit(
3212 3216 self,
3213 3217 text=b"",
3214 3218 user=None,
3215 3219 date=None,
3216 3220 match=None,
3217 3221 force=False,
3218 3222 editor=None,
3219 3223 extra=None,
3220 3224 ):
3221 3225 """Add a new revision to current repository.
3222 3226
3223 3227 Revision information is gathered from the working directory,
3224 3228 match can be used to filter the committed files. If editor is
3225 3229 supplied, it is called to get a commit message.
3226 3230 """
3227 3231 if extra is None:
3228 3232 extra = {}
3229 3233
3230 3234 def fail(f, msg):
3231 3235 raise error.InputError(b'%s: %s' % (f, msg))
3232 3236
3233 3237 if not match:
3234 3238 match = matchmod.always()
3235 3239
3236 3240 if not force:
3237 3241 match.bad = fail
3238 3242
3239 3243 # lock() for recent changelog (see issue4368)
3240 3244 with self.wlock(), self.lock():
3241 3245 wctx = self[None]
3242 3246 merge = len(wctx.parents()) > 1
3243 3247
3244 3248 if not force and merge and not match.always():
3245 3249 raise error.Abort(
3246 3250 _(
3247 3251 b'cannot partially commit a merge '
3248 3252 b'(do not specify files or patterns)'
3249 3253 )
3250 3254 )
3251 3255
3252 3256 status = self.status(match=match, clean=force)
3253 3257 if force:
3254 3258 status.modified.extend(
3255 3259 status.clean
3256 3260 ) # mq may commit clean files
3257 3261
3258 3262 # check subrepos
3259 3263 subs, commitsubs, newstate = subrepoutil.precommit(
3260 3264 self.ui, wctx, status, match, force=force
3261 3265 )
3262 3266
3263 3267 # make sure all explicit patterns are matched
3264 3268 if not force:
3265 3269 self.checkcommitpatterns(wctx, match, status, fail)
3266 3270
3267 3271 cctx = context.workingcommitctx(
3268 3272 self, status, text, user, date, extra
3269 3273 )
3270 3274
3271 3275 ms = mergestatemod.mergestate.read(self)
3272 3276 mergeutil.checkunresolved(ms)
3273 3277
3274 3278 # internal config: ui.allowemptycommit
3275 3279 if cctx.isempty() and not self.ui.configbool(
3276 3280 b'ui', b'allowemptycommit'
3277 3281 ):
3278 3282 self.ui.debug(b'nothing to commit, clearing merge state\n')
3279 3283 ms.reset()
3280 3284 return None
3281 3285
3282 3286 if merge and cctx.deleted():
3283 3287 raise error.Abort(_(b"cannot commit merge with missing files"))
3284 3288
3285 3289 if editor:
3286 3290 cctx._text = editor(self, cctx, subs)
3287 3291 edited = text != cctx._text
3288 3292
3289 3293 # Save commit message in case this transaction gets rolled back
3290 3294 # (e.g. by a pretxncommit hook). Leave the content alone on
3291 3295 # the assumption that the user will use the same editor again.
3292 3296 msg_path = self.savecommitmessage(cctx._text)
3293 3297
3294 3298 # commit subs and write new state
3295 3299 if subs:
3296 3300 uipathfn = scmutil.getuipathfn(self)
3297 3301 for s in sorted(commitsubs):
3298 3302 sub = wctx.sub(s)
3299 3303 self.ui.status(
3300 3304 _(b'committing subrepository %s\n')
3301 3305 % uipathfn(subrepoutil.subrelpath(sub))
3302 3306 )
3303 3307 sr = sub.commit(cctx._text, user, date)
3304 3308 newstate[s] = (newstate[s][0], sr)
3305 3309 subrepoutil.writestate(self, newstate)
3306 3310
3307 3311 p1, p2 = self.dirstate.parents()
3308 3312 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3309 3313 try:
3310 3314 self.hook(
3311 3315 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3312 3316 )
3313 3317 with self.transaction(b'commit'):
3314 3318 ret = self.commitctx(cctx, True)
3315 3319 # update bookmarks, dirstate and mergestate
3316 3320 bookmarks.update(self, [p1, p2], ret)
3317 3321 cctx.markcommitted(ret)
3318 3322 ms.reset()
3319 3323 except: # re-raises
3320 3324 if edited:
3321 3325 self.ui.write(
3322 3326 _(b'note: commit message saved in %s\n') % msg_path
3323 3327 )
3324 3328 self.ui.write(
3325 3329 _(
3326 3330 b"note: use 'hg commit --logfile "
3327 3331 b"%s --edit' to reuse it\n"
3328 3332 )
3329 3333 % msg_path
3330 3334 )
3331 3335 raise
3332 3336
3333 3337 def commithook(unused_success):
3334 3338 # hack for commands that use a temporary commit (e.g. histedit):
3335 3339 # the temporary commit may have been stripped before the hook runs
3336 3340 if self.changelog.hasnode(ret):
3337 3341 self.hook(
3338 3342 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3339 3343 )
3340 3344
3341 3345 self._afterlock(commithook)
3342 3346 return ret
3343 3347
3344 3348 @unfilteredmethod
3345 3349 def commitctx(self, ctx, error=False, origctx=None):
3346 3350 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3347 3351
3348 3352 @unfilteredmethod
3349 3353 def destroying(self):
3350 3354 """Inform the repository that nodes are about to be destroyed.
3351 3355 Intended for use by strip and rollback, so there's a common
3352 3356 place for anything that has to be done before destroying history.
3353 3357
3354 3358 This is mostly useful for saving state that is in memory and waiting
3355 3359 to be flushed when the current lock is released. Because a call to
3356 3360 destroyed is imminent, the repo will be invalidated causing those
3357 3361 changes to stay in memory (waiting for the next unlock), or vanish
3358 3362 completely.
3359 3363 """
3360 3364 # When using the same lock to commit and strip, the phasecache is left
3361 3365 # dirty after committing. Then when we strip, the repo is invalidated,
3362 3366 # causing those changes to disappear.
3363 3367 if '_phasecache' in vars(self):
3364 3368 self._phasecache.write()
3365 3369
3366 3370 @unfilteredmethod
3367 3371 def destroyed(self):
3368 3372 """Inform the repository that nodes have been destroyed.
3369 3373 Intended for use by strip and rollback, so there's a common
3370 3374 place for anything that has to be done after destroying history.
3371 3375 """
3372 3376 # When one tries to:
3373 3377 # 1) destroy nodes thus calling this method (e.g. strip)
3374 3378 # 2) use phasecache somewhere (e.g. commit)
3375 3379 #
3376 3380 # then 2) will fail because the phasecache contains nodes that were
3377 3381 # removed. We can either remove phasecache from the filecache,
3378 3382 # causing it to reload next time it is accessed, or simply filter
3379 3383 # the removed nodes now and write the updated cache.
3380 3384 self._phasecache.filterunknown(self)
3381 3385 self._phasecache.write()
3382 3386
3383 3387 # refresh all repository caches
3384 3388 self.updatecaches()
3385 3389
3386 3390 # Ensure the persistent tag cache is updated. Doing it now
3387 3391 # means that the tag cache only has to worry about destroyed
3388 3392 # heads immediately after a strip/rollback. That in turn
3389 3393 # guarantees that "cachetip == currenttip" (comparing both rev
3390 3394 # and node) always means no nodes have been added or destroyed.
3391 3395
3392 3396 # XXX this is suboptimal when qrefresh'ing: we strip the current
3393 3397 # head, refresh the tag cache, then immediately add a new head.
3394 3398 # But I think doing it this way is necessary for the "instant
3395 3399 # tag cache retrieval" case to work.
3396 3400 self.invalidate()
3397 3401
3398 3402 def status(
3399 3403 self,
3400 3404 node1=b'.',
3401 3405 node2=None,
3402 3406 match=None,
3403 3407 ignored=False,
3404 3408 clean=False,
3405 3409 unknown=False,
3406 3410 listsubrepos=False,
3407 3411 ):
3408 3412 '''a convenience method that calls node1.status(node2)'''
3409 3413 return self[node1].status(
3410 3414 node2, match, ignored, clean, unknown, listsubrepos
3411 3415 )
3412 3416
3413 3417 def addpostdsstatus(self, ps):
3414 3418 """Add a callback to run within the wlock, at the point at which status
3415 3419 fixups happen.
3416 3420
3417 3421 On status completion, callback(wctx, status) will be called with the
3418 3422 wlock held, unless the dirstate has changed from underneath or the wlock
3419 3423 couldn't be grabbed.
3420 3424
3421 3425 Callbacks should not capture and use a cached copy of the dirstate --
3422 3426 it might change in the meantime. Instead, they should access the
3423 3427 dirstate via wctx.repo().dirstate.
3424 3428
3425 3429 This list is emptied out after each status run -- extensions should
3426 3430 make sure they add to this list each time dirstate.status is called.
3427 3431 Extensions should also make sure they don't call this for statuses
3428 3432 that don't involve the dirstate.
3429 3433 """
3430 3434
3431 3435 # The list is located here for uniqueness reasons -- it is actually
3432 3436 # managed by the workingctx, but that isn't unique per-repo.
3433 3437 self._postdsstatus.append(ps)
3434 3438
3435 3439 def postdsstatus(self):
3436 3440 """Used by workingctx to get the list of post-dirstate-status hooks."""
3437 3441 return self._postdsstatus
3438 3442
3439 3443 def clearpostdsstatus(self):
3440 3444 """Used by workingctx to clear post-dirstate-status hooks."""
3441 3445 del self._postdsstatus[:]
3442 3446
3443 3447 def heads(self, start=None):
3444 3448 if start is None:
3445 3449 cl = self.changelog
3446 3450 headrevs = reversed(cl.headrevs())
3447 3451 return [cl.node(rev) for rev in headrevs]
3448 3452
3449 3453 heads = self.changelog.heads(start)
3450 3454 # sort the output in rev descending order
3451 3455 return sorted(heads, key=self.changelog.rev, reverse=True)
3452 3456
3453 3457 def branchheads(self, branch=None, start=None, closed=False):
3454 3458 """return a (possibly filtered) list of heads for the given branch
3455 3459
3456 3460 Heads are returned in topological order, from newest to oldest.
3457 3461 If branch is None, use the dirstate branch.
3458 3462 If start is not None, return only heads reachable from start.
3459 3463 If closed is True, return heads that are marked as closed as well.
3460 3464 """
3461 3465 if branch is None:
3462 3466 branch = self[None].branch()
3463 3467 branches = self.branchmap()
3464 3468 if not branches.hasbranch(branch):
3465 3469 return []
3466 3470 # the cache returns heads ordered lowest to highest
3467 3471 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3468 3472 if start is not None:
3469 3473 # filter out the heads that cannot be reached from startrev
3470 3474 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3471 3475 bheads = [h for h in bheads if h in fbheads]
3472 3476 return bheads
3473 3477
3474 3478 def branches(self, nodes):
3475 3479 if not nodes:
3476 3480 nodes = [self.changelog.tip()]
3477 3481 b = []
3478 3482 for n in nodes:
3479 3483 t = n
3480 3484 while True:
3481 3485 p = self.changelog.parents(n)
3482 3486 if p[1] != self.nullid or p[0] == self.nullid:
3483 3487 b.append((t, n, p[0], p[1]))
3484 3488 break
3485 3489 n = p[0]
3486 3490 return b
3487 3491
3488 3492 def between(self, pairs):
3489 3493 r = []
3490 3494
3491 3495 for top, bottom in pairs:
3492 3496 n, l, i = top, [], 0
3493 3497 f = 1
3494 3498
3495 3499 while n != bottom and n != self.nullid:
3496 3500 p = self.changelog.parents(n)[0]
3497 3501 if i == f:
3498 3502 l.append(n)
3499 3503 f = f * 2
3500 3504 n = p
3501 3505 i += 1
3502 3506
3503 3507 r.append(l)
3504 3508
3505 3509 return r
3506 3510
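# A minimal simulation of the sampling in between() above: the i == f
# test, with f doubling after each hit, keeps the nodes at distances
# 1, 2, 4, 8, ... along the first-parent chain from `top`, giving the
# legacy discovery protocol a logarithmic sample of each linear range.
kept = []
i, f = 0, 1
while i < 10:  # pretend the chain from top to bottom is 10 hops long
    if i == f:
        kept.append(i)
        f *= 2
    i += 1
assert kept == [1, 2, 4, 8]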
3507 3511 def checkpush(self, pushop):
3508 3512 """Extensions can override this function if additional checks have
3509 3513 to be performed before pushing, or call it if they override push
3510 3514 command.
3511 3515 """
3512 3516
3513 3517 @unfilteredpropertycache
3514 3518 def prepushoutgoinghooks(self):
3515 3519 """Return util.hooks consists of a pushop with repo, remote, outgoing
3516 3520 methods, which are called before pushing changesets.
3517 3521 """
3518 3522 return util.hooks()
3519 3523
3520 3524 def pushkey(self, namespace, key, old, new):
3521 3525 try:
3522 3526 tr = self.currenttransaction()
3523 3527 hookargs = {}
3524 3528 if tr is not None:
3525 3529 hookargs.update(tr.hookargs)
3526 3530 hookargs = pycompat.strkwargs(hookargs)
3527 3531 hookargs['namespace'] = namespace
3528 3532 hookargs['key'] = key
3529 3533 hookargs['old'] = old
3530 3534 hookargs['new'] = new
3531 3535 self.hook(b'prepushkey', throw=True, **hookargs)
3532 3536 except error.HookAbort as exc:
3533 3537 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3534 3538 if exc.hint:
3535 3539 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3536 3540 return False
3537 3541 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3538 3542 ret = pushkey.push(self, namespace, key, old, new)
3539 3543
3540 3544 def runhook(unused_success):
3541 3545 self.hook(
3542 3546 b'pushkey',
3543 3547 namespace=namespace,
3544 3548 key=key,
3545 3549 old=old,
3546 3550 new=new,
3547 3551 ret=ret,
3548 3552 )
3549 3553
3550 3554 self._afterlock(runhook)
3551 3555 return ret
3552 3556
3553 3557 def listkeys(self, namespace):
3554 3558 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3555 3559 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3556 3560 values = pushkey.list(self, namespace)
3557 3561 self.hook(b'listkeys', namespace=namespace, values=values)
3558 3562 return values
3559 3563
3560 3564 def debugwireargs(self, one, two, three=None, four=None, five=None):
3561 3565 '''used to test argument passing over the wire'''
3562 3566 return b"%s %s %s %s %s" % (
3563 3567 one,
3564 3568 two,
3565 3569 pycompat.bytestr(three),
3566 3570 pycompat.bytestr(four),
3567 3571 pycompat.bytestr(five),
3568 3572 )
3569 3573
3570 3574 def savecommitmessage(self, text):
3571 3575 fp = self.vfs(b'last-message.txt', b'wb')
3572 3576 try:
3573 3577 fp.write(text)
3574 3578 finally:
3575 3579 fp.close()
3576 3580 return self.pathto(fp.name[len(self.root) + 1 :])
3577 3581
3578 3582 def register_wanted_sidedata(self, category):
3579 3583 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3580 3584 # Only revlogv2 repos can want sidedata.
3581 3585 return
3582 3586 self._wanted_sidedata.add(pycompat.bytestr(category))
3583 3587
3584 3588 def register_sidedata_computer(
3585 3589 self, kind, category, keys, computer, flags, replace=False
3586 3590 ):
3587 3591 if kind not in revlogconst.ALL_KINDS:
3588 3592 msg = _(b"unexpected revlog kind '%s'.")
3589 3593 raise error.ProgrammingError(msg % kind)
3590 3594 category = pycompat.bytestr(category)
3591 3595 already_registered = category in self._sidedata_computers.get(kind, [])
3592 3596 if already_registered and not replace:
3593 3597 msg = _(
3594 3598 b"cannot register a sidedata computer twice for category '%s'."
3595 3599 )
3596 3600 raise error.ProgrammingError(msg % category)
3597 3601 if replace and not already_registered:
3598 3602 msg = _(
3599 3603 b"cannot replace a sidedata computer that isn't registered "
3600 3604 b"for category '%s'."
3601 3605 )
3602 3606 raise error.ProgrammingError(msg % category)
3603 3607 self._sidedata_computers.setdefault(kind, {})
3604 3608 self._sidedata_computers[kind][category] = (keys, computer, flags)
3605 3609
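# A minimal registration sketch for the two methods above, using a
# hypothetical b'exp-demo' category; the key constant and the computer
# signature assumed here follow the pattern used by the built-in copies
# sidedata computer. A computer takes (repo, store, rev, prev_sidedata)
# and returns the sidedata dict plus (flags to add, flags to remove).
def _compute_demo(repo, store, rev, prev_sidedata):
    return {}, (0, 0)  # nothing to store, no flag changes

repo.register_wanted_sidedata(b'exp-demo')
repo.register_sidedata_computer(
    revlogconst.KIND_CHANGELOG,
    b'exp-demo',              # hypothetical category
    (sidedatamod.SD_TEST1,),  # sidedata key(s) this computer produces
    _compute_demo,
    0,                        # no revlog flags implied
)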
3606 3610
3607 3611 def undoname(fn: bytes) -> bytes:
3608 3612 base, name = os.path.split(fn)
3609 3613 assert name.startswith(b'journal')
3610 3614 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3611 3615
3612 3616
3613 3617 def instance(ui, path: bytes, create, intents=None, createopts=None):
3614 3618 # prevent cyclic import localrepo -> upgrade -> localrepo
3615 3619 from . import upgrade
3616 3620
3617 3621 localpath = urlutil.urllocalpath(path)
3618 3622 if create:
3619 3623 createrepository(ui, localpath, createopts=createopts)
3620 3624
3621 3625 def repo_maker():
3622 3626 return makelocalrepository(ui, localpath, intents=intents)
3623 3627
3624 3628 repo = repo_maker()
3625 3629 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3626 3630 return repo
3627 3631
3628 3632
3629 3633 def islocal(path: bytes) -> bool:
3630 3634 return True
3631 3635
3632 3636
3633 3637 def defaultcreateopts(ui, createopts=None):
3634 3638 """Populate the default creation options for a repository.
3635 3639
3636 3640 A dictionary of explicitly requested creation options can be passed
3637 3641 in. Missing keys will be populated.
3638 3642 """
3639 3643 createopts = dict(createopts or {})
3640 3644
3641 3645 if b'backend' not in createopts:
3642 3646 # experimental config: storage.new-repo-backend
3643 3647 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3644 3648
3645 3649 return createopts
3646 3650
3647 3651
3648 3652 def clone_requirements(ui, createopts, srcrepo):
3649 3653 """clone the requirements of a local repo for a local clone
3650 3654
3651 3655 The store requirements are unchanged while the working copy requirements
3652 3656 depend on the configuration.
3653 3657 """
3654 3658 target_requirements = set()
3655 3659 if not srcrepo.requirements:
3656 3660 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3657 3661 # with it.
3658 3662 return target_requirements
3659 3663 createopts = defaultcreateopts(ui, createopts=createopts)
3660 3664 for r in newreporequirements(ui, createopts):
3661 3665 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3662 3666 target_requirements.add(r)
3663 3667
3664 3668 for r in srcrepo.requirements:
3665 3669 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3666 3670 target_requirements.add(r)
3667 3671 return target_requirements
3668 3672
3669 3673
3670 3674 def newreporequirements(ui, createopts):
3671 3675 """Determine the set of requirements for a new local repository.
3672 3676
3673 3677 Extensions can wrap this function to specify custom requirements for
3674 3678 new repositories.
3675 3679 """
3676 3680
3677 3681 if b'backend' not in createopts:
3678 3682 raise error.ProgrammingError(
3679 3683 b'backend key not present in createopts; '
3680 3684 b'was defaultcreateopts() called?'
3681 3685 )
3682 3686
3683 3687 if createopts[b'backend'] != b'revlogv1':
3684 3688 raise error.Abort(
3685 3689 _(
3686 3690 b'unable to determine repository requirements for '
3687 3691 b'storage backend: %s'
3688 3692 )
3689 3693 % createopts[b'backend']
3690 3694 )
3691 3695
3692 3696 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3693 3697 if ui.configbool(b'format', b'usestore'):
3694 3698 requirements.add(requirementsmod.STORE_REQUIREMENT)
3695 3699 if ui.configbool(b'format', b'usefncache'):
3696 3700 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3697 3701 if ui.configbool(b'format', b'dotencode'):
3698 3702 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3699 3703
3700 3704 compengines = ui.configlist(b'format', b'revlog-compression')
3701 3705 for compengine in compengines:
3702 3706 if compengine in util.compengines:
3703 3707 engine = util.compengines[compengine]
3704 3708 if engine.available() and engine.revlogheader():
3705 3709 break
3706 3710 else:
3707 3711 raise error.Abort(
3708 3712 _(
3709 3713 b'compression engines %s defined by '
3710 3714 b'format.revlog-compression not available'
3711 3715 )
3712 3716 % b', '.join(b'"%s"' % e for e in compengines),
3713 3717 hint=_(
3714 3718 b'run "hg debuginstall" to list available '
3715 3719 b'compression engines'
3716 3720 ),
3717 3721 )
3718 3722
3719 3723 # zlib is the historical default and doesn't need an explicit requirement.
3720 3724 if compengine == b'zstd':
3721 3725 requirements.add(b'revlog-compression-zstd')
3722 3726 elif compengine != b'zlib':
3723 3727 requirements.add(b'exp-compression-%s' % compengine)
3724 3728
3725 3729 if scmutil.gdinitconfig(ui):
3726 3730 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3727 3731 if ui.configbool(b'format', b'sparse-revlog'):
3728 3732 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3729 3733
3730 3734 # experimental config: format.use-dirstate-v2
3731 3735 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3732 3736 if ui.configbool(b'format', b'use-dirstate-v2'):
3733 3737 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3734 3738
3735 3739 # experimental config: format.exp-use-copies-side-data-changeset
3736 3740 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3737 3741 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3738 3742 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3739 3743 if ui.configbool(b'experimental', b'treemanifest'):
3740 3744 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3741 3745
3742 3746 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3743 3747 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3744 3748 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3745 3749
3746 3750 revlogv2 = ui.config(b'experimental', b'revlogv2')
3747 3751 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3748 3752 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3749 3753 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3750 3754 # experimental config: format.internal-phase
3751 3755 if ui.configbool(b'format', b'use-internal-phase'):
3752 3756 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3753 3757
3754 3758 # experimental config: format.exp-archived-phase
3755 3759 if ui.configbool(b'format', b'exp-archived-phase'):
3756 3760 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3757 3761
3758 3762 if createopts.get(b'narrowfiles'):
3759 3763 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3760 3764
3761 3765 if createopts.get(b'lfs'):
3762 3766 requirements.add(b'lfs')
3763 3767
3764 3768 if ui.configbool(b'format', b'bookmarks-in-store'):
3765 3769 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3766 3770
3767 3771 # The feature is disabled unless a fast implementation is available.
3768 3772 persistent_nodemap_default = policy.importrust('revlog') is not None
3769 3773 if ui.configbool(
3770 3774 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3771 3775 ):
3772 3776 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3773 3777
3774 3778 # if share-safe is enabled, let's create the new repository with the new
3775 3779 # requirement
3776 3780 if ui.configbool(b'format', b'use-share-safe'):
3777 3781 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3778 3782
3779 3783 # if we are creating a share-repo¹ we have to handle requirements
3780 3784 # differently.
3781 3785 #
3782 3786 # [1] (i.e. reusing the store from another repository, just having a
3783 3787 # working copy)
3784 3788 if b'sharedrepo' in createopts:
3785 3789 source_requirements = set(createopts[b'sharedrepo'].requirements)
3786 3790
3787 3791 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3788 3792 # share to an old school repository, we have to copy the
3789 3793 # requirements and hope for the best.
3790 3794 requirements = source_requirements
3791 3795 else:
3792 3796 # We have control on the working copy only, so "copy" the non
3793 3797 # working copy part over, ignoring previous logic.
3794 3798 to_drop = set()
3795 3799 for req in requirements:
3796 3800 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3797 3801 continue
3798 3802 if req in source_requirements:
3799 3803 continue
3800 3804 to_drop.add(req)
3801 3805 requirements -= to_drop
3802 3806 requirements |= source_requirements
3803 3807
3804 3808 if createopts.get(b'sharedrelative'):
3805 3809 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3806 3810 else:
3807 3811 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3808 3812
3809 3813 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3810 3814 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3811 3815 msg = _(b"ignoring unknown tracked key version: %d\n")
3812 3816 hint = _(
3813 3817 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3814 3818 )
3815 3819 if version != 1:
3816 3820 ui.warn(msg % version, hint=hint)
3817 3821 else:
3818 3822 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3819 3823
3820 3824 return requirements
3821 3825
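# A minimal sketch of the extension hook mentioned in the docstring
# above: wrapping newreporequirements() so every newly created
# repository gains a hypothetical b'exp-myfeature' requirement.
from mercurial import extensions, localrepo

def _newreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    requirements.add(b'exp-myfeature')
    return requirements

def extsetup(ui):
    extensions.wrapfunction(
        localrepo, 'newreporequirements', _newreporequirements
    )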
3822 3826
3823 3827 def checkrequirementscompat(ui, requirements):
3824 3828 """Checks compatibility of repository requirements enabled and disabled.
3825 3829
3826 3830 Returns a set of requirements which need to be dropped because dependent
3827 3831 requirements are not enabled. Also warns users about it."""
3828 3832
3829 3833 dropped = set()
3830 3834
3831 3835 if requirementsmod.STORE_REQUIREMENT not in requirements:
3832 3836 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3833 3837 ui.warn(
3834 3838 _(
3835 3839 b'ignoring enabled \'format.bookmarks-in-store\' config '
3836 3840 b'because it is incompatible with disabled '
3837 3841 b'\'format.usestore\' config\n'
3838 3842 )
3839 3843 )
3840 3844 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3841 3845
3842 3846 if (
3843 3847 requirementsmod.SHARED_REQUIREMENT in requirements
3844 3848 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3845 3849 ):
3846 3850 raise error.Abort(
3847 3851 _(
3848 3852 b"cannot create shared repository as source was created"
3849 3853 b" with 'format.usestore' config disabled"
3850 3854 )
3851 3855 )
3852 3856
3853 3857 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3854 3858 if ui.hasconfig(b'format', b'use-share-safe'):
3855 3859 msg = _(
3856 3860 b"ignoring enabled 'format.use-share-safe' config because "
3857 3861 b"it is incompatible with disabled 'format.usestore'"
3858 3862 b" config\n"
3859 3863 )
3860 3864 ui.warn(msg)
3861 3865 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3862 3866
3863 3867 return dropped
3864 3868
3865 3869
3866 3870 def filterknowncreateopts(ui, createopts):
3867 3871 """Filters a dict of repo creation options against options that are known.
3868 3872
3869 3873 Receives a dict of repo creation options and returns a dict of those
3870 3874 options that we don't know how to handle.
3871 3875
3872 3876 This function is called as part of repository creation. If the
3873 3877 returned dict contains any items, repository creation will not
3874 3878 be allowed, as it means there was a request to create a repository
3875 3879 with options not recognized by loaded code.
3876 3880
3877 3881 Extensions can wrap this function to filter out creation options
3878 3882 they know how to handle.
3879 3883 """
3880 3884 known = {
3881 3885 b'backend',
3882 3886 b'lfs',
3883 3887 b'narrowfiles',
3884 3888 b'sharedrepo',
3885 3889 b'sharedrelative',
3886 3890 b'shareditems',
3887 3891 b'shallowfilestore',
3888 3892 }
3889 3893
3890 3894 return {k: v for k, v in createopts.items() if k not in known}
3891 3895
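# A minimal sketch of the wrapping pattern this docstring describes:
# an extension declaring that it understands a hypothetical b'myopt'
# creation option by removing it from the unknown set (wired up with
# extensions.wrapfunction, as in the earlier sketch).
def _filterknowncreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop(b'myopt', None)
    return unknown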
3892 3896
3893 3897 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3894 3898 """Create a new repository in a vfs.
3895 3899
3896 3900 ``path`` path to the new repo's working directory.
3897 3901 ``createopts`` options for the new repository.
3898 3902 ``requirements`` predefined set of requirements.
3899 3903 (incompatible with ``createopts``)
3900 3904
3901 3905 The following keys for ``createopts`` are recognized:
3902 3906
3903 3907 backend
3904 3908 The storage backend to use.
3905 3909 lfs
3906 3910 Repository will be created with ``lfs`` requirement. The lfs extension
3907 3911 will automatically be loaded when the repository is accessed.
3908 3912 narrowfiles
3909 3913 Set up repository to support narrow file storage.
3910 3914 sharedrepo
3911 3915 Repository object from which storage should be shared.
3912 3916 sharedrelative
3913 3917 Boolean indicating if the path to the shared repo should be
3914 3918 stored as relative. By default, the pointer to the "parent" repo
3915 3919 is stored as an absolute path.
3916 3920 shareditems
3917 3921 Set of items to share to the new repository (in addition to storage).
3918 3922 shallowfilestore
3919 3923 Indicates that storage for files should be shallow (not all ancestor
3920 3924 revisions are known).
3921 3925 """
3922 3926
3923 3927 if requirements is not None:
3924 3928 if createopts is not None:
3925 3929 msg = b'cannot specify both createopts and requirements'
3926 3930 raise error.ProgrammingError(msg)
3927 3931 createopts = {}
3928 3932 else:
3929 3933 createopts = defaultcreateopts(ui, createopts=createopts)
3930 3934
3931 3935 unknownopts = filterknowncreateopts(ui, createopts)
3932 3936
3933 3937 if not isinstance(unknownopts, dict):
3934 3938 raise error.ProgrammingError(
3935 3939 b'filterknowncreateopts() did not return a dict'
3936 3940 )
3937 3941
3938 3942 if unknownopts:
3939 3943 raise error.Abort(
3940 3944 _(
3941 3945 b'unable to create repository because of unknown '
3942 3946 b'creation option: %s'
3943 3947 )
3944 3948 % b', '.join(sorted(unknownopts)),
3945 3949 hint=_(b'is a required extension not loaded?'),
3946 3950 )
3947 3951
3948 3952 requirements = newreporequirements(ui, createopts=createopts)
3949 3953 requirements -= checkrequirementscompat(ui, requirements)
3950 3954
3951 3955 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3952 3956
3953 3957 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3954 3958 if hgvfs.exists():
3955 3959 raise error.RepoError(_(b'repository %s already exists') % path)
3956 3960
3957 3961 if b'sharedrepo' in createopts:
3958 3962 sharedpath = createopts[b'sharedrepo'].sharedpath
3959 3963
3960 3964 if createopts.get(b'sharedrelative'):
3961 3965 try:
3962 3966 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3963 3967 sharedpath = util.pconvert(sharedpath)
3964 3968 except (IOError, ValueError) as e:
3965 3969 # ValueError is raised on Windows if the drive letters differ
3966 3970 # on each path.
3967 3971 raise error.Abort(
3968 3972 _(b'cannot calculate relative path'),
3969 3973 hint=stringutil.forcebytestr(e),
3970 3974 )
3971 3975
3972 3976 if not wdirvfs.exists():
3973 3977 wdirvfs.makedirs()
3974 3978
3975 3979 hgvfs.makedir(notindexed=True)
3976 3980 if b'sharedrepo' not in createopts:
3977 3981 hgvfs.mkdir(b'cache')
3978 3982 hgvfs.mkdir(b'wcache')
3979 3983
3980 3984 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3981 3985 if has_store and b'sharedrepo' not in createopts:
3982 3986 hgvfs.mkdir(b'store')
3983 3987
3984 3988 # We create an invalid changelog outside the store so very old
3985 3989 # Mercurial versions (which didn't know about the requirements
3986 3990 # file) encounter an error on reading the changelog. This
3987 3991 # effectively locks out old clients and prevents them from
3988 3992 # mucking with a repo in an unknown format.
3989 3993 #
3990 3994 # The revlog header has version 65535, which won't be recognized by
3991 3995 # such old clients.
3992 3996 hgvfs.append(
3993 3997 b'00changelog.i',
3994 3998 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3995 3999 b'layout',
3996 4000 )
3997 4001
3998 4002 # Filter the requirements into working copy and store ones
3999 4003 wcreq, storereq = scmutil.filterrequirements(requirements)
4000 4004 # write working copy ones
4001 4005 scmutil.writerequires(hgvfs, wcreq)
4002 4006 # If there are store requirements and the current repository
4003 4007 # is not a shared one, write stored requirements
4004 4008 # For new shared repository, we don't need to write the store
4005 4009 # requirements as they are already present in store requires
4006 4010 if storereq and b'sharedrepo' not in createopts:
4007 4011 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4008 4012 scmutil.writerequires(storevfs, storereq)
4009 4013
4010 4014 # Write out file telling readers where to find the shared store.
4011 4015 if b'sharedrepo' in createopts:
4012 4016 hgvfs.write(b'sharedpath', sharedpath)
4013 4017
4014 4018 if createopts.get(b'shareditems'):
4015 4019 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4016 4020 hgvfs.write(b'shared', shared)
4017 4021
4018 4022
4019 4023 def poisonrepository(repo):
4020 4024 """Poison a repository instance so it can no longer be used."""
4021 4025 # Perform any cleanup on the instance.
4022 4026 repo.close()
4023 4027
4024 4028 # Our strategy is to replace the type of the object with one that
4025 4029 # has all attribute lookups result in error.
4026 4030 #
4027 4031 # But we have to allow the close() method because some constructors
4028 4032 # of repos call close() on repo references.
4029 4033 class poisonedrepository:
4030 4034 def __getattribute__(self, item):
4031 4035 if item == 'close':
4032 4036 return object.__getattribute__(self, item)
4033 4037
4034 4038 raise error.ProgrammingError(
4035 4039 b'repo instances should not be used after unshare'
4036 4040 )
4037 4041
4038 4042 def close(self):
4039 4043 pass
4040 4044
4041 4045 # We may have a repoview, which intercepts __setattr__. So be sure
4042 4046 # we operate at the lowest level possible.
4043 4047 object.__setattr__(repo, '__class__', poisonedrepository)
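# A minimal behavioural sketch of the poisoning above, assuming `repo`
# was just unshared: every attribute access except close() now raises.
poisonrepository(repo)
repo.close()  # still permitted by __getattribute__
try:
    repo.changelog
except error.ProgrammingError:
    pass  # any other access is rejected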
@@ -1,2325 +1,2329 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import binascii
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 short,
24 24 wdirrev,
25 25 )
26 26 from .pycompat import getattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies as copiesmod,
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 requirements as requirementsmod,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 hashutil,
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod('parsers')
60 60 rustrevlog = policy.importrust('revlog')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
65 65 @attr.s(slots=True, repr=False)
66 66 class status:
67 67 """Struct with a list of files per status.
68 68
69 69 The 'deleted', 'unknown' and 'ignored' properties are only
70 70 relevant to the working copy.
71 71 """
72 72
73 73 modified = attr.ib(default=attr.Factory(list))
74 74 added = attr.ib(default=attr.Factory(list))
75 75 removed = attr.ib(default=attr.Factory(list))
76 76 deleted = attr.ib(default=attr.Factory(list))
77 77 unknown = attr.ib(default=attr.Factory(list))
78 78 ignored = attr.ib(default=attr.Factory(list))
79 79 clean = attr.ib(default=attr.Factory(list))
80 80
81 81 def __iter__(self):
82 82 yield self.modified
83 83 yield self.added
84 84 yield self.removed
85 85 yield self.deleted
86 86 yield self.unknown
87 87 yield self.ignored
88 88 yield self.clean
89 89
90 90 def __repr__(self):
91 91 return (
92 92 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
93 93 r'unknown=%s, ignored=%s, clean=%s>'
94 94 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95 95
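# A minimal sketch of the iteration order fixed by __iter__ above:
# a status object unpacks into its seven file lists.
st = status(modified=[b'a'], added=[b'b'])
modified, added, removed, deleted, unknown, ignored, clean = st
assert modified == [b'a'] and added == [b'b'] and clean == []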
96 96
97 97 def itersubrepos(ctx1, ctx2):
98 98 """find subrepos in ctx1 or ctx2"""
99 99 # Create a (subpath, ctx) mapping where we prefer subpaths from
100 100 # ctx1. The subpaths from ctx2 are important when the .hgsub file
101 101 # has been modified (in ctx2) but not yet committed (in ctx1).
102 102 subpaths = dict.fromkeys(ctx2.substate, ctx2)
103 103 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
104 104
105 105 missing = set()
106 106
107 107 for subpath in ctx2.substate:
108 108 if subpath not in ctx1.substate:
109 109 del subpaths[subpath]
110 110 missing.add(subpath)
111 111
112 112 for subpath, ctx in sorted(subpaths.items()):
113 113 yield subpath, ctx.sub(subpath)
114 114
115 115 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
116 116 # status and diff will have an accurate result when it does
117 117 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
118 118 # against itself.
119 119 for subpath in missing:
120 120 yield subpath, ctx2.nullsub(subpath, ctx1)
121 121
122 122
123 123 def nochangesfound(ui, repo, excluded=None):
124 124 """Report no changes for push/pull, excluded is None or a list of
125 125 nodes excluded from the push/pull.
126 126 """
127 127 secretlist = []
128 128 if excluded:
129 129 for n in excluded:
130 130 ctx = repo[n]
131 131 if ctx.phase() >= phases.secret and not ctx.extinct():
132 132 secretlist.append(n)
133 133
134 134 if secretlist:
135 135 ui.status(
136 136 _(b"no changes found (ignored %d secret changesets)\n")
137 137 % len(secretlist)
138 138 )
139 139 else:
140 140 ui.status(_(b"no changes found\n"))
141 141
142 142
143 143 def callcatch(ui, func):
144 144 """call func() with global exception handling
145 145
146 146 return func() if no exception happens. otherwise do some error handling
147 147 and return an exit code accordingly. does not handle all exceptions.
148 148 """
149 149 coarse_exit_code = -1
150 150 detailed_exit_code = -1
151 151 try:
152 152 try:
153 153 return func()
154 154 except: # re-raises
155 155 ui.traceback()
156 156 raise
157 157 # Global exception handling, alphabetically
158 158 # Mercurial-specific first, followed by built-in and library exceptions
159 159 except error.LockHeld as inst:
160 160 detailed_exit_code = 20
161 161 if inst.errno == errno.ETIMEDOUT:
162 162 reason = _(b'timed out waiting for lock held by %r') % (
163 163 pycompat.bytestr(inst.locker)
164 164 )
165 165 else:
166 166 reason = _(b'lock held by %r') % inst.locker
167 167 ui.error(
168 168 _(b"abort: %s: %s\n")
169 169 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
170 170 )
171 171 if not inst.locker:
172 172 ui.error(_(b"(lock might be very busy)\n"))
173 173 except error.LockUnavailable as inst:
174 174 detailed_exit_code = 20
175 175 ui.error(
176 176 _(b"abort: could not lock %s: %s\n")
177 177 % (
178 178 inst.desc or stringutil.forcebytestr(inst.filename),
179 179 encoding.strtolocal(inst.strerror),
180 180 )
181 181 )
182 182 except error.RepoError as inst:
183 183 if isinstance(inst, error.RepoLookupError):
184 184 detailed_exit_code = 10
185 185 ui.error(_(b"abort: %s\n") % inst)
186 186 if inst.hint:
187 187 ui.error(_(b"(%s)\n") % inst.hint)
188 188 except error.ResponseError as inst:
189 189 ui.error(_(b"abort: %s") % inst.args[0])
190 190 msg = inst.args[1]
191 191 if isinstance(msg, type(u'')):
192 192 msg = pycompat.sysbytes(msg)
193 193 if msg is None:
194 194 ui.error(b"\n")
195 195 elif not isinstance(msg, bytes):
196 196 ui.error(b" %r\n" % (msg,))
197 197 elif not msg:
198 198 ui.error(_(b" empty string\n"))
199 199 else:
200 200 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
201 201 except error.CensoredNodeError as inst:
202 202 ui.error(_(b"abort: file censored %s\n") % inst)
203 203 except error.WdirUnsupported:
204 204 ui.error(_(b"abort: working directory revision cannot be specified\n"))
205 205 except error.Error as inst:
206 206 if inst.detailed_exit_code is not None:
207 207 detailed_exit_code = inst.detailed_exit_code
208 208 if inst.coarse_exit_code is not None:
209 209 coarse_exit_code = inst.coarse_exit_code
210 210 ui.error(inst.format())
211 211 except error.WorkerError as inst:
212 212 # Don't print a message -- the worker already should have
213 213 return inst.status_code
214 214 except ImportError as inst:
215 215 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
216 216 m = stringutil.forcebytestr(inst).split()[-1]
217 217 if m in b"mpatch bdiff".split():
218 218 ui.error(_(b"(did you forget to compile extensions?)\n"))
219 219 elif m in b"zlib".split():
220 220 ui.error(_(b"(is your Python install correct?)\n"))
221 221 except util.urlerr.httperror as inst:
222 222 detailed_exit_code = 100
223 223 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
224 224 except util.urlerr.urlerror as inst:
225 225 detailed_exit_code = 100
226 226 try: # usually it is in the form (errno, strerror)
227 227 reason = inst.reason.args[1]
228 228 except (AttributeError, IndexError):
229 229 # it might be anything, for example a string
230 230 reason = inst.reason
231 231 if isinstance(reason, str):
232 232 # SSLError of Python 2.7.9 contains a unicode
233 233 reason = encoding.unitolocal(reason)
234 234 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
235 235 except (IOError, OSError) as inst:
236 236 if (
237 237 util.safehasattr(inst, "args")
238 238 and inst.args
239 239 and inst.args[0] == errno.EPIPE
240 240 ):
241 241 pass
242 242 elif getattr(inst, "strerror", None): # common IOError or OSError
243 243 if getattr(inst, "filename", None) is not None:
244 244 ui.error(
245 245 _(b"abort: %s: '%s'\n")
246 246 % (
247 247 encoding.strtolocal(inst.strerror),
248 248 stringutil.forcebytestr(inst.filename),
249 249 )
250 250 )
251 251 else:
252 252 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
253 253 else: # suspicious IOError
254 254 raise
255 255 except MemoryError:
256 256 ui.error(_(b"abort: out of memory\n"))
257 257 except SystemExit as inst:
258 258 # Commands shouldn't sys.exit directly, but give a return code.
259 259 # Just in case, catch this and pass the exit code to the caller.
260 260 detailed_exit_code = 254
261 261 coarse_exit_code = inst.code
262 262
263 263 if ui.configbool(b'ui', b'detailed-exit-code'):
264 264 return detailed_exit_code
265 265 else:
266 266 return coarse_exit_code
267 267
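# A minimal usage sketch for callcatch(), assuming `ui` is a configured
# mercurial.ui.ui instance: wrap the command body so Mercurial errors
# are reported and turned into the exit codes computed above instead of
# propagating.
def _body():
    raise error.Abort(b'demonstration failure')

exit_code = callcatch(ui, _body)  # prints "abort: ..." and returns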
268 268
269 269 def checknewlabel(repo, lbl, kind):
270 270 # Do not use the "kind" parameter in ui output.
271 271 # It makes strings difficult to translate.
272 272 if lbl in [b'tip', b'.', b'null']:
273 273 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
274 274 for c in (b':', b'\0', b'\n', b'\r'):
275 275 if c in lbl:
276 276 raise error.InputError(
277 277 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
278 278 )
279 279 try:
280 280 int(lbl)
281 281 if b'_' in lbl:
282 282 # If label contains underscores, Python might consider it an
283 283 # integer (with "_" as visual separators), but we do not.
284 284 # See PEP 515 - Underscores in Numeric Literals.
285 285 raise ValueError
286 286 raise error.InputError(_(b"cannot use an integer as a name"))
287 287 except ValueError:
288 288 pass
289 289 if lbl.strip() != lbl:
290 290 raise error.InputError(
291 291 _(b"leading or trailing whitespace in name %r") % lbl
292 292 )
293 293
294 294
295 295 def checkfilename(f):
296 296 '''Check that the filename f is an acceptable filename for a tracked file'''
297 297 if b'\r' in f or b'\n' in f:
298 298 raise error.InputError(
299 299 _(b"'\\n' and '\\r' disallowed in filenames: %r")
300 300 % pycompat.bytestr(f)
301 301 )
302 302
303 303
304 304 def checkportable(ui, f):
305 305 '''Check if filename f is portable and warn or abort depending on config'''
306 306 checkfilename(f)
307 307 abort, warn = checkportabilityalert(ui)
308 308 if abort or warn:
309 309 msg = util.checkwinfilename(f)
310 310 if msg:
311 311 msg = b"%s: %s" % (msg, procutil.shellquote(f))
312 312 if abort:
313 313 raise error.InputError(msg)
314 314 ui.warn(_(b"warning: %s\n") % msg)
315 315
316 316
317 317 def checkportabilityalert(ui):
318 318 """check if the user's config requests nothing, a warning, or abort for
319 319 non-portable filenames"""
320 320 val = ui.config(b'ui', b'portablefilenames')
321 321 lval = val.lower()
322 322 bval = stringutil.parsebool(val)
323 323 abort = pycompat.iswindows or lval == b'abort'
324 324 warn = bval or lval == b'warn'
325 325 if bval is None and not (warn or abort or lval == b'ignore'):
326 326 raise error.ConfigError(
327 327 _(b"ui.portablefilenames value is invalid ('%s')") % val
328 328 )
329 329 return abort, warn
330 330
331 331
332 332 class casecollisionauditor:
333 333 def __init__(self, ui, abort, dirstate):
334 334 self._ui = ui
335 335 self._abort = abort
336 336 allfiles = b'\0'.join(dirstate)
337 337 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
338 338 self._dirstate = dirstate
339 339 # The purpose of _newfiles is so that we don't complain about
340 340 # case collisions if someone were to call this object with the
341 341 # same filename twice.
342 342 self._newfiles = set()
343 343
344 344 def __call__(self, f):
345 345 if f in self._newfiles:
346 346 return
347 347 fl = encoding.lower(f)
348 348 if fl in self._loweredfiles and f not in self._dirstate:
349 349 msg = _(b'possible case-folding collision for %s') % f
350 350 if self._abort:
351 351 raise error.StateError(msg)
352 352 self._ui.warn(_(b"warning: %s\n") % msg)
353 353 self._loweredfiles.add(fl)
354 354 self._newfiles.add(f)
355 355
356 356
357 357 def filteredhash(repo, maxrev, needobsolete=False):
358 358 """build hash of filtered revisions in the current repoview.
359 359
360 360 Multiple caches perform up-to-date validation by checking that the
361 361 tiprev and tipnode stored in the cache file match the current repository.
362 362 However, this is not sufficient for validating repoviews because the set
363 363 of revisions in the view may change without the repository tiprev and
364 364 tipnode changing.
365 365
366 366 This function hashes all the revs filtered from the view (and, optionally,
367 367 all obsolete revs) up to maxrev and returns that SHA-1 digest.
368 368 """
369 369 cl = repo.changelog
370 370 if needobsolete:
371 371 obsrevs = obsolete.getrevs(repo, b'obsolete')
372 372 if not cl.filteredrevs and not obsrevs:
373 373 return None
374 374 key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
375 375 else:
376 376 if not cl.filteredrevs:
377 377 return None
378 378 key = maxrev
379 379 obsrevs = frozenset()
380 380
381 381 result = cl._filteredrevs_hashcache.get(key)
382 382 if not result:
383 383 revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
384 384 if revs:
385 385 s = hashutil.sha1()
386 386 for rev in revs:
387 387 s.update(b'%d;' % rev)
388 388 result = s.digest()
389 389 cl._filteredrevs_hashcache[key] = result
390 390 return result
391 391
392 392
393 393 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
394 394 """yield every hg repository under path, always recursively.
395 395 The recurse flag will only control recursion into repo working dirs"""
396 396
397 397 def errhandler(err):
398 398 if err.filename == path:
399 399 raise err
400 400
401 401 samestat = getattr(os.path, 'samestat', None)
402 402 if followsym and samestat is not None:
403 403
404 404 def adddir(dirlst, dirname):
405 405 dirstat = os.stat(dirname)
406 406 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
407 407 if not match:
408 408 dirlst.append(dirstat)
409 409 return not match
410 410
411 411 else:
412 412 followsym = False
413 413
414 414 if (seen_dirs is None) and followsym:
415 415 seen_dirs = []
416 416 adddir(seen_dirs, path)
417 417 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
418 418 dirs.sort()
419 419 if b'.hg' in dirs:
420 420 yield root # found a repository
421 421 qroot = os.path.join(root, b'.hg', b'patches')
422 422 if os.path.isdir(os.path.join(qroot, b'.hg')):
423 423 yield qroot # we have a patch queue repo here
424 424 if recurse:
425 425 # avoid recursing inside the .hg directory
426 426 dirs.remove(b'.hg')
427 427 else:
428 428 dirs[:] = [] # don't descend further
429 429 elif followsym:
430 430 newdirs = []
431 431 for d in dirs:
432 432 fname = os.path.join(root, d)
433 433 if adddir(seen_dirs, fname):
434 434 if os.path.islink(fname):
435 435 for hgname in walkrepos(fname, True, seen_dirs):
436 436 yield hgname
437 437 else:
438 438 newdirs.append(d)
439 439 dirs[:] = newdirs
440 440
441 441
442 442 def binnode(ctx):
443 443 """Return binary node id for a given basectx"""
444 444 node = ctx.node()
445 445 if node is None:
446 446 return ctx.repo().nodeconstants.wdirid
447 447 return node
448 448
449 449
450 450 def intrev(ctx):
451 451 """Return integer for a given basectx that can be used in comparison or
452 452 arithmetic operation"""
453 453 rev = ctx.rev()
454 454 if rev is None:
455 455 return wdirrev
456 456 return rev
457 457
458 458
459 459 def formatchangeid(ctx):
460 460 """Format changectx as '{rev}:{node|formatnode}', which is the default
461 461 template provided by logcmdutil.changesettemplater"""
462 462 repo = ctx.repo()
463 463 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
464 464
465 465
466 466 def formatrevnode(ui, rev, node):
467 467 """Format given revision and node depending on the current verbosity"""
468 468 if ui.debugflag:
469 469 hexfunc = hex
470 470 else:
471 471 hexfunc = short
472 472 return b'%d:%s' % (rev, hexfunc(node))
473 473
474 474
475 475 def resolvehexnodeidprefix(repo, prefix):
476 476 if prefix.startswith(b'x'):
477 477 prefix = prefix[1:]
478 478 try:
479 479 # Uses unfiltered repo because it's faster when prefix is ambiguous.
480 480 # This matches the shortesthexnodeidprefix() function below.
481 481 node = repo.unfiltered().changelog._partialmatch(prefix)
482 482 except error.AmbiguousPrefixLookupError:
483 483 revset = repo.ui.config(
484 484 b'experimental', b'revisions.disambiguatewithin'
485 485 )
486 486 if revset:
487 487 # Clear config to avoid infinite recursion
488 488 configoverrides = {
489 489 (b'experimental', b'revisions.disambiguatewithin'): None
490 490 }
491 491 with repo.ui.configoverride(configoverrides):
492 492 revs = repo.anyrevs([revset], user=True)
493 493 matches = []
494 494 for rev in revs:
495 495 node = repo.changelog.node(rev)
496 496 if hex(node).startswith(prefix):
497 497 matches.append(node)
498 498 if len(matches) == 1:
499 499 return matches[0]
500 500 raise
501 501 if node is None:
502 502 return
503 503 repo.changelog.rev(node) # make sure node isn't filtered
504 504 return node
505 505
506 506
507 507 def mayberevnum(repo, prefix):
508 508 """Checks if the given prefix may be mistaken for a revision number"""
509 509 try:
510 510 i = int(prefix)
511 511 # if we are a pure int, then starting with zero will not be
512 512 # confused as a rev; or, obviously, if the int is larger
513 513 # than the value of the tip rev. We still need to disambiguate if
514 514 # prefix == '0', since that *is* a valid revnum.
515 515 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
516 516 return False
517 517 return True
518 518 except ValueError:
519 519 return False
520 520
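# Worked example (illustrative): in a repo with 1200 revisions, b'123' may be
# mistaken for a revnum (True), b'0123' starts with a zero (False), b'99999'
# exceeds the tip rev (False), and b'0' itself is a valid revnum (True).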
521 521
522 522 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
523 523 """Find the shortest unambiguous prefix that matches hexnode.
524 524
525 525 If "cache" is not None, it must be a dictionary that can be used for
526 526 caching between calls to this method.
527 527 """
528 528 # _partialmatch() of filtered changelog could take O(len(repo)) time,
529 529     # which would be unacceptably slow. So we look for hash collisions in
530 530 # unfiltered space, which means some hashes may be slightly longer.
531 531
532 532 minlength = max(minlength, 1)
533 533
534 534 def disambiguate(prefix):
535 535 """Disambiguate against revnums."""
536 536 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
537 537 if mayberevnum(repo, prefix):
538 538 return b'x' + prefix
539 539 else:
540 540 return prefix
541 541
542 542 hexnode = hex(node)
543 543 for length in range(len(prefix), len(hexnode) + 1):
544 544 prefix = hexnode[:length]
545 545 if not mayberevnum(repo, prefix):
546 546 return prefix
547 547
548 548 cl = repo.unfiltered().changelog
549 549 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
550 550 if revset:
551 551 revs = None
552 552 if cache is not None:
553 553 revs = cache.get(b'disambiguationrevset')
554 554 if revs is None:
555 555 revs = repo.anyrevs([revset], user=True)
556 556 if cache is not None:
557 557 cache[b'disambiguationrevset'] = revs
558 558 if cl.rev(node) in revs:
559 559 hexnode = hex(node)
560 560 nodetree = None
561 561 if cache is not None:
562 562 nodetree = cache.get(b'disambiguationnodetree')
563 563 if not nodetree:
564 564 if util.safehasattr(parsers, 'nodetree'):
565 565 # The CExt is the only implementation to provide a nodetree
566 566 # class so far.
567 567 index = cl.index
568 568 if util.safehasattr(index, 'get_cindex'):
569 569                         # the rust wrapper needs to give access to its internal index
570 570 index = index.get_cindex()
571 571 nodetree = parsers.nodetree(index, len(revs))
572 572 for r in revs:
573 573 nodetree.insert(r)
574 574 if cache is not None:
575 575 cache[b'disambiguationnodetree'] = nodetree
576 576 if nodetree is not None:
577 577 length = max(nodetree.shortest(node), minlength)
578 578 prefix = hexnode[:length]
579 579 return disambiguate(prefix)
580 580 for length in range(minlength, len(hexnode) + 1):
581 581 matches = []
582 582 prefix = hexnode[:length]
583 583 for rev in revs:
584 584 otherhexnode = repo[rev].hex()
585 585 if prefix == otherhexnode[:length]:
586 586 matches.append(otherhexnode)
587 587 if len(matches) == 1:
588 588 return disambiguate(prefix)
589 589
590 590 try:
591 591 return disambiguate(cl.shortest(node, minlength))
592 592 except error.LookupError:
593 593 raise error.RepoLookupError()
594 594
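# Illustrative usage sketch (assumes existing `repo` and changectx objects;
# `some_contexts` is hypothetical): reusing one cache dict amortizes the
# disambiguation revset/nodetree work across many calls.
#
#     cache = {}
#     for ctx in some_contexts:
#         prefix = shortesthexnodeidprefix(repo, ctx.node(), minlength=4, cache=cache)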
595 595
596 596 def isrevsymbol(repo, symbol):
597 597 """Checks if a symbol exists in the repo.
598 598
599 599 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
600 600 symbol is an ambiguous nodeid prefix.
601 601 """
602 602 try:
603 603 revsymbol(repo, symbol)
604 604 return True
605 605 except error.RepoLookupError:
606 606 return False
607 607
608 608
609 609 def revsymbol(repo, symbol):
610 610 """Returns a context given a single revision symbol (as string).
611 611
612 612 This is similar to revsingle(), but accepts only a single revision symbol,
613 613 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
614 614 not "max(public())".
615 615 """
616 616 if not isinstance(symbol, bytes):
617 617 msg = (
618 618 b"symbol (%s of type %s) was not a string, did you mean "
619 619 b"repo[symbol]?" % (symbol, type(symbol))
620 620 )
621 621 raise error.ProgrammingError(msg)
622 622 try:
623 623 if symbol in (b'.', b'tip', b'null'):
624 624 return repo[symbol]
625 625
626 626 try:
627 627 r = int(symbol)
628 628 if b'%d' % r != symbol:
629 629 raise ValueError
630 630 l = len(repo.changelog)
631 631 if r < 0:
632 632 r += l
633 633 if r < 0 or r >= l and r != wdirrev:
634 634 raise ValueError
635 635 return repo[r]
636 636 except error.FilteredIndexError:
637 637 raise
638 638 except (ValueError, OverflowError, IndexError):
639 639 pass
640 640
641 641 if len(symbol) == 2 * repo.nodeconstants.nodelen:
642 642 try:
643 643 node = bin(symbol)
644 644 rev = repo.changelog.rev(node)
645 645 return repo[rev]
646 646 except error.FilteredLookupError:
647 647 raise
648 648 except (binascii.Error, LookupError):
649 649 pass
650 650
651 651 # look up bookmarks through the name interface
652 652 try:
653 653 node = repo.names.singlenode(repo, symbol)
654 654 rev = repo.changelog.rev(node)
655 655 return repo[rev]
656 656 except KeyError:
657 657 pass
658 658
659 659 node = resolvehexnodeidprefix(repo, symbol)
660 660 if node is not None:
661 661 rev = repo.changelog.rev(node)
662 662 return repo[rev]
663 663
664 664 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
665 665
666 666 except error.WdirUnsupported:
667 667 return repo[None]
668 668 except (
669 669 error.FilteredIndexError,
670 670 error.FilteredLookupError,
671 671 error.FilteredRepoLookupError,
672 672 ):
673 673 raise _filterederror(repo, symbol)
674 674
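# Illustrative calls (assume an existing `repo`); each returns a changectx or
# raises error.RepoLookupError for an unknown symbol:
#
#     revsymbol(repo, b'.')         # working directory parent
#     revsymbol(repo, b'tip')       # tip changeset
#     revsymbol(repo, b'1234')      # revision number
#     revsymbol(repo, b'deadbeef')  # node id prefix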
675 675
676 676 def _filterederror(repo, changeid):
677 677 """build an exception to be raised about a filtered changeid
678 678
679 679 This is extracted in a function to help extensions (eg: evolve) to
680 680 experiment with various message variants."""
681 681 if repo.filtername.startswith(b'visible'):
682 682
683 683 # Check if the changeset is obsolete
684 684 unfilteredrepo = repo.unfiltered()
685 685 ctx = revsymbol(unfilteredrepo, changeid)
686 686
687 687 # If the changeset is obsolete, enrich the message with the reason
688 688 # that made this changeset not visible
689 689 if ctx.obsolete():
690 690 msg = obsutil._getfilteredreason(repo, changeid, ctx)
691 691 else:
692 692 msg = _(b"hidden revision '%s'") % changeid
693 693
694 694 hint = _(b'use --hidden to access hidden revisions')
695 695
696 696 return error.FilteredRepoLookupError(msg, hint=hint)
697 697 msg = _(b"filtered revision '%s' (not in '%s' subset)")
698 698 msg %= (changeid, repo.filtername)
699 699 return error.FilteredRepoLookupError(msg)
700 700
701 701
702 702 def revsingle(repo, revspec, default=b'.', localalias=None):
703 703 if not revspec and revspec != 0:
704 704 return repo[default]
705 705
706 706 l = revrange(repo, [revspec], localalias=localalias)
707 707 if not l:
708 708 raise error.InputError(_(b'empty revision set'))
709 709 return repo[l.last()]
710 710
711 711
712 712 def _pairspec(revspec):
713 713 tree = revsetlang.parse(revspec)
714 714 return tree and tree[0] in (
715 715 b'range',
716 716 b'rangepre',
717 717 b'rangepost',
718 718 b'rangeall',
719 719 )
720 720
721 721
722 722 def revpair(repo, revs):
723 723 if not revs:
724 724 return repo[b'.'], repo[None]
725 725
726 726 l = revrange(repo, revs)
727 727
728 728 if not l:
729 729 raise error.InputError(_(b'empty revision range'))
730 730
731 731 first = l.first()
732 732 second = l.last()
733 733
734 734 if (
735 735 first == second
736 736 and len(revs) >= 2
737 737 and not all(revrange(repo, [r]) for r in revs)
738 738 ):
739 739 raise error.InputError(_(b'empty revision on one side of range'))
740 740
741 741 # if top-level is range expression, the result must always be a pair
742 742 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
743 743 return repo[first], repo[None]
744 744
745 745 return repo[first], repo[second]
746 746
747 747
748 748 def revrange(repo, specs, localalias=None):
749 749 """Execute 1 to many revsets and return the union.
750 750
751 751 This is the preferred mechanism for executing revsets using user-specified
752 752 config options, such as revset aliases.
753 753
754 754 The revsets specified by ``specs`` will be executed via a chained ``OR``
755 755 expression. If ``specs`` is empty, an empty result is returned.
756 756
757 757 ``specs`` can contain integers, in which case they are assumed to be
758 758 revision numbers.
759 759
760 760 It is assumed the revsets are already formatted. If you have arguments
761 761 that need to be expanded in the revset, call ``revsetlang.formatspec()``
762 762 and pass the result as an element of ``specs``.
763 763
764 764 Specifying a single revset is allowed.
765 765
766 766 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
767 767 integer revisions.
768 768 """
769 769 allspecs = []
770 770 for spec in specs:
771 771 if isinstance(spec, int):
772 772 spec = revsetlang.formatspec(b'%d', spec)
773 773 allspecs.append(spec)
774 774 return repo.anyrevs(allspecs, user=True, localalias=localalias)
775 775
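# Illustrative usage sketch (assumes an existing `repo`): integers are
# accepted directly, and user-provided values should be escaped with
# revsetlang.formatspec() first.
#
#     revs = revrange(repo, [b'heads(default)', 42])
#     revs = revrange(repo, [revsetlang.formatspec(b'ancestors(%d)', 42)])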
776 776
777 777 def increasingwindows(windowsize=8, sizelimit=512):
778 778 while True:
779 779 yield windowsize
780 780 if windowsize < sizelimit:
781 781 windowsize *= 2
782 782
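# For example (illustrative), the default parameters yield window sizes that
# double until they reach the limit and then repeat it:
#
#     g = increasingwindows()
#     [next(g) for _ in range(8)]  # -> [8, 16, 32, 64, 128, 256, 512, 512]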
783 783
784 784 def walkchangerevs(repo, revs, makefilematcher, prepare):
785 785 """Iterate over files and the revs in a "windowed" way.
786 786
787 787 Callers most commonly need to iterate backwards over the history
788 788 in which they are interested. Doing so has awful (quadratic-looking)
789 789 performance, so we use iterators in a "windowed" way.
790 790
791 791 We walk a window of revisions in the desired order. Within the
792 792 window, we first walk forwards to gather data, then in the desired
793 793 order (usually backwards) to display it.
794 794
795 795 This function returns an iterator yielding contexts. Before
796 796 yielding each context, the iterator will first call the prepare
797 797 function on each context in the window in forward order."""
798 798
799 799 if not revs:
800 800 return []
801 801 change = repo.__getitem__
802 802
803 803 def iterate():
804 804 it = iter(revs)
805 805 stopiteration = False
806 806 for windowsize in increasingwindows():
807 807 nrevs = []
808 808 for i in range(windowsize):
809 809 rev = next(it, None)
810 810 if rev is None:
811 811 stopiteration = True
812 812 break
813 813 nrevs.append(rev)
814 814 for rev in sorted(nrevs):
815 815 ctx = change(rev)
816 816 prepare(ctx, makefilematcher(ctx))
817 817 for rev in nrevs:
818 818 yield change(rev)
819 819
820 820 if stopiteration:
821 821 break
822 822
823 823 return iterate()
824 824
825 825
826 826 def meaningfulparents(repo, ctx):
827 827 """Return list of meaningful (or all if debug) parentrevs for rev.
828 828
829 829 For merges (two non-nullrev revisions) both parents are meaningful.
830 830 Otherwise the first parent revision is considered meaningful if it
831 831 is not the preceding revision.
832 832 """
833 833 parents = ctx.parents()
834 834 if len(parents) > 1:
835 835 return parents
836 836 if repo.ui.debugflag:
837 837 return [parents[0], repo[nullrev]]
838 838 if parents[0].rev() >= intrev(ctx) - 1:
839 839 return []
840 840 return parents
841 841
842 842
843 843 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
844 844     """Return a function that produces paths for presenting to the user.
845 845
846 846 The returned function takes a repo-relative path and produces a path
847 847 that can be presented in the UI.
848 848
849 849 Depending on the value of ui.relative-paths, either a repo-relative or
850 850 cwd-relative path will be produced.
851 851
852 852 legacyrelativevalue is the value to use if ui.relative-paths=legacy
853 853
854 854 If forcerelativevalue is not None, then that value will be used regardless
855 855 of what ui.relative-paths is set to.
856 856 """
857 857 if forcerelativevalue is not None:
858 858 relative = forcerelativevalue
859 859 else:
860 860 config = repo.ui.config(b'ui', b'relative-paths')
861 861 if config == b'legacy':
862 862 relative = legacyrelativevalue
863 863 else:
864 864 relative = stringutil.parsebool(config)
865 865 if relative is None:
866 866 raise error.ConfigError(
867 867 _(b"ui.relative-paths is not a boolean ('%s')") % config
868 868 )
869 869
870 870 if relative:
871 871 cwd = repo.getcwd()
872 872 if cwd != b'':
873 873 # this branch would work even if cwd == b'' (ie cwd = repo
874 874 # root), but its generality makes the returned function slower
875 875 pathto = repo.pathto
876 876 return lambda f: pathto(f, cwd)
877 877 if repo.ui.configbool(b'ui', b'slash'):
878 878 return lambda f: f
879 879 else:
880 880 return util.localpath
881 881
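# Illustrative usage sketch (assumes `repo` and a repo-relative path `f`):
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     repo.ui.status(_(b'processing %s\n') % uipathfn(f))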
882 882
883 883 def subdiruipathfn(subpath, uipathfn):
884 884 '''Create a new uipathfn that treats the file as relative to subpath.'''
885 885 return lambda f: uipathfn(posixpath.join(subpath, f))
886 886
887 887
888 888 def anypats(pats, opts):
889 889 """Checks if any patterns, including --include and --exclude were given.
890 890
891 891 Some commands (e.g. addremove) use this condition for deciding whether to
892 892 print absolute or relative paths.
893 893 """
894 894 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
895 895
896 896
897 897 def expandpats(pats):
898 898 """Expand bare globs when running on windows.
899 899     On posix we assume it has already been done by sh."""
900 900 if not util.expandglobs:
901 901 return list(pats)
902 902 ret = []
903 903 for kindpat in pats:
904 904 kind, pat = matchmod._patsplit(kindpat, None)
905 905 if kind is None:
906 906 try:
907 907 globbed = glob.glob(pat)
908 908 except re.error:
909 909 globbed = [pat]
910 910 if globbed:
911 911 ret.extend(globbed)
912 912 continue
913 913 ret.append(kindpat)
914 914 return ret
915 915
916 916
917 917 def matchandpats(
918 918 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
919 919 ):
920 920 """Return a matcher and the patterns that were used.
921 921 The matcher will warn about bad matches, unless an alternate badfn callback
922 922 is provided."""
923 923 if opts is None:
924 924 opts = {}
925 925 if not globbed and default == b'relpath':
926 926 pats = expandpats(pats or [])
927 927
928 928 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
929 929
930 930 def bad(f, msg):
931 931 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
932 932
933 933 if badfn is None:
934 934 badfn = bad
935 935
936 936 m = ctx.match(
937 937 pats,
938 938 opts.get(b'include'),
939 939 opts.get(b'exclude'),
940 940 default,
941 941 listsubrepos=opts.get(b'subrepos'),
942 942 badfn=badfn,
943 943 )
944 944
945 945 if m.always():
946 946 pats = []
947 947 return m, pats
948 948
949 949
950 950 def match(
951 951 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
952 952 ):
953 953 '''Return a matcher that will warn about bad matches.'''
954 954 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
955 955
956 956
957 957 def matchall(repo):
958 958 '''Return a matcher that will efficiently match everything.'''
959 959 return matchmod.always()
960 960
961 961
962 962 def matchfiles(repo, files, badfn=None):
963 963 '''Return a matcher that will efficiently match exactly these files.'''
964 964 return matchmod.exact(files, badfn=badfn)
965 965
966 966
967 967 def parsefollowlinespattern(repo, rev, pat, msg):
968 968 """Return a file name from `pat` pattern suitable for usage in followlines
969 969 logic.
970 970 """
971 971 if not matchmod.patkind(pat):
972 972 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
973 973 else:
974 974 ctx = repo[rev]
975 975 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
976 976 files = [f for f in ctx if m(f)]
977 977 if len(files) != 1:
978 978 raise error.ParseError(msg)
979 979 return files[0]
980 980
981 981
982 982 def getorigvfs(ui, repo):
983 983 """return a vfs suitable to save 'orig' file
984 984
985 985 return None if no special directory is configured"""
986 986 origbackuppath = ui.config(b'ui', b'origbackuppath')
987 987 if not origbackuppath:
988 988 return None
989 989 return vfs.vfs(repo.wvfs.join(origbackuppath))
990 990
991 991
992 992 def backuppath(ui, repo, filepath):
993 993 """customize where working copy backup files (.orig files) are created
994 994
995 995 Fetch user defined path from config file: [ui] origbackuppath = <path>
996 996 Fall back to default (filepath with .orig suffix) if not specified
997 997
998 998 filepath is repo-relative
999 999
1000 1000 Returns an absolute path
1001 1001 """
1002 1002 origvfs = getorigvfs(ui, repo)
1003 1003 if origvfs is None:
1004 1004 return repo.wjoin(filepath + b".orig")
1005 1005
1006 1006 origbackupdir = origvfs.dirname(filepath)
1007 1007 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
1008 1008 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
1009 1009
1010 1010 # Remove any files that conflict with the backup file's path
1011 1011 for f in reversed(list(pathutil.finddirs(filepath))):
1012 1012 if origvfs.isfileorlink(f):
1013 1013 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1014 1014 origvfs.unlink(f)
1015 1015 break
1016 1016
1017 1017 origvfs.makedirs(origbackupdir)
1018 1018
1019 1019 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1020 1020 ui.note(
1021 1021 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1022 1022 )
1023 1023 origvfs.rmtree(filepath, forcibly=True)
1024 1024
1025 1025 return origvfs.join(filepath)
1026 1026
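# Worked example (illustrative): without [ui] origbackuppath, b'dir/a.txt' is
# backed up to <repo>/dir/a.txt.orig; with origbackuppath=.hg/origbackups it
# becomes <repo>/.hg/origbackups/dir/a.txt, after any conflicting files or
# directories on that path have been removed.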
1027 1027
1028 1028 class _containsnode:
1029 1029 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1030 1030
1031 1031 def __init__(self, repo, revcontainer):
1032 1032 self._torev = repo.changelog.rev
1033 1033 self._revcontains = revcontainer.__contains__
1034 1034
1035 1035 def __contains__(self, node):
1036 1036 return self._revcontains(self._torev(node))
1037 1037
1038 1038
1039 1039 def cleanupnodes(
1040 1040 repo,
1041 1041 replacements,
1042 1042 operation,
1043 1043 moves=None,
1044 1044 metadata=None,
1045 1045 fixphase=False,
1046 1046 targetphase=None,
1047 1047 backup=True,
1048 1048 ):
1049 1049 """do common cleanups when old nodes are replaced by new nodes
1050 1050
1051 1051 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1052 1052 (we might also want to move working directory parent in the future)
1053 1053
1054 1054 By default, bookmark moves are calculated automatically from 'replacements',
1055 1055 but 'moves' can be used to override that. Also, 'moves' may include
1056 1056 additional bookmark moves that should not have associated obsmarkers.
1057 1057
1058 1058     replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1059 1059 have replacements. operation is a string, like "rebase".
1060 1060
1061 1061     metadata is a dictionary containing metadata to be stored in obsmarker if
1062 1062 obsolescence is enabled.
1063 1063 """
1064 1064 assert fixphase or targetphase is None
1065 1065 if not replacements and not moves:
1066 1066 return
1067 1067
1068 1068 # translate mapping's other forms
1069 1069 if not util.safehasattr(replacements, 'items'):
1070 1070 replacements = {(n,): () for n in replacements}
1071 1071 else:
1072 1072         # upgrading non-tuple "source" keys to tuples for BC
1073 1073 repls = {}
1074 1074 for key, value in replacements.items():
1075 1075 if not isinstance(key, tuple):
1076 1076 key = (key,)
1077 1077 repls[key] = value
1078 1078 replacements = repls
1079 1079
1080 1080 # Unfiltered repo is needed since nodes in replacements might be hidden.
1081 1081 unfi = repo.unfiltered()
1082 1082
1083 1083 # Calculate bookmark movements
1084 1084 if moves is None:
1085 1085 moves = {}
1086 1086 for oldnodes, newnodes in replacements.items():
1087 1087 for oldnode in oldnodes:
1088 1088 if oldnode in moves:
1089 1089 continue
1090 1090 if len(newnodes) > 1:
1091 1091 # usually a split, take the one with biggest rev number
1092 1092 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1093 1093 elif len(newnodes) == 0:
1094 1094 # move bookmark backwards
1095 1095 allreplaced = []
1096 1096 for rep in replacements:
1097 1097 allreplaced.extend(rep)
1098 1098 roots = list(
1099 1099 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1100 1100 )
1101 1101 if roots:
1102 1102 newnode = roots[0].node()
1103 1103 else:
1104 1104 newnode = repo.nullid
1105 1105 else:
1106 1106 newnode = newnodes[0]
1107 1107 moves[oldnode] = newnode
1108 1108
1109 1109 allnewnodes = [n for ns in replacements.values() for n in ns]
1110 1110 toretract = {}
1111 1111 toadvance = {}
1112 1112 if fixphase:
1113 1113 precursors = {}
1114 1114 for oldnodes, newnodes in replacements.items():
1115 1115 for oldnode in oldnodes:
1116 1116 for newnode in newnodes:
1117 1117 precursors.setdefault(newnode, []).append(oldnode)
1118 1118
1119 1119 allnewnodes.sort(key=lambda n: unfi[n].rev())
1120 1120 newphases = {}
1121 1121
1122 1122 def phase(ctx):
1123 1123 return newphases.get(ctx.node(), ctx.phase())
1124 1124
1125 1125 for newnode in allnewnodes:
1126 1126 ctx = unfi[newnode]
1127 1127 parentphase = max(phase(p) for p in ctx.parents())
1128 1128 if targetphase is None:
1129 1129 oldphase = max(
1130 1130 unfi[oldnode].phase() for oldnode in precursors[newnode]
1131 1131 )
1132 1132 newphase = max(oldphase, parentphase)
1133 1133 else:
1134 1134 newphase = max(targetphase, parentphase)
1135 1135 newphases[newnode] = newphase
1136 1136 if newphase > ctx.phase():
1137 1137 toretract.setdefault(newphase, []).append(newnode)
1138 1138 elif newphase < ctx.phase():
1139 1139 toadvance.setdefault(newphase, []).append(newnode)
1140 1140
1141 1141 with repo.transaction(b'cleanup') as tr:
1142 1142 # Move bookmarks
1143 1143 bmarks = repo._bookmarks
1144 1144 bmarkchanges = []
1145 1145 for oldnode, newnode in moves.items():
1146 1146 oldbmarks = repo.nodebookmarks(oldnode)
1147 1147 if not oldbmarks:
1148 1148 continue
1149 1149 from . import bookmarks # avoid import cycle
1150 1150
1151 1151 repo.ui.debug(
1152 1152 b'moving bookmarks %r from %s to %s\n'
1153 1153 % (
1154 1154 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1155 1155 hex(oldnode),
1156 1156 hex(newnode),
1157 1157 )
1158 1158 )
1159 1159 # Delete divergent bookmarks being parents of related newnodes
1160 1160 deleterevs = repo.revs(
1161 1161 b'parents(roots(%ln & (::%n))) - parents(%n)',
1162 1162 allnewnodes,
1163 1163 newnode,
1164 1164 oldnode,
1165 1165 )
1166 1166 deletenodes = _containsnode(repo, deleterevs)
1167 1167 for name in oldbmarks:
1168 1168 bmarkchanges.append((name, newnode))
1169 1169 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1170 1170 bmarkchanges.append((b, None))
1171 1171
1172 1172 if bmarkchanges:
1173 1173 bmarks.applychanges(repo, tr, bmarkchanges)
1174 1174
1175 1175 for phase, nodes in toretract.items():
1176 1176 phases.retractboundary(repo, tr, phase, nodes)
1177 1177 for phase, nodes in toadvance.items():
1178 1178 phases.advanceboundary(repo, tr, phase, nodes)
1179 1179
1180 1180 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1181 1181 # Obsolete or strip nodes
1182 1182 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1183 1183 # If a node is already obsoleted, and we want to obsolete it
1184 1184             # without a successor, skip that obsolete request since it's
1185 1185             # unnecessary. That's the "if s or not isobs(n)" check below.
1186 1186             # Also sort the nodes in topological order; that might be useful for
1187 1187 # some obsstore logic.
1188 1188 # NOTE: the sorting might belong to createmarkers.
1189 1189 torev = unfi.changelog.rev
1190 1190 sortfunc = lambda ns: torev(ns[0][0])
1191 1191 rels = []
1192 1192 for ns, s in sorted(replacements.items(), key=sortfunc):
1193 1193 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1194 1194 rels.append(rel)
1195 1195 if rels:
1196 1196 obsolete.createmarkers(
1197 1197 repo, rels, operation=operation, metadata=metadata
1198 1198 )
1199 1199 elif phases.supportarchived(repo) and mayusearchived:
1200 1200             # this assumes we do not have "unstable" nodes above the cleaned ones
1201 1201 allreplaced = set()
1202 1202 for ns in replacements.keys():
1203 1203 allreplaced.update(ns)
1204 1204 if backup:
1205 1205 from . import repair # avoid import cycle
1206 1206
1207 1207 node = min(allreplaced, key=repo.changelog.rev)
1208 1208 repair.backupbundle(
1209 1209 repo, allreplaced, allreplaced, node, operation
1210 1210 )
1211 1211 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1212 1212 else:
1213 1213 from . import repair # avoid import cycle
1214 1214
1215 1215 tostrip = list(n for ns in replacements for n in ns)
1216 1216 if tostrip:
1217 1217 repair.delayedstrip(
1218 1218 repo.ui, repo, tostrip, operation, backup=backup
1219 1219 )
1220 1220
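# Illustrative call shapes (hypothetical node variables): a mapping records
# successors, while a bare iterable marks nodes as replaced without
# successors.
#
#     cleanupnodes(repo, {oldnode: [newnode]}, b'amend')
#     cleanupnodes(repo, [deadnode1, deadnode2], b'histedit')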
1221 1221
1222 1222 def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
1223 1223 if opts is None:
1224 1224 opts = {}
1225 1225 m = matcher
1226 1226 dry_run = opts.get(b'dry_run')
1227 1227 try:
1228 1228 similarity = float(opts.get(b'similarity') or 0)
1229 1229 except ValueError:
1230 1230 raise error.InputError(_(b'similarity must be a number'))
1231 1231 if similarity < 0 or similarity > 100:
1232 1232 raise error.InputError(_(b'similarity must be between 0 and 100'))
1233 1233 similarity /= 100.0
1234 1234
1235 1235 ret = 0
1236 1236
1237 1237 wctx = repo[None]
1238 1238 for subpath in sorted(wctx.substate):
1239 1239 submatch = matchmod.subdirmatcher(subpath, m)
1240 1240 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1241 1241 sub = wctx.sub(subpath)
1242 1242 subprefix = repo.wvfs.reljoin(prefix, subpath)
1243 1243 subuipathfn = subdiruipathfn(subpath, uipathfn)
1244 1244 try:
1245 1245 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1246 1246 ret = 1
1247 1247 except error.LookupError:
1248 1248 repo.ui.status(
1249 1249 _(b"skipping missing subrepository: %s\n")
1250 1250 % uipathfn(subpath)
1251 1251 )
1252 1252
1253 1253 rejected = []
1254 1254
1255 1255 def badfn(f, msg):
1256 1256 if f in m.files():
1257 1257 m.bad(f, msg)
1258 1258 rejected.append(f)
1259 1259
1260 1260 badmatch = matchmod.badmatch(m, badfn)
1261 1261 added, unknown, deleted, removed, forgotten = _interestingfiles(
1262 1262 repo, badmatch
1263 1263 )
1264 1264
1265 1265 unknownset = set(unknown + forgotten)
1266 1266 toprint = unknownset.copy()
1267 1267 toprint.update(deleted)
1268 1268 for abs in sorted(toprint):
1269 1269 if repo.ui.verbose or not m.exact(abs):
1270 1270 if abs in unknownset:
1271 1271 status = _(b'adding %s\n') % uipathfn(abs)
1272 1272 label = b'ui.addremove.added'
1273 1273 else:
1274 1274 status = _(b'removing %s\n') % uipathfn(abs)
1275 1275 label = b'ui.addremove.removed'
1276 1276 repo.ui.status(status, label=label)
1277 1277
1278 1278 renames = _findrenames(
1279 1279 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1280 1280 )
1281 1281
1282 1282 if not dry_run and (unknown or forgotten or deleted or renames):
1283 1283 if open_tr is not None:
1284 1284 open_tr()
1285 1285 _markchanges(repo, unknown + forgotten, deleted, renames)
1286 1286
1287 1287 for f in rejected:
1288 1288 if f in m.files():
1289 1289 return 1
1290 1290 return ret
1291 1291
1292 1292
1293 1293 def marktouched(repo, files, similarity=0.0):
1294 1294 """Assert that files have somehow been operated upon. files are relative to
1295 1295 the repo root."""
1296 1296 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1297 1297 rejected = []
1298 1298
1299 1299 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1300 1300
1301 1301 if repo.ui.verbose:
1302 1302 unknownset = set(unknown + forgotten)
1303 1303 toprint = unknownset.copy()
1304 1304 toprint.update(deleted)
1305 1305 for abs in sorted(toprint):
1306 1306 if abs in unknownset:
1307 1307 status = _(b'adding %s\n') % abs
1308 1308 else:
1309 1309 status = _(b'removing %s\n') % abs
1310 1310 repo.ui.status(status)
1311 1311
1312 1312 # TODO: We should probably have the caller pass in uipathfn and apply it to
1313 1313 # the messages above too. legacyrelativevalue=True is consistent with how
1314 1314 # it used to work.
1315 1315 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1316 1316 renames = _findrenames(
1317 1317 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1318 1318 )
1319 1319
1320 1320 _markchanges(repo, unknown + forgotten, deleted, renames)
1321 1321
1322 1322 for f in rejected:
1323 1323 if f in m.files():
1324 1324 return 1
1325 1325 return 0
1326 1326
1327 1327
1328 1328 def _interestingfiles(repo, matcher):
1329 1329 """Walk dirstate with matcher, looking for files that addremove would care
1330 1330 about.
1331 1331
1332 1332 This is different from dirstate.status because it doesn't care about
1333 1333 whether files are modified or clean."""
1334 1334 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1335 1335 audit_path = pathutil.pathauditor(repo.root, cached=True)
1336 1336
1337 1337 ctx = repo[None]
1338 1338 dirstate = repo.dirstate
1339 1339 matcher = repo.narrowmatch(matcher, includeexact=True)
1340 1340 walkresults = dirstate.walk(
1341 1341 matcher,
1342 1342 subrepos=sorted(ctx.substate),
1343 1343 unknown=True,
1344 1344 ignored=False,
1345 1345 full=False,
1346 1346 )
1347 1347 for abs, st in walkresults.items():
1348 1348 entry = dirstate.get_entry(abs)
1349 1349 if (not entry.any_tracked) and audit_path.check(abs):
1350 1350 unknown.append(abs)
1351 1351 elif (not entry.removed) and not st:
1352 1352 deleted.append(abs)
1353 1353 elif entry.removed and st:
1354 1354 forgotten.append(abs)
1355 1355 # for finding renames
1356 1356 elif entry.removed and not st:
1357 1357 removed.append(abs)
1358 1358 elif entry.added:
1359 1359 added.append(abs)
1360 1360
1361 1361 return added, unknown, deleted, removed, forgotten
1362 1362
1363 1363
1364 1364 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1365 1365 '''Find renames from removed files to added ones.'''
1366 1366 renames = {}
1367 1367 if similarity > 0:
1368 1368 for old, new, score in similar.findrenames(
1369 1369 repo, added, removed, similarity
1370 1370 ):
1371 1371 if (
1372 1372 repo.ui.verbose
1373 1373 or not matcher.exact(old)
1374 1374 or not matcher.exact(new)
1375 1375 ):
1376 1376 repo.ui.status(
1377 1377 _(
1378 1378 b'recording removal of %s as rename to %s '
1379 1379 b'(%d%% similar)\n'
1380 1380 )
1381 1381 % (uipathfn(old), uipathfn(new), score * 100)
1382 1382 )
1383 1383 renames[new] = old
1384 1384 return renames
1385 1385
1386 1386
1387 1387 def _markchanges(repo, unknown, deleted, renames):
1388 1388 """Marks the files in unknown as added, the files in deleted as removed,
1389 1389 and the files in renames as copied."""
1390 1390 wctx = repo[None]
1391 1391 with repo.wlock():
1392 1392 wctx.forget(deleted)
1393 1393 wctx.add(unknown)
1394 1394 for new, old in renames.items():
1395 1395 wctx.copy(old, new)
1396 1396
1397 1397
1398 1398 def getrenamedfn(repo, endrev=None):
1399 1399 if copiesmod.usechangesetcentricalgo(repo):
1400 1400
1401 1401 def getrenamed(fn, rev):
1402 1402 ctx = repo[rev]
1403 1403 p1copies = ctx.p1copies()
1404 1404 if fn in p1copies:
1405 1405 return p1copies[fn]
1406 1406 p2copies = ctx.p2copies()
1407 1407 if fn in p2copies:
1408 1408 return p2copies[fn]
1409 1409 return None
1410 1410
1411 1411 return getrenamed
1412 1412
1413 1413 rcache = {}
1414 1414 if endrev is None:
1415 1415 endrev = len(repo)
1416 1416
1417 1417 def getrenamed(fn, rev):
1418 1418 """looks up all renames for a file (up to endrev) the first
1419 1419 time the file is given. It indexes on the changerev and only
1420 1420 parses the manifest if linkrev != changerev.
1421 1421 Returns rename info for fn at changerev rev."""
1422 1422 if fn not in rcache:
1423 1423 rcache[fn] = {}
1424 1424 fl = repo.file(fn)
1425 1425 for i in fl:
1426 1426 lr = fl.linkrev(i)
1427 1427 renamed = fl.renamed(fl.node(i))
1428 1428 rcache[fn][lr] = renamed and renamed[0]
1429 1429 if lr >= endrev:
1430 1430 break
1431 1431 if rev in rcache[fn]:
1432 1432 return rcache[fn][rev]
1433 1433
1434 1434 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1435 1435 # filectx logic.
1436 1436 try:
1437 1437 return repo[rev][fn].copysource()
1438 1438 except error.LookupError:
1439 1439 return None
1440 1440
1441 1441 return getrenamed
1442 1442
1443 1443
1444 1444 def getcopiesfn(repo, endrev=None):
1445 1445 if copiesmod.usechangesetcentricalgo(repo):
1446 1446
1447 1447 def copiesfn(ctx):
1448 1448 if ctx.p2copies():
1449 1449 allcopies = ctx.p1copies().copy()
1450 1450 # There should be no overlap
1451 1451 allcopies.update(ctx.p2copies())
1452 1452 return sorted(allcopies.items())
1453 1453 else:
1454 1454 return sorted(ctx.p1copies().items())
1455 1455
1456 1456 else:
1457 1457 getrenamed = getrenamedfn(repo, endrev)
1458 1458
1459 1459 def copiesfn(ctx):
1460 1460 copies = []
1461 1461 for fn in ctx.files():
1462 1462 rename = getrenamed(fn, ctx.rev())
1463 1463 if rename:
1464 1464 copies.append((fn, rename))
1465 1465 return copies
1466 1466
1467 1467 return copiesfn
1468 1468
1469 1469
1470 1470 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1471 1471 """Update the dirstate to reflect the intent of copying src to dst. For
1472 1472 different reasons it might not end with dst being marked as copied from src.
1473 1473 """
1474 1474 origsrc = repo.dirstate.copied(src) or src
1475 1475 if dst == origsrc: # copying back a copy?
1476 1476 entry = repo.dirstate.get_entry(dst)
1477 1477 if (entry.added or not entry.tracked) and not dryrun:
1478 1478 repo.dirstate.set_tracked(dst)
1479 1479 else:
1480 1480 if repo.dirstate.get_entry(origsrc).added and origsrc == src:
1481 1481 if not ui.quiet:
1482 1482 ui.warn(
1483 1483 _(
1484 1484 b"%s has not been committed yet, so no copy "
1485 1485 b"data will be stored for %s.\n"
1486 1486 )
1487 1487 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1488 1488 )
1489 1489 if not repo.dirstate.get_entry(dst).tracked and not dryrun:
1490 1490 wctx.add([dst])
1491 1491 elif not dryrun:
1492 1492 wctx.copy(origsrc, dst)
1493 1493
1494 1494
1495 1495 def movedirstate(repo, newctx, match=None):
1496 1496 """Move the dirstate to newctx and adjust it as necessary.
1497 1497
1498 1498 A matcher can be provided as an optimization. It is probably a bug to pass
1499 1499 a matcher that doesn't match all the differences between the parent of the
1500 1500 working copy and newctx.
1501 1501 """
1502 1502 oldctx = repo[b'.']
1503 1503 ds = repo.dirstate
1504 1504 copies = dict(ds.copies())
1505 1505 ds.setparents(newctx.node(), repo.nullid)
1506 1506 s = newctx.status(oldctx, match=match)
1507 1507
1508 1508 for f in s.modified:
1509 1509 ds.update_file_p1(f, p1_tracked=True)
1510 1510
1511 1511 for f in s.added:
1512 1512 ds.update_file_p1(f, p1_tracked=False)
1513 1513
1514 1514 for f in s.removed:
1515 1515 ds.update_file_p1(f, p1_tracked=True)
1516 1516
1517 1517 # Merge old parent and old working dir copies
1518 1518 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1519 1519 oldcopies.update(copies)
1520 1520 copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
1521 1521 # Adjust the dirstate copies
1522 1522 for dst, src in copies.items():
1523 1523 if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
1524 1524 src = None
1525 1525 ds.copy(src, dst)
1526 1526 repo._quick_access_changeid_invalidate()
1527 1527
1528 1528
1529 1529 def filterrequirements(requirements):
1530 1530 """filters the requirements into two sets:
1531 1531
1532 1532 wcreq: requirements which should be written in .hg/requires
1533 1533 storereq: which should be written in .hg/store/requires
1534 1534
1535 1535 Returns (wcreq, storereq)
1536 1536 """
1537 1537 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1538 1538 wc, store = set(), set()
1539 1539 for r in requirements:
1540 1540 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1541 1541 wc.add(r)
1542 1542 else:
1543 1543 store.add(r)
1544 1544 return wc, store
1545 1545 return requirements, None
1546 1546
1547 1547
1548 1548 def istreemanifest(repo):
1549 1549 """returns whether the repository is using treemanifest or not"""
1550 1550 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1551 1551
1552 1552
1553 1553 def writereporequirements(repo, requirements=None):
1554 1554 """writes requirements for the repo
1555 1555
1556 1556 Requirements are written to .hg/requires and .hg/store/requires based
1557 1557 on whether share-safe mode is enabled and which requirements are wdir
1558 1558 requirements and which are store requirements
1559 1559 """
1560 1560 if requirements:
1561 1561 repo.requirements = requirements
1562 1562 wcreq, storereq = filterrequirements(repo.requirements)
1563 1563 if wcreq is not None:
1564 1564 writerequires(repo.vfs, wcreq)
1565 1565 if storereq is not None:
1566 1566 writerequires(repo.svfs, storereq)
1567 1567 elif repo.ui.configbool(b'format', b'usestore'):
1568 1568 # only remove store requires if we are using store
1569 1569 repo.svfs.tryunlink(b'requires')
1570 1570
1571 1571
1572 1572 def writerequires(opener, requirements):
1573 1573 with opener(b'requires', b'w', atomictemp=True) as fp:
1574 1574 for r in sorted(requirements):
1575 1575 fp.write(b"%s\n" % r)
1576 1576
1577 1577
1578 1578 class filecachesubentry:
1579 1579 def __init__(self, path, stat):
1580 1580 self.path = path
1581 1581 self.cachestat = None
1582 1582 self._cacheable = None
1583 1583
1584 1584 if stat:
1585 1585 self.cachestat = filecachesubentry.stat(self.path)
1586 1586
1587 1587 if self.cachestat:
1588 1588 self._cacheable = self.cachestat.cacheable()
1589 1589 else:
1590 1590 # None means we don't know yet
1591 1591 self._cacheable = None
1592 1592
1593 1593 def refresh(self):
1594 1594 if self.cacheable():
1595 1595 self.cachestat = filecachesubentry.stat(self.path)
1596 1596
1597 1597 def cacheable(self):
1598 1598 if self._cacheable is not None:
1599 1599 return self._cacheable
1600 1600
1601 1601 # we don't know yet, assume it is for now
1602 1602 return True
1603 1603
1604 1604 def changed(self):
1605 1605 # no point in going further if we can't cache it
1606 1606 if not self.cacheable():
1607 1607 return True
1608 1608
1609 1609 newstat = filecachesubentry.stat(self.path)
1610 1610
1611 1611 # we may not know if it's cacheable yet, check again now
1612 1612 if newstat and self._cacheable is None:
1613 1613 self._cacheable = newstat.cacheable()
1614 1614
1615 1615 # check again
1616 1616 if not self._cacheable:
1617 1617 return True
1618 1618
1619 1619 if self.cachestat != newstat:
1620 1620 self.cachestat = newstat
1621 1621 return True
1622 1622 else:
1623 1623 return False
1624 1624
1625 1625 @staticmethod
1626 1626 def stat(path):
1627 1627 try:
1628 1628 return util.cachestat(path)
1629 1629 except FileNotFoundError:
1630 1630 pass
1631 1631
1632 1632
1633 1633 class filecacheentry:
1634 1634 def __init__(self, paths, stat=True):
1635 1635 self._entries = []
1636 1636 for path in paths:
1637 1637 self._entries.append(filecachesubentry(path, stat))
1638 1638
1639 1639 def changed(self):
1640 1640 '''true if any entry has changed'''
1641 1641 for entry in self._entries:
1642 1642 if entry.changed():
1643 1643 return True
1644 1644 return False
1645 1645
1646 1646 def refresh(self):
1647 1647 for entry in self._entries:
1648 1648 entry.refresh()
1649 1649
1650 1650
1651 1651 class filecache:
1652 1652 """A property like decorator that tracks files under .hg/ for updates.
1653 1653
1654 1654 On first access, the files defined as arguments are stat()ed and the
1655 1655 results cached. The decorated function is called. The results are stashed
1656 1656 away in a ``_filecache`` dict on the object whose method is decorated.
1657 1657
1658 1658     On subsequent access, the cached result is used directly, since it was
1659 1659     set in the instance dictionary.
1660 1660
1661 1661 On external property set/delete operations, the caller must update the
1662 1662 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1663 1663 instead of directly setting <attr>.
1664 1664
1665 1665 When using the property API, the cached data is always used if available.
1666 1666 No stat() is performed to check if the file has changed.
1667 1667
1668 1668 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1669 1669 can populate an entry before the property's getter is called. In this case,
1670 1670 entries in ``_filecache`` will be used during property operations,
1671 1671 if available. If the underlying file changes, it is up to external callers
1672 1672 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1673 1673 method result as well as possibly calling ``del obj._filecache[attr]`` to
1674 1674 remove the ``filecacheentry``.
1675 1675 """
1676 1676
1677 1677 def __init__(self, *paths):
1678 1678 self.paths = paths
1679 1679
1680 1680 def tracked_paths(self, obj):
1681 1681 return [self.join(obj, path) for path in self.paths]
1682 1682
1683 1683 def join(self, obj, fname):
1684 1684 """Used to compute the runtime path of a cached file.
1685 1685
1686 1686 Users should subclass filecache and provide their own version of this
1687 1687 function to call the appropriate join function on 'obj' (an instance
1688 1688     of the class whose member function was decorated).
1689 1689 """
1690 1690 raise NotImplementedError
1691 1691
1692 1692 def __call__(self, func):
1693 1693 self.func = func
1694 1694 self.sname = func.__name__
1695         # XXX We should be using a unicode string instead of bytes for the main
1696         # name (and the _filecache key). The fact we use bytes is a remnant of
1697         # Python 2: since the name is derived from an attribute name, a `str`
1698         # is a better fit now that we support Python 3 only.
1695 1699 self.name = pycompat.sysbytes(self.sname)
1696 1700 return self
1697 1701
1698 1702 def __get__(self, obj, type=None):
1699 1703 # if accessed on the class, return the descriptor itself.
1700 1704 if obj is None:
1701 1705 return self
1702 1706
1703 1707 assert self.sname not in obj.__dict__
1704 1708
1705 1709 entry = obj._filecache.get(self.name)
1706 1710
1707 1711 if entry:
1708 1712 if entry.changed():
1709 1713 entry.obj = self.func(obj)
1710 1714 else:
1711 1715 paths = self.tracked_paths(obj)
1712 1716
1713 1717 # We stat -before- creating the object so our cache doesn't lie if
1714 1718 # a writer modified between the time we read and stat
1715 1719 entry = filecacheentry(paths, True)
1716 1720 entry.obj = self.func(obj)
1717 1721
1718 1722 obj._filecache[self.name] = entry
1719 1723
1720 1724 obj.__dict__[self.sname] = entry.obj
1721 1725 return entry.obj
1722 1726
1723 1727 # don't implement __set__(), which would make __dict__ lookup as slow as
1724 1728 # function call.
1725 1729
1726 1730 def set(self, obj, value):
1727 1731 if self.name not in obj._filecache:
1728 1732 # we add an entry for the missing value because X in __dict__
1729 1733 # implies X in _filecache
1730 1734 paths = self.tracked_paths(obj)
1731 1735 ce = filecacheentry(paths, False)
1732 1736 obj._filecache[self.name] = ce
1733 1737 else:
1734 1738 ce = obj._filecache[self.name]
1735 1739
1736 1740 ce.obj = value # update cached copy
1737 1741 obj.__dict__[self.sname] = value # update copy returned by obj.x
1738 1742
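# Minimal subclass sketch (hypothetical names, mirroring repofilecache in
# localrepo.py): `join` resolves the tracked path, and the decorated method
# is only recomputed when the file's stat information changes.
#
#     class vfsfilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class thing:
#         def __init__(self, vfs):
#             self.vfs = vfs
#             self._filecache = {}
#
#         @vfsfilecache(b'somestate')
#         def somestate(self):
#             return self.vfs.tryread(b'somestate')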
1739 1743
1740 1744 def extdatasource(repo, source):
1741 1745 """Gather a map of rev -> value dict from the specified source
1742 1746
1743 1747 A source spec is treated as a URL, with a special case shell: type
1744 1748 for parsing the output from a shell command.
1745 1749
1746 1750 The data is parsed as a series of newline-separated records where
1747 1751 each record is a revision specifier optionally followed by a space
1748 1752 and a freeform string value. If the revision is known locally, it
1749 1753 is converted to a rev, otherwise the record is skipped.
1750 1754
1751 1755 Note that both key and value are treated as UTF-8 and converted to
1752 1756 the local encoding. This allows uniformity between local and
1753 1757 remote data sources.
1754 1758 """
1755 1759
1756 1760 spec = repo.ui.config(b"extdata", source)
1757 1761 if not spec:
1758 1762 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1759 1763
1760 1764 data = {}
1761 1765 src = proc = None
1762 1766 try:
1763 1767 if spec.startswith(b"shell:"):
1764 1768 # external commands should be run relative to the repo root
1765 1769 cmd = spec[6:]
1766 1770 proc = subprocess.Popen(
1767 1771 procutil.tonativestr(cmd),
1768 1772 shell=True,
1769 1773 bufsize=-1,
1770 1774 close_fds=procutil.closefds,
1771 1775 stdout=subprocess.PIPE,
1772 1776 cwd=procutil.tonativestr(repo.root),
1773 1777 )
1774 1778 src = proc.stdout
1775 1779 else:
1776 1780 # treat as a URL or file
1777 1781 src = url.open(repo.ui, spec)
1778 1782 for l in src:
1779 1783 if b" " in l:
1780 1784 k, v = l.strip().split(b" ", 1)
1781 1785 else:
1782 1786 k, v = l.strip(), b""
1783 1787
1784 1788 k = encoding.tolocal(k)
1785 1789 try:
1786 1790 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1787 1791 except (error.LookupError, error.RepoLookupError, error.InputError):
1788 1792 pass # we ignore data for nodes that don't exist locally
1789 1793 finally:
1790 1794 if proc:
1791 1795 try:
1792 1796 proc.communicate()
1793 1797 except ValueError:
1794 1798 # This happens if we started iterating src and then
1795 1799 # get a parse error on a line. It should be safe to ignore.
1796 1800 pass
1797 1801 if src:
1798 1802 src.close()
1799 1803 if proc and proc.returncode != 0:
1800 1804 raise error.Abort(
1801 1805 _(b"extdata command '%s' failed: %s")
1802 1806 % (cmd, procutil.explainexit(proc.returncode))
1803 1807 )
1804 1808
1805 1809 return data
1806 1810
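# Illustrative configuration sketch (hypothetical source name and command):
#
#     [extdata]
#     blamedata = shell:cat .hg/blamedata.txt
#
# extdatasource(repo, b'blamedata') then returns a {rev: value} map parsed
# from newline-separated "<revspec> <value>" records.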
1807 1811
1808 1812 class progress:
1809 1813 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1810 1814 self.ui = ui
1811 1815 self.pos = 0
1812 1816 self.topic = topic
1813 1817 self.unit = unit
1814 1818 self.total = total
1815 1819 self.debug = ui.configbool(b'progress', b'debug')
1816 1820 self._updatebar = updatebar
1817 1821
1818 1822 def __enter__(self):
1819 1823 return self
1820 1824
1821 1825 def __exit__(self, exc_type, exc_value, exc_tb):
1822 1826 self.complete()
1823 1827
1824 1828 def update(self, pos, item=b"", total=None):
1825 1829 assert pos is not None
1826 1830 if total:
1827 1831 self.total = total
1828 1832 self.pos = pos
1829 1833 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1830 1834 if self.debug:
1831 1835 self._printdebug(item)
1832 1836
1833 1837 def increment(self, step=1, item=b"", total=None):
1834 1838 self.update(self.pos + step, item, total)
1835 1839
1836 1840 def complete(self):
1837 1841 self.pos = None
1838 1842 self.unit = b""
1839 1843 self.total = None
1840 1844 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1841 1845
1842 1846 def _printdebug(self, item):
1843 1847 unit = b''
1844 1848 if self.unit:
1845 1849 unit = b' ' + self.unit
1846 1850 if item:
1847 1851 item = b' ' + item
1848 1852
1849 1853 if self.total:
1850 1854 pct = 100.0 * self.pos / self.total
1851 1855 self.ui.debug(
1852 1856 b'%s:%s %d/%d%s (%4.2f%%)\n'
1853 1857 % (self.topic, item, self.pos, self.total, unit, pct)
1854 1858 )
1855 1859 else:
1856 1860 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1857 1861
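# Illustrative usage sketch: instances are normally obtained through
# ui.makeprogress(), which supplies the update callback.
#
#     with repo.ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
#         for i, f in enumerate(files, 1):
#             p.update(i, item=f)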
1858 1862
1859 1863 def gdinitconfig(ui):
1860 1864 """helper function to know if a repo should be created as general delta"""
1861 1865 # experimental config: format.generaldelta
1862 1866 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1863 1867 b'format', b'usegeneraldelta'
1864 1868 )
1865 1869
1866 1870
1867 1871 def gddeltaconfig(ui):
1868 1872 """helper function to know if incoming deltas should be optimized
1869 1873
1870 1874 The `format.generaldelta` config is an old form of the config that also
1871 1875     implies that incoming delta-bases should never be trusted. This function
1872 1876 exists for this purpose.
1873 1877 """
1874 1878 # experimental config: format.generaldelta
1875 1879 return ui.configbool(b'format', b'generaldelta')
1876 1880
1877 1881
1878 1882 class simplekeyvaluefile:
1879 1883 """A simple file with key=value lines
1880 1884
1881 1885     Keys must be alphanumeric and start with a letter, values must not
1882 1886 contain '\n' characters"""
1883 1887
1884 1888 firstlinekey = b'__firstline'
1885 1889
1886 1890 def __init__(self, vfs, path, keys=None):
1887 1891 self.vfs = vfs
1888 1892 self.path = path
1889 1893
1890 1894 def read(self, firstlinenonkeyval=False):
1891 1895 """Read the contents of a simple key-value file
1892 1896
1893 1897         'firstlinenonkeyval' indicates whether the first line of the file
1894 1898         should be treated as a key-value pair or returned fully under the
1895 1899 __firstline key."""
1896 1900 lines = self.vfs.readlines(self.path)
1897 1901 d = {}
1898 1902 if firstlinenonkeyval:
1899 1903 if not lines:
1900 1904 e = _(b"empty simplekeyvalue file")
1901 1905 raise error.CorruptedState(e)
1902 1906 # we don't want to include '\n' in the __firstline
1903 1907 d[self.firstlinekey] = lines[0][:-1]
1904 1908 del lines[0]
1905 1909
1906 1910 try:
1907 1911 # the 'if line.strip()' part prevents us from failing on empty
1908 1912             # lines which only contain '\n' and are therefore not skipped
1909 1913 # by 'if line'
1910 1914 updatedict = dict(
1911 1915 line[:-1].split(b'=', 1) for line in lines if line.strip()
1912 1916 )
1913 1917 if self.firstlinekey in updatedict:
1914 1918 e = _(b"%r can't be used as a key")
1915 1919 raise error.CorruptedState(e % self.firstlinekey)
1916 1920 d.update(updatedict)
1917 1921 except ValueError as e:
1918 1922 raise error.CorruptedState(stringutil.forcebytestr(e))
1919 1923 return d
1920 1924
1921 1925 def write(self, data, firstline=None):
1922 1926 """Write key=>value mapping to a file
1923 1927         data is a dict. Keys must be alphanumeric and start with a letter.
1924 1928 Values must not contain newline characters.
1925 1929
1926 1930 If 'firstline' is not None, it is written to file before
1927 1931 everything else, as it is, not in a key=value form"""
1928 1932 lines = []
1929 1933 if firstline is not None:
1930 1934 lines.append(b'%s\n' % firstline)
1931 1935
1932 1936 for k, v in data.items():
1933 1937 if k == self.firstlinekey:
1934 1938 e = b"key name '%s' is reserved" % self.firstlinekey
1935 1939 raise error.ProgrammingError(e)
1936 1940 if not k[0:1].isalpha():
1937 1941 e = b"keys must start with a letter in a key-value file"
1938 1942 raise error.ProgrammingError(e)
1939 1943 if not k.isalnum():
1940 1944 e = b"invalid key name in a simple key-value file"
1941 1945 raise error.ProgrammingError(e)
1942 1946 if b'\n' in v:
1943 1947 e = b"invalid value in a simple key-value file"
1944 1948 raise error.ProgrammingError(e)
1945 1949 lines.append(b"%s=%s\n" % (k, v))
1946 1950 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1947 1951 fp.write(b''.join(lines))
1948 1952
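# Illustrative usage sketch (hypothetical state file name):
#
#     f = simplekeyvaluefile(repo.vfs, b'examplestate')
#     f.write({b'version': b'1', b'step': b'done'}, firstline=b'v1')
#     data = f.read(firstlinenonkeyval=True)
#     # -> {b'__firstline': b'v1', b'version': b'1', b'step': b'done'}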
1949 1953
1950 1954 _reportobsoletedsource = [
1951 1955 b'debugobsolete',
1952 1956 b'pull',
1953 1957 b'push',
1954 1958 b'serve',
1955 1959 b'unbundle',
1956 1960 ]
1957 1961
1958 1962 _reportnewcssource = [
1959 1963 b'pull',
1960 1964 b'unbundle',
1961 1965 ]
1962 1966
1963 1967
1964 1968 def prefetchfiles(repo, revmatches):
1965 1969 """Invokes the registered file prefetch functions, allowing extensions to
1966 1970 ensure the corresponding files are available locally, before the command
1967 1971 uses them.
1968 1972
1969 1973 Args:
1970 1974 revmatches: a list of (revision, match) tuples to indicate the files to
1971 1975 fetch at each revision. If any of the match elements is None, it matches
1972 1976 all files.
1973 1977 """
1974 1978
1975 1979 def _matcher(m):
1976 1980 if m:
1977 1981 assert isinstance(m, matchmod.basematcher)
1978 1982 # The command itself will complain about files that don't exist, so
1979 1983 # don't duplicate the message.
1980 1984 return matchmod.badmatch(m, lambda fn, msg: None)
1981 1985 else:
1982 1986 return matchall(repo)
1983 1987
1984 1988 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1985 1989
1986 1990 fileprefetchhooks(repo, revbadmatches)
1987 1991
1988 1992
1989 1993 # a list of (repo, revs, match) prefetch functions
1990 1994 fileprefetchhooks = util.hooks()
1991 1995
1992 1996 # A marker that tells the evolve extension to suppress its own reporting
1993 1997 _reportstroubledchangesets = True
1994 1998
1995 1999
1996 2000 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1997 2001 """register a callback to issue a summary after the transaction is closed
1998 2002
1999 2003 If as_validator is true, then the callbacks are registered as transaction
2000 2004 validators instead
2001 2005 """
2002 2006
2003 2007 def txmatch(sources):
2004 2008 return any(txnname.startswith(source) for source in sources)
2005 2009
2006 2010 categories = []
2007 2011
2008 2012 def reportsummary(func):
2009 2013 """decorator for report callbacks."""
2010 2014 # The repoview life cycle is shorter than the one of the actual
2011 2015 # underlying repository. So the filtered object can die before the
2012 2016         # weakref is used, leading to trouble. We keep a reference to the
2013 2017 # unfiltered object and restore the filtering when retrieving the
2014 2018 # repository through the weakref.
2015 2019 filtername = repo.filtername
2016 2020 reporef = weakref.ref(repo.unfiltered())
2017 2021
2018 2022 def wrapped(tr):
2019 2023 repo = reporef()
2020 2024 if filtername:
2021 2025 assert repo is not None # help pytype
2022 2026 repo = repo.filtered(filtername)
2023 2027 func(repo, tr)
2024 2028
2025 2029 newcat = b'%02i-txnreport' % len(categories)
2026 2030 if as_validator:
2027 2031 otr.addvalidator(newcat, wrapped)
2028 2032 else:
2029 2033 otr.addpostclose(newcat, wrapped)
2030 2034 categories.append(newcat)
2031 2035 return wrapped
2032 2036
2033 2037 @reportsummary
2034 2038 def reportchangegroup(repo, tr):
2035 2039 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2036 2040 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2037 2041 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2038 2042 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2039 2043 if cgchangesets or cgrevisions or cgfiles:
2040 2044 htext = b""
2041 2045 if cgheads:
2042 2046 htext = _(b" (%+d heads)") % cgheads
2043 2047 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2044 2048 if as_validator:
2045 2049 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2046 2050 assert repo is not None # help pytype
2047 2051 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2048 2052
2049 2053 if txmatch(_reportobsoletedsource):
2050 2054
2051 2055 @reportsummary
2052 2056 def reportobsoleted(repo, tr):
2053 2057 obsoleted = obsutil.getobsoleted(repo, tr)
2054 2058 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2055 2059 if newmarkers:
2056 2060 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2057 2061 if obsoleted:
2058 2062 msg = _(b'obsoleted %i changesets\n')
2059 2063 if as_validator:
2060 2064 msg = _(b'obsoleting %i changesets\n')
2061 2065 repo.ui.status(msg % len(obsoleted))
2062 2066
2063 2067 if obsolete.isenabled(
2064 2068 repo, obsolete.createmarkersopt
2065 2069 ) and repo.ui.configbool(
2066 2070 b'experimental', b'evolution.report-instabilities'
2067 2071 ):
2068 2072 instabilitytypes = [
2069 2073 (b'orphan', b'orphan'),
2070 2074 (b'phase-divergent', b'phasedivergent'),
2071 2075 (b'content-divergent', b'contentdivergent'),
2072 2076 ]
2073 2077
2074 2078 def getinstabilitycounts(repo):
2075 2079 filtered = repo.changelog.filteredrevs
2076 2080 counts = {}
2077 2081 for instability, revset in instabilitytypes:
2078 2082 counts[instability] = len(
2079 2083 set(obsolete.getrevs(repo, revset)) - filtered
2080 2084 )
2081 2085 return counts
2082 2086
2083 2087 oldinstabilitycounts = getinstabilitycounts(repo)
2084 2088
2085 2089 @reportsummary
2086 2090 def reportnewinstabilities(repo, tr):
2087 2091 newinstabilitycounts = getinstabilitycounts(repo)
2088 2092 for instability, revset in instabilitytypes:
2089 2093 delta = (
2090 2094 newinstabilitycounts[instability]
2091 2095 - oldinstabilitycounts[instability]
2092 2096 )
2093 2097 msg = getinstabilitymessage(delta, instability)
2094 2098 if msg:
2095 2099 repo.ui.warn(msg)
2096 2100
2097 2101 if txmatch(_reportnewcssource):
2098 2102
2099 2103 @reportsummary
2100 2104 def reportnewcs(repo, tr):
2101 2105 """Report the range of new revisions pulled/unbundled."""
2102 2106 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2103 2107 unfi = repo.unfiltered()
2104 2108 if origrepolen >= len(unfi):
2105 2109 return
2106 2110
2107 2111 # Compute the bounds of new visible revisions' range.
2108 2112 revs = smartset.spanset(repo, start=origrepolen)
2109 2113 if revs:
2110 2114 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2111 2115
2112 2116 if minrev == maxrev:
2113 2117 revrange = minrev
2114 2118 else:
2115 2119 revrange = b'%s:%s' % (minrev, maxrev)
2116 2120 draft = len(repo.revs(b'%ld and draft()', revs))
2117 2121 secret = len(repo.revs(b'%ld and secret()', revs))
2118 2122 if not (draft or secret):
2119 2123 msg = _(b'new changesets %s\n') % revrange
2120 2124 elif draft and secret:
2121 2125 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2122 2126 msg %= (revrange, draft, secret)
2123 2127 elif draft:
2124 2128 msg = _(b'new changesets %s (%d drafts)\n')
2125 2129 msg %= (revrange, draft)
2126 2130 elif secret:
2127 2131 msg = _(b'new changesets %s (%d secrets)\n')
2128 2132 msg %= (revrange, secret)
2129 2133 else:
2130 2134 errormsg = b'entered unreachable condition'
2131 2135 raise error.ProgrammingError(errormsg)
2132 2136 repo.ui.status(msg)
2133 2137
2134 2138         # search for new changesets that were pulled already obsolete
2135 2139 duplicates = tr.changes.get(b'revduplicates', ())
2136 2140 obsadded = unfi.revs(
2137 2141 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2138 2142 )
2139 2143 cl = repo.changelog
2140 2144 extinctadded = [r for r in obsadded if r not in cl]
2141 2145 if extinctadded:
2142 2146             # They are not just obsolete, but obsolete and invisible:
2143 2147             # we call them "extinct" internally, but the term has not
2144 2148             # been exposed to users.
2145 2149 msg = b'(%d other changesets obsolete on arrival)\n'
2146 2150 repo.ui.status(msg % len(extinctadded))
2147 2151
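    # Sample output from the callback above (hashes illustrative) for a
    # pull adding five visible revisions, two of them drafts:
    #
    #     new changesets 4f52afe44dfe:9dc3f7e00fa9 (2 drafts)
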
2148 2152 @reportsummary
2149 2153 def reportphasechanges(repo, tr):
2150 2154         """Report statistics of phase changes for changesets that existed
2151 2155         before the pull/unbundle.
2152 2156 """
2153 2157 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2154 2158 published = []
2155 2159 for revs, (old, new) in tr.changes.get(b'phases', []):
2156 2160 if new != phases.public:
2157 2161 continue
2158 2162 published.extend(rev for rev in revs if rev < origrepolen)
2159 2163 if not published:
2160 2164 return
2161 2165 msg = _(b'%d local changesets published\n')
2162 2166 if as_validator:
2163 2167 msg = _(b'%d local changesets will be published\n')
2164 2168 repo.ui.status(msg % len(published))
2165 2169
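# A minimal sketch of how this registration is typically driven (the
# b'pull' transaction name is illustrative; error handling omitted):
#
#     tr = repo.transaction(b'pull')
#     registersummarycallback(repo, tr, txnname=b'pull')
#     ...  # apply the incoming changes; the summary prints when tr closes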
2166 2170
2167 2171 def getinstabilitymessage(delta, instability):
2168 2172     """return the message to warn about new instabilities
2169 2173
2170 2174     exists as a separate function so that extensions can wrap it to show
2171 2175     more information, like how to fix the instabilities"""
2172 2176 if delta > 0:
2173 2177 return _(b'%i new %s changesets\n') % (delta, instability)
2174 2178
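# Worked example: getinstabilitymessage(2, b'orphan') returns
# b'2 new orphan changesets\n', while a zero or negative delta returns
# None, so callers print nothing.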
2175 2179
2176 2180 def nodesummaries(repo, nodes, maxnumnodes=4):
2177 2181 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2178 2182 return b' '.join(short(h) for h in nodes)
2179 2183 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2180 2184 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2181 2185
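# Example: for six nodes with the default maxnumnodes=4 on a non-verbose
# UI this yields b'<n1> <n2> <n3> <n4> and 2 others', with each node
# abbreviated by short().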
2182 2186
2183 2187 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2184 2188 """check that no named branch has multiple heads"""
2185 2189 if desc in (b'strip', b'repair'):
2186 2190 # skip the logic during strip
2187 2191 return
2188 2192 visible = repo.filtered(filtername)
2189 2193 # possible improvement: we could restrict the check to affected branch
2190 2194 bm = visible.branchmap()
2191 2195 for name in bm:
2192 2196 heads = bm.branchheads(name, closed=accountclosed)
2193 2197 if len(heads) > 1:
2194 2198 msg = _(b'rejecting multiple heads on branch "%s"')
2195 2199 msg %= name
2196 2200 hint = _(b'%d heads: %s')
2197 2201 hint %= (len(heads), nodesummaries(repo, heads))
2198 2202 raise error.Abort(msg, hint=hint)
2199 2203
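# A hedged sketch of wiring this check up as a transaction validator when
# a single-head policy is wanted (the category name and arguments are
# illustrative):
#
#     tr.addvalidator(
#         b'010-singlehead',
#         lambda tr: enforcesinglehead(repo, tr, desc, False, b'visible'),
#     )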
2200 2204
2201 2205 def wrapconvertsink(sink):
2202 2206 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2203 2207 before it is used, whether or not the convert extension was formally loaded.
2204 2208 """
2205 2209 return sink
2206 2210
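# Extensions can replace the no-op above with extensions.wrapfunction; a
# minimal sketch, where mysinkproxy is a hypothetical wrapper class:
#
#     def _wrapsink(orig, sink):
#         return mysinkproxy(orig(sink))
#
#     extensions.wrapfunction(localrepo, 'wrapconvertsink', _wrapsink)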
2207 2211
2208 2212 def unhidehashlikerevs(repo, specs, hiddentype):
2209 2213 """parse the user specs and unhide changesets whose hash or revision number
2210 2214 is passed.
2211 2215
2212 2216 hiddentype can be: 1) 'warn': warn while unhiding changesets
2213 2217 2) 'nowarn': don't warn while unhiding changesets
2214 2218
2215 2219 returns a repo object with the required changesets unhidden
2216 2220 """
2217 2221 if not specs:
2218 2222 return repo
2219 2223
2220 2224 if not repo.filtername or not repo.ui.configbool(
2221 2225 b'experimental', b'directaccess'
2222 2226 ):
2223 2227 return repo
2224 2228
2225 2229 if repo.filtername not in (b'visible', b'visible-hidden'):
2226 2230 return repo
2227 2231
2228 2232 symbols = set()
2229 2233 for spec in specs:
2230 2234 try:
2231 2235 tree = revsetlang.parse(spec)
2232 2236 except error.ParseError: # will be reported by scmutil.revrange()
2233 2237 continue
2234 2238
2235 2239 symbols.update(revsetlang.gethashlikesymbols(tree))
2236 2240
2237 2241 if not symbols:
2238 2242 return repo
2239 2243
2240 2244 revs = _getrevsfromsymbols(repo, symbols)
2241 2245
2242 2246 if not revs:
2243 2247 return repo
2244 2248
2245 2249 if hiddentype == b'warn':
2246 2250 unfi = repo.unfiltered()
2247 2251 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2248 2252 repo.ui.warn(
2249 2253 _(
2250 2254 b"warning: accessing hidden changesets for write "
2251 2255 b"operation: %s\n"
2252 2256 )
2253 2257 % revstr
2254 2258 )
2255 2259
2256 2260     # we have to use a new filtername to separate branch/tags caches until
2257 2261     # we can disable these caches when revisions are dynamically pinned.
2258 2262 return repo.filtered(b'visible-hidden', revs)
2259 2263
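# For example, with experimental.directaccess enabled, a write command
# handed the hash of a hidden changeset ends up here: the changeset is
# pinned into a b'visible-hidden' repoview, with a warning printed when
# hiddentype is b'warn'.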
2260 2264
2261 2265 def _getrevsfromsymbols(repo, symbols):
2262 2266     """parse the list of symbols and return a set of revision numbers of the
2263 2267     hidden changesets present in symbols"""
2264 2268 revs = set()
2265 2269 unfi = repo.unfiltered()
2266 2270 unficl = unfi.changelog
2267 2271 cl = repo.changelog
2268 2272 tiprev = len(unficl)
2269 2273 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2270 2274 for s in symbols:
2271 2275 try:
2272 2276 n = int(s)
2273 2277 if n <= tiprev:
2274 2278 if not allowrevnums:
2275 2279 continue
2276 2280 else:
2277 2281 if n not in cl:
2278 2282 revs.add(n)
2279 2283 continue
2280 2284 except ValueError:
2281 2285 pass
2282 2286
2283 2287 try:
2284 2288 s = resolvehexnodeidprefix(unfi, s)
2285 2289 except (error.LookupError, error.WdirUnsupported):
2286 2290 s = None
2287 2291
2288 2292 if s is not None:
2289 2293 rev = unficl.rev(s)
2290 2294 if rev not in cl:
2291 2295 revs.add(rev)
2292 2296
2293 2297 return revs
2294 2298
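# For instance, with experimental.directaccess.revnums enabled, the symbol
# b'42' adds revision 42 when it exists only in the unfiltered repo (i.e.
# it is hidden); hash prefixes are resolved via resolvehexnodeidprefix().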
2295 2299
2296 2300 def bookmarkrevs(repo, mark):
2297 2301 """Select revisions reachable by a given bookmark
2298 2302
2299 2303 If the bookmarked revision isn't a head, an empty set will be returned.
2300 2304 """
2301 2305 return repo.revs(format_bookmark_revspec(mark))
2302 2306
2303 2307
2304 2308 def format_bookmark_revspec(mark):
2305 2309 """Build a revset expression to select revisions reachable by a given
2306 2310 bookmark"""
2307 2311 mark = b'literal:' + mark
2308 2312 return revsetlang.formatspec(
2309 2313 b"ancestors(bookmark(%s)) - "
2310 2314 b"ancestors(head() and not bookmark(%s)) - "
2311 2315 b"ancestors(bookmark() and not bookmark(%s))",
2312 2316 mark,
2313 2317 mark,
2314 2318 mark,
2315 2319 )
2316 2320
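# For mark b'feature' the generated revset is (reformatted for
# readability):
#
#     ancestors(bookmark("literal:feature"))
#       - ancestors(head() and not bookmark("literal:feature"))
#       - ancestors(bookmark() and not bookmark("literal:feature"))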
2317 2321
2318 2322 def ismember(ui, username, userlist):
2319 2323 """Check if username is a member of userlist.
2320 2324
2321 2325 If userlist has a single '*' member, all users are considered members.
2322 2326 Can be overridden by extensions to provide more complex authorization
2323 2327 schemes.
2324 2328 """
2325 2329 return userlist == [b'*'] or username in userlist
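# Examples: ismember(ui, b'alice', [b'*']) is True for any user, while
# ismember(ui, b'alice', [b'bob']) is False.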