localrepo: use functools.wraps() in unfilteredmethod decorator...
Augie Fackler
r45987:4111954c default
@@ -1,3519 +1,3521 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 import functools
11 12 import os
12 13 import random
13 14 import sys
14 15 import time
15 16 import weakref
16 17
17 18 from .i18n import _
18 19 from .node import (
19 20 bin,
20 21 hex,
21 22 nullid,
22 23 nullrev,
23 24 short,
24 25 )
25 26 from .pycompat import (
26 27 delattr,
27 28 getattr,
28 29 )
29 30 from . import (
30 31 bookmarks,
31 32 branchmap,
32 33 bundle2,
33 34 changegroup,
34 35 color,
35 36 commit,
36 37 context,
37 38 dirstate,
38 39 dirstateguard,
39 40 discovery,
40 41 encoding,
41 42 error,
42 43 exchange,
43 44 extensions,
44 45 filelog,
45 46 hook,
46 47 lock as lockmod,
47 48 match as matchmod,
48 49 mergestate as mergestatemod,
49 50 mergeutil,
50 51 namespaces,
51 52 narrowspec,
52 53 obsolete,
53 54 pathutil,
54 55 phases,
55 56 pushkey,
56 57 pycompat,
57 58 rcutil,
58 59 repoview,
59 60 requirements as requirementsmod,
60 61 revset,
61 62 revsetlang,
62 63 scmutil,
63 64 sparse,
64 65 store as storemod,
65 66 subrepoutil,
66 67 tags as tagsmod,
67 68 transaction,
68 69 txnutil,
69 70 util,
70 71 vfs as vfsmod,
71 72 )
72 73
73 74 from .interfaces import (
74 75 repository,
75 76 util as interfaceutil,
76 77 )
77 78
78 79 from .utils import (
79 80 hashutil,
80 81 procutil,
81 82 stringutil,
82 83 )
83 84
84 85 from .revlogutils import constants as revlogconst
85 86
86 87 release = lockmod.release
87 88 urlerr = util.urlerr
88 89 urlreq = util.urlreq
89 90
90 91 # set of (path, vfs-location) tuples. vfs-location is:
91 92 # - 'plain' for vfs relative paths
92 93 # - '' for svfs relative paths
93 94 _cachedfiles = set()
94 95
95 96
96 97 class _basefilecache(scmutil.filecache):
97 98 """All filecache usage on repo are done for logic that should be unfiltered
98 99 """
99 100
100 101 def __get__(self, repo, type=None):
101 102 if repo is None:
102 103 return self
103 104 # proxy to unfiltered __dict__ since filtered repo has no entry
104 105 unfi = repo.unfiltered()
105 106 try:
106 107 return unfi.__dict__[self.sname]
107 108 except KeyError:
108 109 pass
109 110 return super(_basefilecache, self).__get__(unfi, type)
110 111
111 112 def set(self, repo, value):
112 113 return super(_basefilecache, self).set(repo.unfiltered(), value)
113 114
114 115
115 116 class repofilecache(_basefilecache):
116 117 """filecache for files in .hg but outside of .hg/store"""
117 118
118 119 def __init__(self, *paths):
119 120 super(repofilecache, self).__init__(*paths)
120 121 for path in paths:
121 122 _cachedfiles.add((path, b'plain'))
122 123
123 124 def join(self, obj, fname):
124 125 return obj.vfs.join(fname)
125 126
126 127
127 128 class storecache(_basefilecache):
128 129 """filecache for files in the store"""
129 130
130 131 def __init__(self, *paths):
131 132 super(storecache, self).__init__(*paths)
132 133 for path in paths:
133 134 _cachedfiles.add((path, b''))
134 135
135 136 def join(self, obj, fname):
136 137 return obj.sjoin(fname)
137 138
138 139
139 140 class mixedrepostorecache(_basefilecache):
140 141 """filecache for a mix files in .hg/store and outside"""
141 142
142 143 def __init__(self, *pathsandlocations):
143 144 # scmutil.filecache only uses the path for passing back into our
144 145 # join(), so we can safely pass a list of paths and locations
145 146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
146 147 _cachedfiles.update(pathsandlocations)
147 148
148 149 def join(self, obj, fnameandlocation):
149 150 fname, location = fnameandlocation
150 151 if location == b'plain':
151 152 return obj.vfs.join(fname)
152 153 else:
153 154 if location != b'':
154 155 raise error.ProgrammingError(
155 156 b'unexpected location: %s' % location
156 157 )
157 158 return obj.sjoin(fname)
158 159
159 160
160 161 def isfilecached(repo, name):
161 162 """check if a repo has already cached "name" filecache-ed property
162 163
163 164 This returns (cachedobj-or-None, iscached) tuple.
164 165 """
165 166 cacheentry = repo.unfiltered()._filecache.get(name, None)
166 167 if not cacheentry:
167 168 return None, False
168 169 return cacheentry.obj, True
169 170
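
[editor's note] A hedged usage sketch of the helper above; `repo` stands for any loaded repository object, and the property name follows how `scmutil.filecache` registers entries:

    # Probe the cache without computing the property as a side effect.
    obj, cached = isfilecached(repo, 'changelog')
    if cached:
        pass  # reuse obj; the filecache entry was already populated
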
170 171
171 172 class unfilteredpropertycache(util.propertycache):
172 173 """propertycache that apply to unfiltered repo only"""
173 174
174 175 def __get__(self, repo, type=None):
175 176 unfi = repo.unfiltered()
176 177 if unfi is repo:
177 178 return super(unfilteredpropertycache, self).__get__(unfi)
178 179 return getattr(unfi, self.name)
179 180
180 181
181 182 class filteredpropertycache(util.propertycache):
182 183 """propertycache that must take filtering in account"""
183 184
184 185 def cachevalue(self, obj, value):
185 186 object.__setattr__(obj, self.name, value)
186 187
187 188
188 189 def hasunfilteredcache(repo, name):
189 190 """check if a repo has an unfilteredpropertycache value for <name>"""
190 191 return name in vars(repo.unfiltered())
191 192
192 193
193 194 def unfilteredmethod(orig):
194 195 """decorate method that always need to be run on unfiltered version"""
195 196
197 @functools.wraps(orig)
196 198 def wrapper(repo, *args, **kwargs):
197 199 return orig(repo.unfiltered(), *args, **kwargs)
198 200
199 201 return wrapper
200 202
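
[editor's note] This hunk is the point of the changeset: without `functools.wraps()`, every method decorated with `unfilteredmethod` reports its name as `wrapper`, which hurts introspection, profiling, and debugging output. A minimal standalone sketch of the difference:

    import functools

    def plain(orig):
        def wrapper(*args, **kwargs):
            return orig(*args, **kwargs)
        return wrapper

    def wrapping(orig):
        @functools.wraps(orig)  # copies __name__, __doc__, __module__, ...
        def wrapper(*args, **kwargs):
            return orig(*args, **kwargs)
        return wrapper

    @plain
    def lost():
        """docstring"""

    @wrapping
    def kept():
        """docstring"""

    print(lost.__name__)  # -> 'wrapper': metadata clobbered
    print(kept.__name__)  # -> 'kept': metadata preserved
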
201 203
202 204 moderncaps = {
203 205 b'lookup',
204 206 b'branchmap',
205 207 b'pushkey',
206 208 b'known',
207 209 b'getbundle',
208 210 b'unbundle',
209 211 }
210 212 legacycaps = moderncaps.union({b'changegroupsubset'})
211 213
212 214
213 215 @interfaceutil.implementer(repository.ipeercommandexecutor)
214 216 class localcommandexecutor(object):
215 217 def __init__(self, peer):
216 218 self._peer = peer
217 219 self._sent = False
218 220 self._closed = False
219 221
220 222 def __enter__(self):
221 223 return self
222 224
223 225 def __exit__(self, exctype, excvalue, exctb):
224 226 self.close()
225 227
226 228 def callcommand(self, command, args):
227 229 if self._sent:
228 230 raise error.ProgrammingError(
229 231 b'callcommand() cannot be used after sendcommands()'
230 232 )
231 233
232 234 if self._closed:
233 235 raise error.ProgrammingError(
234 236 b'callcommand() cannot be used after close()'
235 237 )
236 238
237 239 # We don't need to support anything fancy. Just call the named
238 240 # method on the peer and return a resolved future.
239 241 fn = getattr(self._peer, pycompat.sysstr(command))
240 242
241 243 f = pycompat.futures.Future()
242 244
243 245 try:
244 246 result = fn(**pycompat.strkwargs(args))
245 247 except Exception:
246 248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
247 249 else:
248 250 f.set_result(result)
249 251
250 252 return f
251 253
252 254 def sendcommands(self):
253 255 self._sent = True
254 256
255 257 def close(self):
256 258 self._closed = True
257 259
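
[editor's note] For context, callers drive peers through this executor interface; a hedged sketch of typical usage (the command and arguments are illustrative; for the local peer the future is resolved eagerly inside callcommand()):

    with peer.commandexecutor() as e:
        node = e.callcommand(b'lookup', {b'key': b'tip'}).result()
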
258 260
259 261 @interfaceutil.implementer(repository.ipeercommands)
260 262 class localpeer(repository.peer):
261 263 '''peer for a local repo; reflects only the most recent API'''
262 264
263 265 def __init__(self, repo, caps=None):
264 266 super(localpeer, self).__init__()
265 267
266 268 if caps is None:
267 269 caps = moderncaps.copy()
268 270 self._repo = repo.filtered(b'served')
269 271 self.ui = repo.ui
270 272 self._caps = repo._restrictcapabilities(caps)
271 273
272 274 # Begin of _basepeer interface.
273 275
274 276 def url(self):
275 277 return self._repo.url()
276 278
277 279 def local(self):
278 280 return self._repo
279 281
280 282 def peer(self):
281 283 return self
282 284
283 285 def canpush(self):
284 286 return True
285 287
286 288 def close(self):
287 289 self._repo.close()
288 290
289 291 # End of _basepeer interface.
290 292
291 293 # Begin of _basewirecommands interface.
292 294
293 295 def branchmap(self):
294 296 return self._repo.branchmap()
295 297
296 298 def capabilities(self):
297 299 return self._caps
298 300
299 301 def clonebundles(self):
300 302 return self._repo.tryread(b'clonebundles.manifest')
301 303
302 304 def debugwireargs(self, one, two, three=None, four=None, five=None):
303 305 """Used to test argument passing over the wire"""
304 306 return b"%s %s %s %s %s" % (
305 307 one,
306 308 two,
307 309 pycompat.bytestr(three),
308 310 pycompat.bytestr(four),
309 311 pycompat.bytestr(five),
310 312 )
311 313
312 314 def getbundle(
313 315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
314 316 ):
315 317 chunks = exchange.getbundlechunks(
316 318 self._repo,
317 319 source,
318 320 heads=heads,
319 321 common=common,
320 322 bundlecaps=bundlecaps,
321 323 **kwargs
322 324 )[1]
323 325 cb = util.chunkbuffer(chunks)
324 326
325 327 if exchange.bundle2requested(bundlecaps):
326 328 # When requesting a bundle2, getbundle returns a stream to make the
327 329 # wire level function happier. We need to build a proper object
328 330 # from it in local peer.
329 331 return bundle2.getunbundler(self.ui, cb)
330 332 else:
331 333 return changegroup.getunbundler(b'01', cb, None)
332 334
333 335 def heads(self):
334 336 return self._repo.heads()
335 337
336 338 def known(self, nodes):
337 339 return self._repo.known(nodes)
338 340
339 341 def listkeys(self, namespace):
340 342 return self._repo.listkeys(namespace)
341 343
342 344 def lookup(self, key):
343 345 return self._repo.lookup(key)
344 346
345 347 def pushkey(self, namespace, key, old, new):
346 348 return self._repo.pushkey(namespace, key, old, new)
347 349
348 350 def stream_out(self):
349 351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
350 352
351 353 def unbundle(self, bundle, heads, url):
352 354 """apply a bundle on a repo
353 355
354 356 This function handles the repo locking itself."""
355 357 try:
356 358 try:
357 359 bundle = exchange.readbundle(self.ui, bundle, None)
358 360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
359 361 if util.safehasattr(ret, b'getchunks'):
360 362 # This is a bundle20 object, turn it into an unbundler.
361 363 # This little dance should be dropped eventually when the
362 364 # API is finally improved.
363 365 stream = util.chunkbuffer(ret.getchunks())
364 366 ret = bundle2.getunbundler(self.ui, stream)
365 367 return ret
366 368 except Exception as exc:
367 369 # If the exception contains output salvaged from a bundle2
368 370 # reply, we need to make sure it is printed before continuing
369 371 # to fail. So we build a bundle2 with such output and consume
370 372 # it directly.
371 373 #
372 374 # This is not very elegant but allows a "simple" solution for
373 375 # issue4594
374 376 output = getattr(exc, '_bundle2salvagedoutput', ())
375 377 if output:
376 378 bundler = bundle2.bundle20(self._repo.ui)
377 379 for out in output:
378 380 bundler.addpart(out)
379 381 stream = util.chunkbuffer(bundler.getchunks())
380 382 b = bundle2.getunbundler(self.ui, stream)
381 383 bundle2.processbundle(self._repo, b)
382 384 raise
383 385 except error.PushRaced as exc:
384 386 raise error.ResponseError(
385 387 _(b'push failed:'), stringutil.forcebytestr(exc)
386 388 )
387 389
388 390 # End of _basewirecommands interface.
389 391
390 392 # Begin of peer interface.
391 393
392 394 def commandexecutor(self):
393 395 return localcommandexecutor(self)
394 396
395 397 # End of peer interface.
396 398
397 399
398 400 @interfaceutil.implementer(repository.ipeerlegacycommands)
399 401 class locallegacypeer(localpeer):
400 402 '''peer extension which implements legacy methods too; used for tests with
401 403 restricted capabilities'''
402 404
403 405 def __init__(self, repo):
404 406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
405 407
406 408 # Begin of baselegacywirecommands interface.
407 409
408 410 def between(self, pairs):
409 411 return self._repo.between(pairs)
410 412
411 413 def branches(self, nodes):
412 414 return self._repo.branches(nodes)
413 415
414 416 def changegroup(self, nodes, source):
415 417 outgoing = discovery.outgoing(
416 418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
417 419 )
418 420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
419 421
420 422 def changegroupsubset(self, bases, heads, source):
421 423 outgoing = discovery.outgoing(
422 424 self._repo, missingroots=bases, ancestorsof=heads
423 425 )
424 426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
425 427
426 428 # End of baselegacywirecommands interface.
427 429
428 430
429 431 # Functions receiving (ui, features) that extensions can register to impact
430 432 # the ability to load repositories with custom requirements. Only
431 433 # functions defined in loaded extensions are called.
432 434 #
433 435 # The function receives a set of requirement strings that the repository
434 436 # is capable of opening. Functions will typically add elements to the
435 437 # set to reflect that the extension knows how to handle those requirements.
436 438 featuresetupfuncs = set()
437 439
438 440
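
[editor's note] A hedged sketch of how an extension registers one of these hooks (the requirement name is illustrative); note the function is only invoked when its defining module is among the loaded extensions:

    # In an extension module:
    from mercurial import localrepo

    def featuresetup(ui, features):
        # advertise support for opening repos with this requirement
        features.add(b'exp-my-feature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
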
439 441 def _getsharedvfs(hgvfs, requirements):
440 442 """ returns the vfs object pointing to root of shared source
441 443 repo for a shared repository
442 444
443 445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
444 446 requirements is a set of requirements of current repo (shared one)
445 447 """
446 448 # The ``shared`` or ``relshared`` requirements indicate the
447 449 # store lives in the path contained in the ``.hg/sharedpath`` file.
448 450 # This is an absolute path for ``shared`` and relative to
449 451 # ``.hg/`` for ``relshared``.
450 452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
451 453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
452 454 sharedpath = hgvfs.join(sharedpath)
453 455
454 456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
455 457
456 458 if not sharedvfs.exists():
457 459 raise error.RepoError(
458 460 _(b'.hg/sharedpath points to nonexistent directory %s')
459 461 % sharedvfs.base
460 462 )
461 463 return sharedvfs
462 464
463 465
464 466 def _readrequires(vfs, allowmissing):
465 467 """ reads the require file present at root of this vfs
466 468 and return a set of requirements
467 469
468 470 If allowmissing is True, we suppress ENOENT if raised"""
469 471 # requires file contains a newline-delimited list of
470 472 # features/capabilities the opener (us) must have in order to use
471 473 # the repository. This file was introduced in Mercurial 0.9.2,
472 474 # which means very old repositories may not have one. We assume
473 475 # a missing file translates to no requirements.
474 476 try:
475 477 requirements = set(vfs.read(b'requires').splitlines())
476 478 except IOError as e:
477 479 if not (allowmissing and e.errno == errno.ENOENT):
478 480 raise
479 481 requirements = set()
480 482 return requirements
481 483
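
[editor's note] For reference, the `requires` file is plain newline-delimited text, so the parse really is a one-liner; a representative payload:

    data = b'dotencode\nfncache\ngeneraldelta\nrevlogv1\nsparserevlog\nstore\n'
    requirements = set(data.splitlines())
    assert b'store' in requirements and len(requirements) == 6
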
482 484
483 485 def makelocalrepository(baseui, path, intents=None):
484 486 """Create a local repository object.
485 487
486 488 Given arguments needed to construct a local repository, this function
487 489 performs various early repository loading functionality (such as
488 490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
489 491 the repository can be opened, derives a type suitable for representing
490 492 that repository, and returns an instance of it.
491 493
492 494 The returned object conforms to the ``repository.completelocalrepository``
493 495 interface.
494 496
495 497 The repository type is derived by calling a series of factory functions
496 498 for each aspect/interface of the final repository. These are defined by
497 499 ``REPO_INTERFACES``.
498 500
499 501 Each factory function is called to produce a type implementing a specific
500 502 interface. The cumulative list of returned types will be combined into a
501 503 new type and that type will be instantiated to represent the local
502 504 repository.
503 505
504 506 The factory functions each receive various state that may be consulted
505 507 as part of deriving a type.
506 508
507 509 Extensions should wrap these factory functions to customize repository type
508 510 creation. Note that an extension's wrapped function may be called even if
509 511 that extension is not loaded for the repo being constructed. Extensions
510 512 should check if their ``__name__`` appears in the
511 513 ``extensionmodulenames`` set passed to the factory function and no-op if
512 514 not.
513 515 """
514 516 ui = baseui.copy()
515 517 # Prevent copying repo configuration.
516 518 ui.copy = baseui.copy
517 519
518 520 # Working directory VFS rooted at repository root.
519 521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
520 522
521 523 # Main VFS for .hg/ directory.
522 524 hgpath = wdirvfs.join(b'.hg')
523 525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
524 526 # Whether this repository is a shared one or not
525 527 shared = False
526 528 # If this repository is shared, vfs pointing to shared repo
527 529 sharedvfs = None
528 530
529 531 # The .hg/ path should exist and should be a directory. All other
530 532 # cases are errors.
531 533 if not hgvfs.isdir():
532 534 try:
533 535 hgvfs.stat()
534 536 except OSError as e:
535 537 if e.errno != errno.ENOENT:
536 538 raise
537 539 except ValueError as e:
538 540 # Can be raised on Python 3.8 when path is invalid.
539 541 raise error.Abort(
540 542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
541 543 )
542 544
543 545 raise error.RepoError(_(b'repository %s not found') % path)
544 546
545 547 requirements = _readrequires(hgvfs, True)
546 548
547 549 # The .hg/hgrc file may load extensions or contain config options
548 550 # that influence repository construction. Attempt to load it and
549 551 # process any new extensions that it may have pulled in.
550 552 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
551 553 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
552 554 extensions.loadall(ui)
553 555 extensions.populateui(ui)
554 556
555 557 # Set of module names of extensions loaded for this repository.
556 558 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
557 559
558 560 supportedrequirements = gathersupportedrequirements(ui)
559 561
560 562 # We first validate the requirements are known.
561 563 ensurerequirementsrecognized(requirements, supportedrequirements)
562 564
563 565 # Then we validate that the known set is reasonable to use together.
564 566 ensurerequirementscompatible(ui, requirements)
565 567
566 568 # TODO there are unhandled edge cases related to opening repositories with
567 569 # shared storage. If storage is shared, we should also test for requirements
568 570 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
569 571 # that repo, as that repo may load extensions needed to open it. This is a
570 572 # bit complicated because we don't want the other hgrc to overwrite settings
571 573 # in this hgrc.
572 574 #
573 575 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
574 576 # file when sharing repos. But if a requirement is added after the share is
575 577 # performed, thereby introducing a new requirement for the opener, we may
576 578 # not see that and could encounter a run-time error interacting with
577 579 # that shared store since it has an unknown-to-us requirement.
578 580
579 581 # At this point, we know we should be capable of opening the repository.
580 582 # Now get on with doing that.
581 583
582 584 features = set()
583 585
584 586 # The "store" part of the repository holds versioned data. How it is
585 587 # accessed is determined by various requirements. If `shared` or
586 588 # `relshared` requirements are present, this indicates the current repository
587 589 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
588 590 shared = (
589 591 requirementsmod.SHARED_REQUIREMENT in requirements
590 592 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
591 593 )
592 594 if shared:
593 595 sharedvfs = _getsharedvfs(hgvfs, requirements)
594 596 storebasepath = sharedvfs.base
595 597 cachepath = sharedvfs.join(b'cache')
596 598 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
597 599 else:
598 600 storebasepath = hgvfs.base
599 601 cachepath = hgvfs.join(b'cache')
600 602 wcachepath = hgvfs.join(b'wcache')
601 603
602 604 # The store has changed over time and the exact layout is dictated by
603 605 # requirements. The store interface abstracts differences across all
604 606 # of them.
605 607 store = makestore(
606 608 requirements,
607 609 storebasepath,
608 610 lambda base: vfsmod.vfs(base, cacheaudited=True),
609 611 )
610 612 hgvfs.createmode = store.createmode
611 613
612 614 storevfs = store.vfs
613 615 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
614 616
615 617 # The cache vfs is used to manage cache files.
616 618 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
617 619 cachevfs.createmode = store.createmode
618 620 # The cache vfs is used to manage cache files related to the working copy
619 621 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
620 622 wcachevfs.createmode = store.createmode
621 623
622 624 # Now resolve the type for the repository object. We do this by repeatedly
623 625 # calling a factory function to produce types for specific aspects of the
624 626 # repo's operation. The aggregate returned types are used as base classes
625 627 # for a dynamically-derived type, which will represent our new repository.
626 628
627 629 bases = []
628 630 extrastate = {}
629 631
630 632 for iface, fn in REPO_INTERFACES:
631 633 # We pass all potentially useful state to give extensions tons of
632 634 # flexibility.
633 635 typ = fn()(
634 636 ui=ui,
635 637 intents=intents,
636 638 requirements=requirements,
637 639 features=features,
638 640 wdirvfs=wdirvfs,
639 641 hgvfs=hgvfs,
640 642 store=store,
641 643 storevfs=storevfs,
642 644 storeoptions=storevfs.options,
643 645 cachevfs=cachevfs,
644 646 wcachevfs=wcachevfs,
645 647 extensionmodulenames=extensionmodulenames,
646 648 extrastate=extrastate,
647 649 baseclasses=bases,
648 650 )
649 651
650 652 if not isinstance(typ, type):
651 653 raise error.ProgrammingError(
652 654 b'unable to construct type for %s' % iface
653 655 )
654 656
655 657 bases.append(typ)
656 658
657 659 # type() allows you to use characters in type names that wouldn't be
658 660 # recognized as Python symbols in source code. We abuse that to add
659 661 # rich information about our constructed repo.
660 662 name = pycompat.sysstr(
661 663 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
662 664 )
663 665
664 666 cls = type(name, tuple(bases), {})
665 667
666 668 return cls(
667 669 baseui=baseui,
668 670 ui=ui,
669 671 origroot=path,
670 672 wdirvfs=wdirvfs,
671 673 hgvfs=hgvfs,
672 674 requirements=requirements,
673 675 supportedrequirements=supportedrequirements,
674 676 sharedpath=storebasepath,
675 677 store=store,
676 678 cachevfs=cachevfs,
677 679 wcachevfs=wcachevfs,
678 680 features=features,
679 681 intents=intents,
680 682 )
681 683
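
[editor's note] A hedged usage sketch; as the docstring notes, most callers should prefer `hg.repository()`, which layers `reposetup()` handling on top of this factory:

    from mercurial import ui as uimod, localrepo

    baseui = uimod.ui.load()  # loads global/user config
    repo = localrepo.makelocalrepository(baseui, b'/path/to/repo')
    print(repo.requirements)  # e.g. {b'revlogv1', b'store', ...}
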
682 684
683 685 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
684 686 """Load hgrc files/content into a ui instance.
685 687
686 688 This is called during repository opening to load any additional
687 689 config files or settings relevant to the current repository.
688 690
689 691 Returns a bool indicating whether any additional configs were loaded.
690 692
691 693 Extensions should monkeypatch this function to modify how per-repo
692 694 configs are loaded. For example, an extension may wish to pull in
693 695 configs from alternate files or sources.
694 696 """
695 697 if not rcutil.use_repo_hgrc():
696 698 return False
697 699 try:
698 700 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
699 701 return True
700 702 except IOError:
701 703 return False
702 704
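
[editor's note] Extensions usually hook this with `extensions.wrapfunction()`; a hedged sketch in which the extra config file name is purely hypothetical:

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            # hypothetical additional per-repo config source
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            return True
        except IOError:
            return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
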
703 705
704 706 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
705 707 """Perform additional actions after .hg/hgrc is loaded.
706 708
707 709 This function is called during repository loading immediately after
708 710 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
709 711
710 712 The function can be used to validate configs, automatically add
711 713 options (including extensions) based on requirements, etc.
712 714 """
713 715
714 716 # Map of requirements to list of extensions to load automatically when
715 717 # requirement is present.
716 718 autoextensions = {
717 719 b'git': [b'git'],
718 720 b'largefiles': [b'largefiles'],
719 721 b'lfs': [b'lfs'],
720 722 }
721 723
722 724 for requirement, names in sorted(autoextensions.items()):
723 725 if requirement not in requirements:
724 726 continue
725 727
726 728 for name in names:
727 729 if not ui.hasconfig(b'extensions', name):
728 730 ui.setconfig(b'extensions', name, b'', source=b'autoload')
729 731
730 732
731 733 def gathersupportedrequirements(ui):
732 734 """Determine the complete set of recognized requirements."""
733 735 # Start with all requirements supported by this file.
734 736 supported = set(localrepository._basesupported)
735 737
736 738 # Execute ``featuresetupfuncs`` entries if they belong to an extension
737 739 # relevant to this ui instance.
738 740 modules = {m.__name__ for n, m in extensions.extensions(ui)}
739 741
740 742 for fn in featuresetupfuncs:
741 743 if fn.__module__ in modules:
742 744 fn(ui, supported)
743 745
744 746 # Add derived requirements from registered compression engines.
745 747 for name in util.compengines:
746 748 engine = util.compengines[name]
747 749 if engine.available() and engine.revlogheader():
748 750 supported.add(b'exp-compression-%s' % name)
749 751 if engine.name() == b'zstd':
750 752 supported.add(b'revlog-compression-zstd')
751 753
752 754 return supported
753 755
754 756
755 757 def ensurerequirementsrecognized(requirements, supported):
756 758 """Validate that a set of local requirements is recognized.
757 759
758 760 Receives a set of requirements. Raises an ``error.RepoError`` if there
759 761 exists any requirement in that set that currently loaded code doesn't
760 762 recognize.
761 763
762 764 Returns a set of supported requirements.
763 765 """
764 766 missing = set()
765 767
766 768 for requirement in requirements:
767 769 if requirement in supported:
768 770 continue
769 771
770 772 if not requirement or not requirement[0:1].isalnum():
771 773 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
772 774
773 775 missing.add(requirement)
774 776
775 777 if missing:
776 778 raise error.RequirementError(
777 779 _(b'repository requires features unknown to this Mercurial: %s')
778 780 % b' '.join(sorted(missing)),
779 781 hint=_(
780 782 b'see https://mercurial-scm.org/wiki/MissingRequirement '
781 783 b'for more information'
782 784 ),
783 785 )
784 786
785 787
786 788 def ensurerequirementscompatible(ui, requirements):
787 789 """Validates that a set of recognized requirements is mutually compatible.
788 790
789 791 Some requirements may not be compatible with others or require
790 792 config options that aren't enabled. This function is called during
791 793 repository opening to ensure that the set of requirements needed
792 794 to open a repository is sane and compatible with config options.
793 795
794 796 Extensions can monkeypatch this function to perform additional
795 797 checking.
796 798
797 799 ``error.RepoError`` should be raised on failure.
798 800 """
799 801 if (
800 802 requirementsmod.SPARSE_REQUIREMENT in requirements
801 803 and not sparse.enabled
802 804 ):
803 805 raise error.RepoError(
804 806 _(
805 807 b'repository is using sparse feature but '
806 808 b'sparse is not enabled; enable the '
807 809 b'"sparse" extensions to access'
808 810 )
809 811 )
810 812
811 813
812 814 def makestore(requirements, path, vfstype):
813 815 """Construct a storage object for a repository."""
814 816 if b'store' in requirements:
815 817 if b'fncache' in requirements:
816 818 return storemod.fncachestore(
817 819 path, vfstype, b'dotencode' in requirements
818 820 )
819 821
820 822 return storemod.encodedstore(path, vfstype)
821 823
822 824 return storemod.basicstore(path, vfstype)
823 825
824 826
825 827 def resolvestorevfsoptions(ui, requirements, features):
826 828 """Resolve the options to pass to the store vfs opener.
827 829
828 830 The returned dict is used to influence behavior of the storage layer.
829 831 """
830 832 options = {}
831 833
832 834 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
833 835 options[b'treemanifest'] = True
834 836
835 837 # experimental config: format.manifestcachesize
836 838 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
837 839 if manifestcachesize is not None:
838 840 options[b'manifestcachesize'] = manifestcachesize
839 841
840 842 # In the absence of another requirement superseding a revlog-related
841 843 # requirement, we have to assume the repo is using revlog version 0.
842 844 # This revlog format is super old and we don't bother trying to parse
843 845 # opener options for it because those options wouldn't do anything
844 846 # meaningful on such old repos.
845 847 if (
846 848 b'revlogv1' in requirements
847 849 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
848 850 ):
849 851 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
850 852 else: # explicitly mark repo as using revlogv0
851 853 options[b'revlogv0'] = True
852 854
853 855 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
854 856 options[b'copies-storage'] = b'changeset-sidedata'
855 857 else:
856 858 writecopiesto = ui.config(b'experimental', b'copies.write-to')
857 859 copiesextramode = (b'changeset-only', b'compatibility')
858 860 if writecopiesto in copiesextramode:
859 861 options[b'copies-storage'] = b'extra'
860 862
861 863 return options
862 864
863 865
864 866 def resolverevlogstorevfsoptions(ui, requirements, features):
865 867 """Resolve opener options specific to revlogs."""
866 868
867 869 options = {}
868 870 options[b'flagprocessors'] = {}
869 871
870 872 if b'revlogv1' in requirements:
871 873 options[b'revlogv1'] = True
872 874 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
873 875 options[b'revlogv2'] = True
874 876
875 877 if b'generaldelta' in requirements:
876 878 options[b'generaldelta'] = True
877 879
878 880 # experimental config: format.chunkcachesize
879 881 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
880 882 if chunkcachesize is not None:
881 883 options[b'chunkcachesize'] = chunkcachesize
882 884
883 885 deltabothparents = ui.configbool(
884 886 b'storage', b'revlog.optimize-delta-parent-choice'
885 887 )
886 888 options[b'deltabothparents'] = deltabothparents
887 889
888 890 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
889 891 lazydeltabase = False
890 892 if lazydelta:
891 893 lazydeltabase = ui.configbool(
892 894 b'storage', b'revlog.reuse-external-delta-parent'
893 895 )
894 896 if lazydeltabase is None:
895 897 lazydeltabase = not scmutil.gddeltaconfig(ui)
896 898 options[b'lazydelta'] = lazydelta
897 899 options[b'lazydeltabase'] = lazydeltabase
898 900
899 901 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
900 902 if 0 <= chainspan:
901 903 options[b'maxdeltachainspan'] = chainspan
902 904
903 905 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
904 906 if mmapindexthreshold is not None:
905 907 options[b'mmapindexthreshold'] = mmapindexthreshold
906 908
907 909 withsparseread = ui.configbool(b'experimental', b'sparse-read')
908 910 srdensitythres = float(
909 911 ui.config(b'experimental', b'sparse-read.density-threshold')
910 912 )
911 913 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
912 914 options[b'with-sparse-read'] = withsparseread
913 915 options[b'sparse-read-density-threshold'] = srdensitythres
914 916 options[b'sparse-read-min-gap-size'] = srmingapsize
915 917
916 918 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
917 919 options[b'sparse-revlog'] = sparserevlog
918 920 if sparserevlog:
919 921 options[b'generaldelta'] = True
920 922
921 923 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
922 924 options[b'side-data'] = sidedata
923 925
924 926 maxchainlen = None
925 927 if sparserevlog:
926 928 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
927 929 # experimental config: format.maxchainlen
928 930 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
929 931 if maxchainlen is not None:
930 932 options[b'maxchainlen'] = maxchainlen
931 933
932 934 for r in requirements:
933 935 # we allow multiple compression engine requirements to co-exist because,
934 936 # strictly speaking, revlog seems to support mixed compression styles.
935 937 #
936 938 # The compression used for new entries will be "the last one"
937 939 prefix = r.startswith
938 940 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
939 941 options[b'compengine'] = r.split(b'-', 2)[2]
940 942
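
[editor's note] For reference, `split(b'-', 2)[2]` keeps everything after the second dash, so engine names containing dashes survive intact:

    assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    assert b'exp-compression-my-engine'.split(b'-', 2)[2] == b'my-engine'
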
941 943 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
942 944 if options[b'zlib.level'] is not None:
943 945 if not (0 <= options[b'zlib.level'] <= 9):
944 946 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
945 947 raise error.Abort(msg % options[b'zlib.level'])
946 948 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
947 949 if options[b'zstd.level'] is not None:
948 950 if not (0 <= options[b'zstd.level'] <= 22):
949 951 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
950 952 raise error.Abort(msg % options[b'zstd.level'])
951 953
952 954 if requirementsmod.NARROW_REQUIREMENT in requirements:
953 955 options[b'enableellipsis'] = True
954 956
955 957 if ui.configbool(b'experimental', b'rust.index'):
956 958 options[b'rust.index'] = True
957 959 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
958 960 options[b'persistent-nodemap'] = True
959 961 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
960 962 options[b'persistent-nodemap.mmap'] = True
961 963 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
962 964 options[b'persistent-nodemap.mode'] = epnm
963 965 if ui.configbool(b'devel', b'persistent-nodemap'):
964 966 options[b'devel-force-nodemap'] = True
965 967
966 968 return options
967 969
968 970
969 971 def makemain(**kwargs):
970 972 """Produce a type conforming to ``ilocalrepositorymain``."""
971 973 return localrepository
972 974
973 975
974 976 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
975 977 class revlogfilestorage(object):
976 978 """File storage when using revlogs."""
977 979
978 980 def file(self, path):
979 981 if path[0] == b'/':
980 982 path = path[1:]
981 983
982 984 return filelog.filelog(self.svfs, path)
983 985
984 986
985 987 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
986 988 class revlognarrowfilestorage(object):
987 989 """File storage when using revlogs and narrow files."""
988 990
989 991 def file(self, path):
990 992 if path[0] == b'/':
991 993 path = path[1:]
992 994
993 995 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
994 996
995 997
996 998 def makefilestorage(requirements, features, **kwargs):
997 999 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
998 1000 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
999 1001 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1000 1002
1001 1003 if requirementsmod.NARROW_REQUIREMENT in requirements:
1002 1004 return revlognarrowfilestorage
1003 1005 else:
1004 1006 return revlogfilestorage
1005 1007
1006 1008
1007 1009 # List of repository interfaces and factory functions for them. Each
1008 1010 # will be called in order during ``makelocalrepository()`` to iteratively
1009 1011 # derive the final type for a local repository instance. We capture the
1010 1012 # function as a lambda so we don't hold a reference and the module-level
1011 1013 # functions can be wrapped.
1012 1014 REPO_INTERFACES = [
1013 1015 (repository.ilocalrepositorymain, lambda: makemain),
1014 1016 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1015 1017 ]
1016 1018
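
[editor's note] Because each entry is a lambda that re-resolves the module attribute at call time, wrapping the module-level factory with `extensions.wrapfunction()` takes effect; a hedged sketch:

    from mercurial import extensions, localrepo

    def makefilestorage(orig, requirements, features, **kwargs):
        cls = orig(requirements=requirements, features=features, **kwargs)
        # derive or augment the returned storage type here (illustrative no-op)
        return cls

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makefilestorage', makefilestorage)
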
1017 1019
1018 1020 @interfaceutil.implementer(repository.ilocalrepositorymain)
1019 1021 class localrepository(object):
1020 1022 """Main class for representing local repositories.
1021 1023
1022 1024 All local repositories are instances of this class.
1023 1025
1024 1026 Constructed on its own, instances of this class are not usable as
1025 1027 repository objects. To obtain a usable repository object, call
1026 1028 ``hg.repository()``, ``localrepo.instance()``, or
1027 1029 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1028 1030 ``instance()`` adds support for creating new repositories.
1029 1031 ``hg.repository()`` adds more extension integration, including calling
1030 1032 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1031 1033 used.
1032 1034 """
1033 1035
1034 1036 # obsolete experimental requirements:
1035 1037 # - manifestv2: An experimental new manifest format that allowed
1036 1038 # for stem compression of long paths. Experiment ended up not
1037 1039 # being successful (repository sizes went up due to worse delta
1038 1040 # chains), and the code was deleted in 4.6.
1039 1041 supportedformats = {
1040 1042 b'revlogv1',
1041 1043 b'generaldelta',
1042 1044 requirementsmod.TREEMANIFEST_REQUIREMENT,
1043 1045 requirementsmod.COPIESSDC_REQUIREMENT,
1044 1046 requirementsmod.REVLOGV2_REQUIREMENT,
1045 1047 requirementsmod.SIDEDATA_REQUIREMENT,
1046 1048 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1047 1049 requirementsmod.NODEMAP_REQUIREMENT,
1048 1050 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1049 1051 }
1050 1052 _basesupported = supportedformats | {
1051 1053 b'store',
1052 1054 b'fncache',
1053 1055 requirementsmod.SHARED_REQUIREMENT,
1054 1056 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1055 1057 b'dotencode',
1056 1058 requirementsmod.SPARSE_REQUIREMENT,
1057 1059 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1058 1060 }
1059 1061
1060 1062 # list of prefixes for files which can be written without 'wlock'
1061 1063 # Extensions should extend this list when needed
1062 1064 _wlockfreeprefix = {
1063 1065 # We might consider requiring 'wlock' for the next
1064 1066 # two, but pretty much all the existing code assumes
1065 1067 # wlock is not needed so we keep them excluded for
1066 1068 # now.
1067 1069 b'hgrc',
1068 1070 b'requires',
1069 1071 # XXX cache is a complicated business; someone
1070 1072 # should investigate this in depth at some point
1071 1073 b'cache/',
1072 1074 # XXX shouldn't be dirstate covered by the wlock?
1073 1075 b'dirstate',
1074 1076 # XXX bisect was still a bit too messy at the time
1075 1077 # this changeset was introduced. Someone should fix
1076 1078 # the remaining bit and drop this line
1077 1079 b'bisect.state',
1078 1080 }
1079 1081
1080 1082 def __init__(
1081 1083 self,
1082 1084 baseui,
1083 1085 ui,
1084 1086 origroot,
1085 1087 wdirvfs,
1086 1088 hgvfs,
1087 1089 requirements,
1088 1090 supportedrequirements,
1089 1091 sharedpath,
1090 1092 store,
1091 1093 cachevfs,
1092 1094 wcachevfs,
1093 1095 features,
1094 1096 intents=None,
1095 1097 ):
1096 1098 """Create a new local repository instance.
1097 1099
1098 1100 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1099 1101 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1100 1102 object.
1101 1103
1102 1104 Arguments:
1103 1105
1104 1106 baseui
1105 1107 ``ui.ui`` instance that ``ui`` argument was based off of.
1106 1108
1107 1109 ui
1108 1110 ``ui.ui`` instance for use by the repository.
1109 1111
1110 1112 origroot
1111 1113 ``bytes`` path to working directory root of this repository.
1112 1114
1113 1115 wdirvfs
1114 1116 ``vfs.vfs`` rooted at the working directory.
1115 1117
1116 1118 hgvfs
1117 1119 ``vfs.vfs`` rooted at .hg/
1118 1120
1119 1121 requirements
1120 1122 ``set`` of bytestrings representing repository opening requirements.
1121 1123
1122 1124 supportedrequirements
1123 1125 ``set`` of bytestrings representing repository requirements that we
1124 1126 know how to open. May be a superset of ``requirements``.
1125 1127
1126 1128 sharedpath
1127 1129 ``bytes`` defining the path to the storage base directory. Points to a
1128 1130 ``.hg/`` directory somewhere.
1129 1131
1130 1132 store
1131 1133 ``store.basicstore`` (or derived) instance providing access to
1132 1134 versioned storage.
1133 1135
1134 1136 cachevfs
1135 1137 ``vfs.vfs`` used for cache files.
1136 1138
1137 1139 wcachevfs
1138 1140 ``vfs.vfs`` used for cache files related to the working copy.
1139 1141
1140 1142 features
1141 1143 ``set`` of bytestrings defining features/capabilities of this
1142 1144 instance.
1143 1145
1144 1146 intents
1145 1147 ``set`` of system strings indicating what this repo will be used
1146 1148 for.
1147 1149 """
1148 1150 self.baseui = baseui
1149 1151 self.ui = ui
1150 1152 self.origroot = origroot
1151 1153 # vfs rooted at working directory.
1152 1154 self.wvfs = wdirvfs
1153 1155 self.root = wdirvfs.base
1154 1156 # vfs rooted at .hg/. Used to access most non-store paths.
1155 1157 self.vfs = hgvfs
1156 1158 self.path = hgvfs.base
1157 1159 self.requirements = requirements
1158 1160 self.supported = supportedrequirements
1159 1161 self.sharedpath = sharedpath
1160 1162 self.store = store
1161 1163 self.cachevfs = cachevfs
1162 1164 self.wcachevfs = wcachevfs
1163 1165 self.features = features
1164 1166
1165 1167 self.filtername = None
1166 1168
1167 1169 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1168 1170 b'devel', b'check-locks'
1169 1171 ):
1170 1172 self.vfs.audit = self._getvfsward(self.vfs.audit)
1171 1173 # A list of callbacks to shape the phases if no data were found.
1172 1174 # Callbacks are in the form: func(repo, roots) --> processed root.
1173 1175 # This list is to be filled by extensions during repo setup
1174 1176 self._phasedefaults = []
1175 1177
1176 1178 color.setup(self.ui)
1177 1179
1178 1180 self.spath = self.store.path
1179 1181 self.svfs = self.store.vfs
1180 1182 self.sjoin = self.store.join
1181 1183 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1182 1184 b'devel', b'check-locks'
1183 1185 ):
1184 1186 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1185 1187 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1186 1188 else: # standard vfs
1187 1189 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1188 1190
1189 1191 self._dirstatevalidatewarned = False
1190 1192
1191 1193 self._branchcaches = branchmap.BranchMapCache()
1192 1194 self._revbranchcache = None
1193 1195 self._filterpats = {}
1194 1196 self._datafilters = {}
1195 1197 self._transref = self._lockref = self._wlockref = None
1196 1198
1197 1199 # A cache for various files under .hg/ that tracks file changes,
1198 1200 # (used by the filecache decorator)
1199 1201 #
1200 1202 # Maps a property name to its util.filecacheentry
1201 1203 self._filecache = {}
1202 1204
1203 1205 # hold sets of revisions to be filtered
1204 1206 # should be cleared when something might have changed the filter value:
1205 1207 # - new changesets,
1206 1208 # - phase change,
1207 1209 # - new obsolescence marker,
1208 1210 # - working directory parent change,
1209 1211 # - bookmark changes
1210 1212 self.filteredrevcache = {}
1211 1213
1212 1214 # post-dirstate-status hooks
1213 1215 self._postdsstatus = []
1214 1216
1215 1217 # generic mapping between names and nodes
1216 1218 self.names = namespaces.namespaces()
1217 1219
1218 1220 # Key to signature value.
1219 1221 self._sparsesignaturecache = {}
1220 1222 # Signature to cached matcher instance.
1221 1223 self._sparsematchercache = {}
1222 1224
1223 1225 self._extrafilterid = repoview.extrafilter(ui)
1224 1226
1225 1227 self.filecopiesmode = None
1226 1228 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1227 1229 self.filecopiesmode = b'changeset-sidedata'
1228 1230
1229 1231 def _getvfsward(self, origfunc):
1230 1232 """build a ward for self.vfs"""
1231 1233 rref = weakref.ref(self)
1232 1234
1233 1235 def checkvfs(path, mode=None):
1234 1236 ret = origfunc(path, mode=mode)
1235 1237 repo = rref()
1236 1238 if (
1237 1239 repo is None
1238 1240 or not util.safehasattr(repo, b'_wlockref')
1239 1241 or not util.safehasattr(repo, b'_lockref')
1240 1242 ):
1241 1243 return
1242 1244 if mode in (None, b'r', b'rb'):
1243 1245 return
1244 1246 if path.startswith(repo.path):
1245 1247 # truncate name relative to the repository (.hg)
1246 1248 path = path[len(repo.path) + 1 :]
1247 1249 if path.startswith(b'cache/'):
1248 1250 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1249 1251 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1250 1252 # path prefixes covered by 'lock'
1251 1253 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1252 1254 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1253 1255 if repo._currentlock(repo._lockref) is None:
1254 1256 repo.ui.develwarn(
1255 1257 b'write with no lock: "%s"' % path,
1256 1258 stacklevel=3,
1257 1259 config=b'check-locks',
1258 1260 )
1259 1261 elif repo._currentlock(repo._wlockref) is None:
1260 1262 # rest of vfs files are covered by 'wlock'
1261 1263 #
1262 1264 # exclude special files
1263 1265 for prefix in self._wlockfreeprefix:
1264 1266 if path.startswith(prefix):
1265 1267 return
1266 1268 repo.ui.develwarn(
1267 1269 b'write with no wlock: "%s"' % path,
1268 1270 stacklevel=3,
1269 1271 config=b'check-locks',
1270 1272 )
1271 1273 return ret
1272 1274
1273 1275 return checkvfs
1274 1276
1275 1277 def _getsvfsward(self, origfunc):
1276 1278 """build a ward for self.svfs"""
1277 1279 rref = weakref.ref(self)
1278 1280
1279 1281 def checksvfs(path, mode=None):
1280 1282 ret = origfunc(path, mode=mode)
1281 1283 repo = rref()
1282 1284 if repo is None or not util.safehasattr(repo, b'_lockref'):
1283 1285 return
1284 1286 if mode in (None, b'r', b'rb'):
1285 1287 return
1286 1288 if path.startswith(repo.sharedpath):
1287 1289 # truncate name relative to the repository (.hg)
1288 1290 path = path[len(repo.sharedpath) + 1 :]
1289 1291 if repo._currentlock(repo._lockref) is None:
1290 1292 repo.ui.develwarn(
1291 1293 b'write with no lock: "%s"' % path, stacklevel=4
1292 1294 )
1293 1295 return ret
1294 1296
1295 1297 return checksvfs
1296 1298
1297 1299 def close(self):
1298 1300 self._writecaches()
1299 1301
1300 1302 def _writecaches(self):
1301 1303 if self._revbranchcache:
1302 1304 self._revbranchcache.write()
1303 1305
1304 1306 def _restrictcapabilities(self, caps):
1305 1307 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1306 1308 caps = set(caps)
1307 1309 capsblob = bundle2.encodecaps(
1308 1310 bundle2.getrepocaps(self, role=b'client')
1309 1311 )
1310 1312 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1311 1313 return caps
1312 1314
1313 1315 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1314 1316 # self -> auditor -> self._checknested -> self
1315 1317
1316 1318 @property
1317 1319 def auditor(self):
1318 1320 # This is only used by context.workingctx.match in order to
1319 1321 # detect files in subrepos.
1320 1322 return pathutil.pathauditor(self.root, callback=self._checknested)
1321 1323
1322 1324 @property
1323 1325 def nofsauditor(self):
1324 1326 # This is only used by context.basectx.match in order to detect
1325 1327 # files in subrepos.
1326 1328 return pathutil.pathauditor(
1327 1329 self.root, callback=self._checknested, realfs=False, cached=True
1328 1330 )
1329 1331
1330 1332 def _checknested(self, path):
1331 1333 """Determine if path is a legal nested repository."""
1332 1334 if not path.startswith(self.root):
1333 1335 return False
1334 1336 subpath = path[len(self.root) + 1 :]
1335 1337 normsubpath = util.pconvert(subpath)
1336 1338
1337 1339 # XXX: Checking against the current working copy is wrong in
1338 1340 # the sense that it can reject things like
1339 1341 #
1340 1342 # $ hg cat -r 10 sub/x.txt
1341 1343 #
1342 1344 # if sub/ is no longer a subrepository in the working copy
1343 1345 # parent revision.
1344 1346 #
1345 1347 # However, it can of course also allow things that would have
1346 1348 # been rejected before, such as the above cat command if sub/
1347 1349 # is a subrepository now, but was a normal directory before.
1348 1350 # The old path auditor would have rejected by mistake since it
1349 1351 # panics when it sees sub/.hg/.
1350 1352 #
1351 1353 # All in all, checking against the working copy seems sensible
1352 1354 # since we want to prevent access to nested repositories on
1353 1355 # the filesystem *now*.
1354 1356 ctx = self[None]
1355 1357 parts = util.splitpath(subpath)
1356 1358 while parts:
1357 1359 prefix = b'/'.join(parts)
1358 1360 if prefix in ctx.substate:
1359 1361 if prefix == normsubpath:
1360 1362 return True
1361 1363 else:
1362 1364 sub = ctx.sub(prefix)
1363 1365 return sub.checknested(subpath[len(prefix) + 1 :])
1364 1366 else:
1365 1367 parts.pop()
1366 1368 return False
1367 1369
1368 1370 def peer(self):
1369 1371 return localpeer(self) # not cached to avoid reference cycle
1370 1372
1371 1373 def unfiltered(self):
1372 1374 """Return unfiltered version of the repository
1373 1375
1374 1376 Intended to be overwritten by filtered repo."""
1375 1377 return self
1376 1378
1377 1379 def filtered(self, name, visibilityexceptions=None):
1378 1380 """Return a filtered version of a repository
1379 1381
1380 1382 The `name` parameter is the identifier of the requested view. This
1381 1383 will return a repoview object set "exactly" to the specified view.
1382 1384
1383 1385 This function does not apply recursive filtering to a repository. For
1384 1386 example, calling `repo.filtered("served")` will return a repoview using
1385 1387 the "served" view, regardless of the initial view used by `repo`.
1386 1388
1387 1389 In other words, there is always only one level of `repoview` "filtering".
1388 1390 """
1389 1391 if self._extrafilterid is not None and b'%' not in name:
1390 1392 name = name + b'%' + self._extrafilterid
1391 1393
1392 1394 cls = repoview.newtype(self.unfiltered().__class__)
1393 1395 return cls(self, name, visibilityexceptions)
1394 1396
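
[editor's note] A hedged usage sketch of the one-level filtering contract; `b'served'` and `b'visible'` are standard view names:

    served = repo.filtered(b'served')  # hides secret and hidden changesets
    # Filtering never stacks: re-filtering a view starts over from the
    # unfiltered repo, so this is a plain 'visible' view, not an
    # intersection of 'served' and 'visible'.
    visible = served.filtered(b'visible')
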
1395 1397 @mixedrepostorecache(
1396 1398 (b'bookmarks', b'plain'),
1397 1399 (b'bookmarks.current', b'plain'),
1398 1400 (b'bookmarks', b''),
1399 1401 (b'00changelog.i', b''),
1400 1402 )
1401 1403 def _bookmarks(self):
1402 1404 # Since the multiple files involved in the transaction cannot be
1403 1405 # written atomically (with current repository format), there is a race
1404 1406 # condition here.
1405 1407 #
1406 1408 # 1) changelog content A is read
1407 1409 # 2) outside transaction update changelog to content B
1408 1410 # 3) outside transaction update bookmark file referring to content B
1409 1411 # 4) bookmarks file content is read and filtered against changelog-A
1410 1412 #
1411 1413 # When this happens, bookmarks against nodes missing from A are dropped.
1412 1414 #
1413 1415 # Having this happen during a read is not great, but it becomes worse
1414 1416 # when it happens during a write because the bookmarks to the "unknown"
1415 1417 # nodes will be dropped for good. However, writes happen within locks.
1416 1418 # This locking makes it possible to have a race-free consistent read.
1417 1419 # For this purpose, data read from disk before locking is
1418 1420 # "invalidated" right after the locks are taken. These invalidations are
1419 1421 # "light"; the `filecache` mechanism keeps the data in memory and will
1420 1422 # reuse it if the underlying files did not change. Not parsing the
1421 1423 # same data multiple times helps performance.
1422 1424 #
1423 1425 # Unfortunately, in the case described above, the files tracked by the
1424 1426 # bookmarks file cache might not have changed, but the in-memory
1425 1427 # content is still "wrong" because we used an older changelog content
1426 1428 # to process the on-disk data. So after locking, the changelog would be
1427 1429 # refreshed but `_bookmarks` would be preserved.
1428 1430 # Adding `00changelog.i` to the list of tracked files is not
1429 1431 # enough, because at the time we build the content for `_bookmarks` in
1430 1432 # (4), the changelog file has already diverged from the content used
1431 1433 # for loading `changelog` in (1)
1432 1434 #
1433 1435 # To prevent the issue, we force the changelog to be explicitly
1434 1436 # reloaded while computing `_bookmarks`. The data race can still happen
1435 1437 # without the lock (with a narrower window), but it would no longer go
1436 1438 # undetected during the lock time refresh.
1437 1439 #
1438 1440 # The new schedule is as follows:
1439 1441 #
1440 1442 # 1) filecache logic detect that `_bookmarks` needs to be computed
1441 1443 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1442 1444 # 3) We force `changelog` filecache to be tested
1443 1445 # 4) cachestat for `changelog` are captured (for changelog)
1444 1446 # 5) `_bookmarks` is computed and cached
1445 1447 #
1446 1448 # The step in (3) ensures we have a changelog at least as recent as the
1447 1449 # cache stat computed in (1). As a result at locking time:
1448 1450 # * if the changelog did not change since (1) -> we can reuse the data
1449 1451 # * otherwise -> the bookmarks get refreshed.
1450 1452 self._refreshchangelog()
1451 1453 return bookmarks.bmstore(self)
1452 1454
1453 1455 def _refreshchangelog(self):
1454 1456 """make sure the in memory changelog match the on-disk one"""
1455 1457 if 'changelog' in vars(self) and self.currenttransaction() is None:
1456 1458 del self.changelog
1457 1459
1458 1460 @property
1459 1461 def _activebookmark(self):
1460 1462 return self._bookmarks.active
1461 1463
1462 1464 # _phasesets depend on changelog. What we need is to call
1463 1465 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1464 1466 # can't be easily expressed in the filecache mechanism.
1465 1467 @storecache(b'phaseroots', b'00changelog.i')
1466 1468 def _phasecache(self):
1467 1469 return phases.phasecache(self, self._phasedefaults)
1468 1470
1469 1471 @storecache(b'obsstore')
1470 1472 def obsstore(self):
1471 1473 return obsolete.makestore(self.ui, self)
1472 1474
1473 1475 @storecache(b'00changelog.i')
1474 1476 def changelog(self):
1475 1477 # load dirstate before changelog to avoid a race; see issue6303
1476 1478 self.dirstate.prefetch_parents()
1477 1479 return self.store.changelog(txnutil.mayhavepending(self.root))
1478 1480
1479 1481 @storecache(b'00manifest.i')
1480 1482 def manifestlog(self):
1481 1483 return self.store.manifestlog(self, self._storenarrowmatch)
1482 1484
1483 1485 @repofilecache(b'dirstate')
1484 1486 def dirstate(self):
1485 1487 return self._makedirstate()
1486 1488
1487 1489 def _makedirstate(self):
1488 1490 """Extension point for wrapping the dirstate per-repo."""
1489 1491 sparsematchfn = lambda: sparse.matcher(self)
1490 1492
1491 1493 return dirstate.dirstate(
1492 1494 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1493 1495 )
1494 1496
1495 1497 def _dirstatevalidate(self, node):
1496 1498 try:
1497 1499 self.changelog.rev(node)
1498 1500 return node
1499 1501 except error.LookupError:
1500 1502 if not self._dirstatevalidatewarned:
1501 1503 self._dirstatevalidatewarned = True
1502 1504 self.ui.warn(
1503 1505 _(b"warning: ignoring unknown working parent %s!\n")
1504 1506 % short(node)
1505 1507 )
1506 1508 return nullid
1507 1509
1508 1510 @storecache(narrowspec.FILENAME)
1509 1511 def narrowpats(self):
1510 1512 """matcher patterns for this repository's narrowspec
1511 1513
1512 1514 A tuple of (includes, excludes).
1513 1515 """
1514 1516 return narrowspec.load(self)
1515 1517
1516 1518 @storecache(narrowspec.FILENAME)
1517 1519 def _storenarrowmatch(self):
1518 1520 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1519 1521 return matchmod.always()
1520 1522 include, exclude = self.narrowpats
1521 1523 return narrowspec.match(self.root, include=include, exclude=exclude)
1522 1524
1523 1525 @storecache(narrowspec.FILENAME)
1524 1526 def _narrowmatch(self):
1525 1527 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1526 1528 return matchmod.always()
1527 1529 narrowspec.checkworkingcopynarrowspec(self)
1528 1530 include, exclude = self.narrowpats
1529 1531 return narrowspec.match(self.root, include=include, exclude=exclude)
1530 1532
1531 1533 def narrowmatch(self, match=None, includeexact=False):
1532 1534 """matcher corresponding the the repo's narrowspec
1533 1535
1534 1536 If `match` is given, then that will be intersected with the narrow
1535 1537 matcher.
1536 1538
1537 1539 If `includeexact` is True, then any exact matches from `match` will
1538 1540 be included even if they're outside the narrowspec.
1539 1541 """
1540 1542 if match:
1541 1543 if includeexact and not self._narrowmatch.always():
1542 1544 # do not exclude explicitly-specified paths so that a warning can
1543 1545 # be issued for them later on
1544 1546 em = matchmod.exact(match.files())
1545 1547 nm = matchmod.unionmatcher([self._narrowmatch, em])
1546 1548 return matchmod.intersectmatchers(match, nm)
1547 1549 return matchmod.intersectmatchers(match, self._narrowmatch)
1548 1550 return self._narrowmatch
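
# Illustrative sketch (not part of the module; ``paths`` stands for a
# hypothetical list of byte-string paths): intersecting an explicit match
# with the narrowspec while keeping the exact paths visible::
#
#   m = matchmod.exact(paths)
#   nm = repo.narrowmatch(match=m, includeexact=True)
#   # nm matches files inside the narrowspec plus the exact ``paths``,
#   # so callers can warn about the ones that fall outside of it.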
1549 1551
1550 1552 def setnarrowpats(self, newincludes, newexcludes):
1551 1553 narrowspec.save(self, newincludes, newexcludes)
1552 1554 self.invalidate(clearfilecache=True)
1553 1555
1554 1556 @unfilteredpropertycache
1555 1557 def _quick_access_changeid_null(self):
1556 1558 return {
1557 1559 b'null': (nullrev, nullid),
1558 1560 nullrev: (nullrev, nullid),
1559 1561 nullid: (nullrev, nullid),
1560 1562 }
1561 1563
1562 1564 @unfilteredpropertycache
1563 1565 def _quick_access_changeid_wc(self):
1564 1566 # also fast path access to the working copy parents
1565 1567 # however, only do it for filters that ensure the wc is visible.
1566 1568 quick = {}
1567 1569 cl = self.unfiltered().changelog
1568 1570 for node in self.dirstate.parents():
1569 1571 if node == nullid:
1570 1572 continue
1571 1573 rev = cl.index.get_rev(node)
1572 1574 if rev is None:
1573 1575 # unknown working copy parent case:
1574 1576 #
1575 1577 # skip the fast path and let higher code deal with it
1576 1578 continue
1577 1579 pair = (rev, node)
1578 1580 quick[rev] = pair
1579 1581 quick[node] = pair
1580 1582 # also add the parents of the parents
1581 1583 for r in cl.parentrevs(rev):
1582 1584 if r == nullrev:
1583 1585 continue
1584 1586 n = cl.node(r)
1585 1587 pair = (r, n)
1586 1588 quick[r] = pair
1587 1589 quick[n] = pair
1588 1590 p1node = self.dirstate.p1()
1589 1591 if p1node != nullid:
1590 1592 quick[b'.'] = quick[p1node]
1591 1593 return quick
1592 1594
1593 1595 @unfilteredmethod
1594 1596 def _quick_access_changeid_invalidate(self):
1595 1597 if '_quick_access_changeid_wc' in vars(self):
1596 1598 del self.__dict__['_quick_access_changeid_wc']
1597 1599
1598 1600 @property
1599 1601 def _quick_access_changeid(self):
1600 1602 """an helper dictionnary for __getitem__ calls
1601 1603
1602 1604 This contains a list of symbol we can recognise right away without
1603 1605 further processing.
1604 1606 """
1605 1607 mapping = self._quick_access_changeid_null
1606 1608 if self.filtername in repoview.filter_has_wc:
1607 1609 mapping = mapping.copy()
1608 1610 mapping.update(self._quick_access_changeid_wc)
1609 1611 return mapping
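
# Illustrative sketch (not part of the module): with the mapping above,
# common lookups resolve without touching the changelog index::
#
#   repo[b'null']  # served by _quick_access_changeid_null
#   repo[b'.']     # served by the working-copy fast path, when the
#                  # current filter is listed in repoview.filter_has_wc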
1610 1612
1611 1613 def __getitem__(self, changeid):
1612 1614 # dealing with special cases
1613 1615 if changeid is None:
1614 1616 return context.workingctx(self)
1615 1617 if isinstance(changeid, context.basectx):
1616 1618 return changeid
1617 1619
1618 1620 # dealing with multiple revisions
1619 1621 if isinstance(changeid, slice):
1620 1622 # wdirrev isn't contiguous so the slice shouldn't include it
1621 1623 return [
1622 1624 self[i]
1623 1625 for i in pycompat.xrange(*changeid.indices(len(self)))
1624 1626 if i not in self.changelog.filteredrevs
1625 1627 ]
1626 1628
1627 1629 # dealing with some special values
1628 1630 quick_access = self._quick_access_changeid.get(changeid)
1629 1631 if quick_access is not None:
1630 1632 rev, node = quick_access
1631 1633 return context.changectx(self, rev, node, maybe_filtered=False)
1632 1634 if changeid == b'tip':
1633 1635 node = self.changelog.tip()
1634 1636 rev = self.changelog.rev(node)
1635 1637 return context.changectx(self, rev, node)
1636 1638
1637 1639 # dealing with arbitrary values
1638 1640 try:
1639 1641 if isinstance(changeid, int):
1640 1642 node = self.changelog.node(changeid)
1641 1643 rev = changeid
1642 1644 elif changeid == b'.':
1643 1645 # this is a hack to delay/avoid loading obsmarkers
1644 1646 # when we know that '.' won't be hidden
1645 1647 node = self.dirstate.p1()
1646 1648 rev = self.unfiltered().changelog.rev(node)
1647 1649 elif len(changeid) == 20:
1648 1650 try:
1649 1651 node = changeid
1650 1652 rev = self.changelog.rev(changeid)
1651 1653 except error.FilteredLookupError:
1652 1654 changeid = hex(changeid) # for the error message
1653 1655 raise
1654 1656 except LookupError:
1655 1657 # check if it might have come from damaged dirstate
1656 1658 #
1657 1659 # XXX we could avoid the unfiltered if we had a recognizable
1658 1660 # exception for filtered changeset access
1659 1661 if (
1660 1662 self.local()
1661 1663 and changeid in self.unfiltered().dirstate.parents()
1662 1664 ):
1663 1665 msg = _(b"working directory has unknown parent '%s'!")
1664 1666 raise error.Abort(msg % short(changeid))
1665 1667 changeid = hex(changeid) # for the error message
1666 1668 raise
1667 1669
1668 1670 elif len(changeid) == 40:
1669 1671 node = bin(changeid)
1670 1672 rev = self.changelog.rev(node)
1671 1673 else:
1672 1674 raise error.ProgrammingError(
1673 1675 b"unsupported changeid '%s' of type %s"
1674 1676 % (changeid, pycompat.bytestr(type(changeid)))
1675 1677 )
1676 1678
1677 1679 return context.changectx(self, rev, node)
1678 1680
1679 1681 except (error.FilteredIndexError, error.FilteredLookupError):
1680 1682 raise error.FilteredRepoLookupError(
1681 1683 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1682 1684 )
1683 1685 except (IndexError, LookupError):
1684 1686 raise error.RepoLookupError(
1685 1687 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1686 1688 )
1687 1689 except error.WdirUnsupported:
1688 1690 return context.workingctx(self)
1689 1691
1690 1692 def __contains__(self, changeid):
1691 1693 """True if the given changeid exists
1692 1694
1693 1695 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1694 1696 specified.
1695 1697 """
1696 1698 try:
1697 1699 self[changeid]
1698 1700 return True
1699 1701 except error.RepoLookupError:
1700 1702 return False
1701 1703
1702 1704 def __nonzero__(self):
1703 1705 return True
1704 1706
1705 1707 __bool__ = __nonzero__
1706 1708
1707 1709 def __len__(self):
1708 1710 # no need to pay the cost of repoview.changelog
1709 1711 unfi = self.unfiltered()
1710 1712 return len(unfi.changelog)
1711 1713
1712 1714 def __iter__(self):
1713 1715 return iter(self.changelog)
1714 1716
1715 1717 def revs(self, expr, *args):
1716 1718 '''Find revisions matching a revset.
1717 1719
1718 1720 The revset is specified as a string ``expr`` that may contain
1719 1721 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1720 1722
1721 1723 Revset aliases from the configuration are not expanded. To expand
1722 1724 user aliases, consider calling ``scmutil.revrange()`` or
1723 1725 ``repo.anyrevs([expr], user=True)``.
1724 1726
1725 1727 Returns a smartset.abstractsmartset, which is a list-like interface
1726 1728 that contains integer revisions.
1727 1729 '''
1728 1730 tree = revsetlang.spectree(expr, *args)
1729 1731 return revset.makematcher(tree)(self)
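
# Illustrative sketch (not part of the module): typical ``revs()`` calls
# rely on %-formatting to escape values::
#
#   repo.revs(b'heads(%ld)', intlist)     # %ld escapes a list of revisions
#   repo.revs(b'branch(%s)', branchname)  # %s escapes a byte string
#
# See ``revsetlang.formatspec`` for the full list of specifiers.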
1730 1732
1731 1733 def set(self, expr, *args):
1732 1734 '''Find revisions matching a revset and emit changectx instances.
1733 1735
1734 1736 This is a convenience wrapper around ``revs()`` that iterates the
1735 1737 result and is a generator of changectx instances.
1736 1738
1737 1739 Revset aliases from the configuration are not expanded. To expand
1738 1740 user aliases, consider calling ``scmutil.revrange()``.
1739 1741 '''
1740 1742 for r in self.revs(expr, *args):
1741 1743 yield self[r]
1742 1744
1743 1745 def anyrevs(self, specs, user=False, localalias=None):
1744 1746 '''Find revisions matching one of the given revsets.
1745 1747
1746 1748 Revset aliases from the configuration are not expanded by default. To
1747 1749 expand user aliases, specify ``user=True``. To provide some local
1748 1750 definitions overriding user aliases, set ``localalias`` to
1749 1751 ``{name: definitionstring}``.
1750 1752 '''
1751 1753 if specs == [b'null']:
1752 1754 return revset.baseset([nullrev])
1753 1755 if specs == [b'.']:
1754 1756 quick_data = self._quick_access_changeid.get(b'.')
1755 1757 if quick_data is not None:
1756 1758 return revset.baseset([quick_data[0]])
1757 1759 if user:
1758 1760 m = revset.matchany(
1759 1761 self.ui,
1760 1762 specs,
1761 1763 lookup=revset.lookupfn(self),
1762 1764 localalias=localalias,
1763 1765 )
1764 1766 else:
1765 1767 m = revset.matchany(None, specs, localalias=localalias)
1766 1768 return m(self)
1767 1769
1768 1770 def url(self):
1769 1771 return b'file:' + self.root
1770 1772
1771 1773 def hook(self, name, throw=False, **args):
1772 1774 """Call a hook, passing this repo instance.
1773 1775
1774 1776 This is a convenience method to aid invoking hooks. Extensions likely
1775 1777 won't call this unless they have registered a custom hook or are
1776 1778 replacing code that is expected to call a hook.
1777 1779 """
1778 1780 return hook.hook(self.ui, self, name, throw, **args)
1779 1781
1780 1782 @filteredpropertycache
1781 1783 def _tagscache(self):
1782 1784 '''Returns a tagscache object that contains various tag-related
1783 1785 caches.'''
1784 1786
1785 1787 # This simplifies its cache management by having one decorated
1786 1788 # function (this one) and the rest simply fetch things from it.
1787 1789 class tagscache(object):
1788 1790 def __init__(self):
1789 1791 # These two define the set of tags for this repository. tags
1790 1792 # maps tag name to node; tagtypes maps tag name to 'global' or
1791 1793 # 'local'. (Global tags are defined by .hgtags across all
1792 1794 # heads, and local tags are defined in .hg/localtags.)
1793 1795 # They constitute the in-memory cache of tags.
1794 1796 self.tags = self.tagtypes = None
1795 1797
1796 1798 self.nodetagscache = self.tagslist = None
1797 1799
1798 1800 cache = tagscache()
1799 1801 cache.tags, cache.tagtypes = self._findtags()
1800 1802
1801 1803 return cache
1802 1804
1803 1805 def tags(self):
1804 1806 '''return a mapping of tag to node'''
1805 1807 t = {}
1806 1808 if self.changelog.filteredrevs:
1807 1809 tags, tt = self._findtags()
1808 1810 else:
1809 1811 tags = self._tagscache.tags
1810 1812 rev = self.changelog.rev
1811 1813 for k, v in pycompat.iteritems(tags):
1812 1814 try:
1813 1815 # ignore tags to unknown nodes
1814 1816 rev(v)
1815 1817 t[k] = v
1816 1818 except (error.LookupError, ValueError):
1817 1819 pass
1818 1820 return t
1819 1821
1820 1822 def _findtags(self):
1821 1823 '''Do the hard work of finding tags. Return a pair of dicts
1822 1824 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1823 1825 maps tag name to a string like \'global\' or \'local\'.
1824 1826 Subclasses or extensions are free to add their own tags, but
1825 1827 should be aware that the returned dicts will be retained for the
1826 1828 duration of the localrepo object.'''
1827 1829
1828 1830 # XXX what tagtype should subclasses/extensions use? Currently
1829 1831 # mq and bookmarks add tags, but do not set the tagtype at all.
1830 1832 # Should each extension invent its own tag type? Should there
1831 1833 # be one tagtype for all such "virtual" tags? Or is the status
1832 1834 # quo fine?
1833 1835
1834 1836 # map tag name to (node, hist)
1835 1837 alltags = tagsmod.findglobaltags(self.ui, self)
1836 1838 # map tag name to tag type
1837 1839 tagtypes = {tag: b'global' for tag in alltags}
1838 1840
1839 1841 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1840 1842
1841 1843 # Build the return dicts. Have to re-encode tag names because
1842 1844 # the tags module always uses UTF-8 (in order not to lose info
1843 1845 # writing to the cache), but the rest of Mercurial wants them in
1844 1846 # local encoding.
1845 1847 tags = {}
1846 1848 for (name, (node, hist)) in pycompat.iteritems(alltags):
1847 1849 if node != nullid:
1848 1850 tags[encoding.tolocal(name)] = node
1849 1851 tags[b'tip'] = self.changelog.tip()
1850 1852 tagtypes = {
1851 1853 encoding.tolocal(name): value
1852 1854 for (name, value) in pycompat.iteritems(tagtypes)
1853 1855 }
1854 1856 return (tags, tagtypes)
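
# Illustrative sketch (hypothetical values): the pair returned above has
# the shape::
#
#   tags     = {b'tip': <20-byte node>, b'release-1.0': <20-byte node>}
#   tagtypes = {b'release-1.0': b'global', b'wip': b'local'}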
1855 1857
1856 1858 def tagtype(self, tagname):
1857 1859 '''
1858 1860 return the type of the given tag. result can be:
1859 1861
1860 1862 'local' : a local tag
1861 1863 'global' : a global tag
1862 1864 None : tag does not exist
1863 1865 '''
1864 1866
1865 1867 return self._tagscache.tagtypes.get(tagname)
1866 1868
1867 1869 def tagslist(self):
1868 1870 '''return a list of tags ordered by revision'''
1869 1871 if not self._tagscache.tagslist:
1870 1872 l = []
1871 1873 for t, n in pycompat.iteritems(self.tags()):
1872 1874 l.append((self.changelog.rev(n), t, n))
1873 1875 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1874 1876
1875 1877 return self._tagscache.tagslist
1876 1878
1877 1879 def nodetags(self, node):
1878 1880 '''return the tags associated with a node'''
1879 1881 if not self._tagscache.nodetagscache:
1880 1882 nodetagscache = {}
1881 1883 for t, n in pycompat.iteritems(self._tagscache.tags):
1882 1884 nodetagscache.setdefault(n, []).append(t)
1883 1885 for tags in pycompat.itervalues(nodetagscache):
1884 1886 tags.sort()
1885 1887 self._tagscache.nodetagscache = nodetagscache
1886 1888 return self._tagscache.nodetagscache.get(node, [])
1887 1889
1888 1890 def nodebookmarks(self, node):
1889 1891 """return the list of bookmarks pointing to the specified node"""
1890 1892 return self._bookmarks.names(node)
1891 1893
1892 1894 def branchmap(self):
1893 1895 '''returns a dictionary {branch: [branchheads]} with branchheads
1894 1896 ordered by increasing revision number'''
1895 1897 return self._branchcaches[self]
1896 1898
1897 1899 @unfilteredmethod
1898 1900 def revbranchcache(self):
1899 1901 if not self._revbranchcache:
1900 1902 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1901 1903 return self._revbranchcache
1902 1904
1903 1905 def branchtip(self, branch, ignoremissing=False):
1904 1906 '''return the tip node for a given branch
1905 1907
1906 1908 If ignoremissing is True, then this method will not raise an error.
1907 1909 This is helpful for callers that only expect None for a missing branch
1908 1910 (e.g. namespace).
1909 1911
1910 1912 '''
1911 1913 try:
1912 1914 return self.branchmap().branchtip(branch)
1913 1915 except KeyError:
1914 1916 if not ignoremissing:
1915 1917 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1916 1918 else:
1917 1919 pass
1918 1920
1919 1921 def lookup(self, key):
1920 1922 node = scmutil.revsymbol(self, key).node()
1921 1923 if node is None:
1922 1924 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1923 1925 return node
1924 1926
1925 1927 def lookupbranch(self, key):
1926 1928 if self.branchmap().hasbranch(key):
1927 1929 return key
1928 1930
1929 1931 return scmutil.revsymbol(self, key).branch()
1930 1932
1931 1933 def known(self, nodes):
1932 1934 cl = self.changelog
1933 1935 get_rev = cl.index.get_rev
1934 1936 filtered = cl.filteredrevs
1935 1937 result = []
1936 1938 for n in nodes:
1937 1939 r = get_rev(n)
1938 1940 resp = not (r is None or r in filtered)
1939 1941 result.append(resp)
1940 1942 return result
1941 1943
1942 1944 def local(self):
1943 1945 return self
1944 1946
1945 1947 def publishing(self):
1946 1948 # it's safe (and desirable) to trust the publish flag unconditionally
1947 1949 # so that we don't finalize changes shared between users via ssh or nfs
1948 1950 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1949 1951
1950 1952 def cancopy(self):
1951 1953 # so statichttprepo's override of local() works
1952 1954 if not self.local():
1953 1955 return False
1954 1956 if not self.publishing():
1955 1957 return True
1956 1958 # if publishing we can't copy if there is filtered content
1957 1959 return not self.filtered(b'visible').changelog.filteredrevs
1958 1960
1959 1961 def shared(self):
1960 1962 '''the type of shared repository (None if not shared)'''
1961 1963 if self.sharedpath != self.path:
1962 1964 return b'store'
1963 1965 return None
1964 1966
1965 1967 def wjoin(self, f, *insidef):
1966 1968 return self.vfs.reljoin(self.root, f, *insidef)
1967 1969
1968 1970 def setparents(self, p1, p2=nullid):
1969 1971 self[None].setparents(p1, p2)
1970 1972 self._quick_access_changeid_invalidate()
1971 1973
1972 1974 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1973 1975 """changeid must be a changeset revision, if specified.
1974 1976 fileid can be a file revision or node."""
1975 1977 return context.filectx(
1976 1978 self, path, changeid, fileid, changectx=changectx
1977 1979 )
1978 1980
1979 1981 def getcwd(self):
1980 1982 return self.dirstate.getcwd()
1981 1983
1982 1984 def pathto(self, f, cwd=None):
1983 1985 return self.dirstate.pathto(f, cwd)
1984 1986
1985 1987 def _loadfilter(self, filter):
1986 1988 if filter not in self._filterpats:
1987 1989 l = []
1988 1990 for pat, cmd in self.ui.configitems(filter):
1989 1991 if cmd == b'!':
1990 1992 continue
1991 1993 mf = matchmod.match(self.root, b'', [pat])
1992 1994 fn = None
1993 1995 params = cmd
1994 1996 for name, filterfn in pycompat.iteritems(self._datafilters):
1995 1997 if cmd.startswith(name):
1996 1998 fn = filterfn
1997 1999 params = cmd[len(name) :].lstrip()
1998 2000 break
1999 2001 if not fn:
2000 2002 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2001 2003 fn.__name__ = 'commandfilter'
2002 2004 # Wrap old filters not supporting keyword arguments
2003 2005 if not pycompat.getargspec(fn)[2]:
2004 2006 oldfn = fn
2005 2007 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2006 2008 fn.__name__ = 'compat-' + oldfn.__name__
2007 2009 l.append((mf, fn, params))
2008 2010 self._filterpats[filter] = l
2009 2011 return self._filterpats[filter]
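
# Illustrative sketch (the classic example from the hgrc documentation):
# command filters are configured per pattern in the ``[encode]`` and
# ``[decode]`` sections::
#
#   [encode]
#   *.gz = pipe: gunzip
#   [decode]
#   *.gz = pipe: gzip
#
# ``pipe:`` (the default driver) feeds the file data to the command's
# stdin and reads the filtered result from its stdout.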
2010 2012
2011 2013 def _filter(self, filterpats, filename, data):
2012 2014 for mf, fn, cmd in filterpats:
2013 2015 if mf(filename):
2014 2016 self.ui.debug(
2015 2017 b"filtering %s through %s\n"
2016 2018 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2017 2019 )
2018 2020 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2019 2021 break
2020 2022
2021 2023 return data
2022 2024
2023 2025 @unfilteredpropertycache
2024 2026 def _encodefilterpats(self):
2025 2027 return self._loadfilter(b'encode')
2026 2028
2027 2029 @unfilteredpropertycache
2028 2030 def _decodefilterpats(self):
2029 2031 return self._loadfilter(b'decode')
2030 2032
2031 2033 def adddatafilter(self, name, filter):
2032 2034 self._datafilters[name] = filter
2033 2035
2034 2036 def wread(self, filename):
2035 2037 if self.wvfs.islink(filename):
2036 2038 data = self.wvfs.readlink(filename)
2037 2039 else:
2038 2040 data = self.wvfs.read(filename)
2039 2041 return self._filter(self._encodefilterpats, filename, data)
2040 2042
2041 2043 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2042 2044 """write ``data`` into ``filename`` in the working directory
2043 2045
2044 2046 This returns the length of the written (maybe decoded) data.
2045 2047 """
2046 2048 data = self._filter(self._decodefilterpats, filename, data)
2047 2049 if b'l' in flags:
2048 2050 self.wvfs.symlink(data, filename)
2049 2051 else:
2050 2052 self.wvfs.write(
2051 2053 filename, data, backgroundclose=backgroundclose, **kwargs
2052 2054 )
2053 2055 if b'x' in flags:
2054 2056 self.wvfs.setflags(filename, False, True)
2055 2057 else:
2056 2058 self.wvfs.setflags(filename, False, False)
2057 2059 return len(data)
2058 2060
2059 2061 def wwritedata(self, filename, data):
2060 2062 return self._filter(self._decodefilterpats, filename, data)
2061 2063
2062 2064 def currenttransaction(self):
2063 2065 """return the current transaction or None if non exists"""
2064 2066 if self._transref:
2065 2067 tr = self._transref()
2066 2068 else:
2067 2069 tr = None
2068 2070
2069 2071 if tr and tr.running():
2070 2072 return tr
2071 2073 return None
2072 2074
2073 2075 def transaction(self, desc, report=None):
2074 2076 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2075 2077 b'devel', b'check-locks'
2076 2078 ):
2077 2079 if self._currentlock(self._lockref) is None:
2078 2080 raise error.ProgrammingError(b'transaction requires locking')
2079 2081 tr = self.currenttransaction()
2080 2082 if tr is not None:
2081 2083 return tr.nest(name=desc)
2082 2084
2083 2085 # abort here if the journal already exists
2084 2086 if self.svfs.exists(b"journal"):
2085 2087 raise error.RepoError(
2086 2088 _(b"abandoned transaction found"),
2087 2089 hint=_(b"run 'hg recover' to clean up transaction"),
2088 2090 )
2089 2091
2090 2092 idbase = b"%.40f#%f" % (random.random(), time.time())
2091 2093 ha = hex(hashutil.sha1(idbase).digest())
2092 2094 txnid = b'TXN:' + ha
2093 2095 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2094 2096
2095 2097 self._writejournal(desc)
2096 2098 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2097 2099 if report:
2098 2100 rp = report
2099 2101 else:
2100 2102 rp = self.ui.warn
2101 2103 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2102 2104 # we must avoid a cyclic reference between repo and transaction.
2103 2105 reporef = weakref.ref(self)
2104 2106 # Code to track tag movement
2105 2107 #
2106 2108 # Since tags are all handled as file content, it is actually quite hard
2107 2109 # to track these movements from a code perspective. So we fall back to
2108 2110 # tracking at the repository level. One could envision tracking changes
2109 2111 # to the '.hgtags' file through changegroup application, but that fails
2110 2112 # to cope with cases where a transaction exposes new heads without a
2111 2113 # changegroup being involved (eg: phase movement).
2112 2114 #
2113 2115 # For now, we gate the feature behind a flag since this likely comes
2114 2116 # with performance impacts. The current code runs more often than needed
2115 2117 # and does not use caches as much as it could. The current focus is on
2116 2118 # the behavior of the feature so we disable it by default. The flag
2117 2119 # will be removed when we are happy with the performance impact.
2118 2120 #
2119 2121 # Once this feature is no longer experimental move the following
2120 2122 # documentation to the appropriate help section:
2121 2123 #
2122 2124 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2123 2125 # tags (new or changed or deleted tags). In addition the details of
2124 2126 # these changes are made available in a file at:
2125 2127 # ``REPOROOT/.hg/changes/tags.changes``.
2126 2128 # Make sure you check for HG_TAG_MOVED before reading that file as it
2127 2129 # might exist from a previous transaction even if no tags were touched
2128 2130 # in this one. Changes are recorded in a line-based format::
2129 2131 #
2130 2132 # <action> <hex-node> <tag-name>\n
2131 2133 #
2132 2134 # Actions are defined as follows:
2133 2135 # "-R": tag is removed,
2134 2136 # "+A": tag is added,
2135 2137 # "-M": tag is moved (old value),
2136 2138 # "+M": tag is moved (new value),
2137 2139 tracktags = lambda x: None
2138 2140 # experimental config: experimental.hook-track-tags
2139 2141 shouldtracktags = self.ui.configbool(
2140 2142 b'experimental', b'hook-track-tags'
2141 2143 )
2142 2144 if desc != b'strip' and shouldtracktags:
2143 2145 oldheads = self.changelog.headrevs()
2144 2146
2145 2147 def tracktags(tr2):
2146 2148 repo = reporef()
2147 2149 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2148 2150 newheads = repo.changelog.headrevs()
2149 2151 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2150 2152 # note: we compare lists here.
2151 2153 # As we do it only once, building a set would not be cheaper
2152 2154 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2153 2155 if changes:
2154 2156 tr2.hookargs[b'tag_moved'] = b'1'
2155 2157 with repo.vfs(
2156 2158 b'changes/tags.changes', b'w', atomictemp=True
2157 2159 ) as changesfile:
2158 2160 # note: we do not register the file with the transaction
2159 2161 # because we need it to still exist when the transaction
2160 2162 # is closed (for txnclose hooks)
2161 2163 tagsmod.writediff(changesfile, changes)
2162 2164
2163 2165 def validate(tr2):
2164 2166 """will run pre-closing hooks"""
2165 2167 # XXX the transaction API is a bit lacking here so we take a hacky
2166 2168 # path for now
2167 2169 #
2168 2170 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2169 2171 # dict is copied before these run. In addition we need the data
2170 2172 # available to in-memory hooks too.
2171 2173 #
2172 2174 # Moreover, we also need to make sure this runs before txnclose
2173 2175 # hooks and there is no "pending" mechanism that would execute
2174 2176 # logic only if hooks are about to run.
2175 2177 #
2176 2178 # Fixing this limitation of the transaction is also needed to track
2177 2179 # other families of changes (bookmarks, phases, obsolescence).
2178 2180 #
2179 2181 # This will have to be fixed before we remove the experimental
2180 2182 # gating.
2181 2183 tracktags(tr2)
2182 2184 repo = reporef()
2183 2185
2184 2186 singleheadopt = (b'experimental', b'single-head-per-branch')
2185 2187 singlehead = repo.ui.configbool(*singleheadopt)
2186 2188 if singlehead:
2187 2189 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2188 2190 accountclosed = singleheadsub.get(
2189 2191 b"account-closed-heads", False
2190 2192 )
2191 2193 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2192 2194 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2193 2195 for name, (old, new) in sorted(
2194 2196 tr.changes[b'bookmarks'].items()
2195 2197 ):
2196 2198 args = tr.hookargs.copy()
2197 2199 args.update(bookmarks.preparehookargs(name, old, new))
2198 2200 repo.hook(
2199 2201 b'pretxnclose-bookmark',
2200 2202 throw=True,
2201 2203 **pycompat.strkwargs(args)
2202 2204 )
2203 2205 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2204 2206 cl = repo.unfiltered().changelog
2205 2207 for revs, (old, new) in tr.changes[b'phases']:
2206 2208 for rev in revs:
2207 2209 args = tr.hookargs.copy()
2208 2210 node = hex(cl.node(rev))
2209 2211 args.update(phases.preparehookargs(node, old, new))
2210 2212 repo.hook(
2211 2213 b'pretxnclose-phase',
2212 2214 throw=True,
2213 2215 **pycompat.strkwargs(args)
2214 2216 )
2215 2217
2216 2218 repo.hook(
2217 2219 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2218 2220 )
2219 2221
2220 2222 def releasefn(tr, success):
2221 2223 repo = reporef()
2222 2224 if repo is None:
2223 2225 # If the repo has been GC'd (and this release function is being
2224 2226 # called from transaction.__del__), there's not much we can do,
2225 2227 # so just leave the unfinished transaction there and let the
2226 2228 # user run `hg recover`.
2227 2229 return
2228 2230 if success:
2229 2231 # this should be explicitly invoked here, because
2230 2232 # in-memory changes aren't written out when the
2231 2233 # transaction closes, if tr.addfilegenerator (via
2232 2234 # dirstate.write or so) isn't invoked while the
2233 2235 # transaction is running
2234 2236 repo.dirstate.write(None)
2235 2237 else:
2236 2238 # discard all changes (including ones already written
2237 2239 # out) in this transaction
2238 2240 narrowspec.restorebackup(self, b'journal.narrowspec')
2239 2241 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2240 2242 repo.dirstate.restorebackup(None, b'journal.dirstate')
2241 2243
2242 2244 repo.invalidate(clearfilecache=True)
2243 2245
2244 2246 tr = transaction.transaction(
2245 2247 rp,
2246 2248 self.svfs,
2247 2249 vfsmap,
2248 2250 b"journal",
2249 2251 b"undo",
2250 2252 aftertrans(renames),
2251 2253 self.store.createmode,
2252 2254 validator=validate,
2253 2255 releasefn=releasefn,
2254 2256 checkambigfiles=_cachedfiles,
2255 2257 name=desc,
2256 2258 )
2257 2259 tr.changes[b'origrepolen'] = len(self)
2258 2260 tr.changes[b'obsmarkers'] = set()
2259 2261 tr.changes[b'phases'] = []
2260 2262 tr.changes[b'bookmarks'] = {}
2261 2263
2262 2264 tr.hookargs[b'txnid'] = txnid
2263 2265 tr.hookargs[b'txnname'] = desc
2264 2266 tr.hookargs[b'changes'] = tr.changes
2265 2267 # note: writing the fncache only during finalize means that the file is
2266 2268 # outdated when running hooks. As fncache is used for streaming clone,
2267 2269 # this is not expected to break anything that happens during the hooks.
2268 2270 tr.addfinalize(b'flush-fncache', self.store.write)
2269 2271
2270 2272 def txnclosehook(tr2):
2271 2273 """To be run if transaction is successful, will schedule a hook run
2272 2274 """
2273 2275 # Don't reference tr2 in hook() so we don't hold a reference.
2274 2276 # This reduces memory consumption when there are multiple
2275 2277 # transactions per lock. This can likely go away if issue5045
2276 2278 # fixes the function accumulation.
2277 2279 hookargs = tr2.hookargs
2278 2280
2279 2281 def hookfunc(unused_success):
2280 2282 repo = reporef()
2281 2283 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2282 2284 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2283 2285 for name, (old, new) in bmchanges:
2284 2286 args = tr.hookargs.copy()
2285 2287 args.update(bookmarks.preparehookargs(name, old, new))
2286 2288 repo.hook(
2287 2289 b'txnclose-bookmark',
2288 2290 throw=False,
2289 2291 **pycompat.strkwargs(args)
2290 2292 )
2291 2293
2292 2294 if hook.hashook(repo.ui, b'txnclose-phase'):
2293 2295 cl = repo.unfiltered().changelog
2294 2296 phasemv = sorted(
2295 2297 tr.changes[b'phases'], key=lambda r: r[0][0]
2296 2298 )
2297 2299 for revs, (old, new) in phasemv:
2298 2300 for rev in revs:
2299 2301 args = tr.hookargs.copy()
2300 2302 node = hex(cl.node(rev))
2301 2303 args.update(phases.preparehookargs(node, old, new))
2302 2304 repo.hook(
2303 2305 b'txnclose-phase',
2304 2306 throw=False,
2305 2307 **pycompat.strkwargs(args)
2306 2308 )
2307 2309
2308 2310 repo.hook(
2309 2311 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2310 2312 )
2311 2313
2312 2314 reporef()._afterlock(hookfunc)
2313 2315
2314 2316 tr.addfinalize(b'txnclose-hook', txnclosehook)
2315 2317 # Include a leading "-" to make it happen before the transaction summary
2316 2318 # reports registered via scmutil.registersummarycallback() whose names
2317 2319 # are 00-txnreport etc. That way, the caches will be warm when the
2318 2320 # callbacks run.
2319 2321 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2320 2322
2321 2323 def txnaborthook(tr2):
2322 2324 """To be run if transaction is aborted
2323 2325 """
2324 2326 reporef().hook(
2325 2327 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2326 2328 )
2327 2329
2328 2330 tr.addabort(b'txnabort-hook', txnaborthook)
2329 2331 # avoid eager cache invalidation. in-memory data should be identical
2330 2332 # to stored data if transaction has no error.
2331 2333 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2332 2334 self._transref = weakref.ref(tr)
2333 2335 scmutil.registersummarycallback(self, tr, desc)
2334 2336 return tr
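
# Illustrative sketch (not part of the module): callers must hold the
# store lock before opening a transaction, and the transaction works as a
# context manager (see commit() further below)::
#
#   with repo.lock():
#       with repo.transaction(b'my-operation') as tr:
#           ...  # mutate the store; the transaction closes on normal exit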
2335 2337
2336 2338 def _journalfiles(self):
2337 2339 return (
2338 2340 (self.svfs, b'journal'),
2339 2341 (self.svfs, b'journal.narrowspec'),
2340 2342 (self.vfs, b'journal.narrowspec.dirstate'),
2341 2343 (self.vfs, b'journal.dirstate'),
2342 2344 (self.vfs, b'journal.branch'),
2343 2345 (self.vfs, b'journal.desc'),
2344 2346 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2345 2347 (self.svfs, b'journal.phaseroots'),
2346 2348 )
2347 2349
2348 2350 def undofiles(self):
2349 2351 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2350 2352
2351 2353 @unfilteredmethod
2352 2354 def _writejournal(self, desc):
2353 2355 self.dirstate.savebackup(None, b'journal.dirstate')
2354 2356 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2355 2357 narrowspec.savebackup(self, b'journal.narrowspec')
2356 2358 self.vfs.write(
2357 2359 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2358 2360 )
2359 2361 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2360 2362 bookmarksvfs = bookmarks.bookmarksvfs(self)
2361 2363 bookmarksvfs.write(
2362 2364 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2363 2365 )
2364 2366 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2365 2367
2366 2368 def recover(self):
2367 2369 with self.lock():
2368 2370 if self.svfs.exists(b"journal"):
2369 2371 self.ui.status(_(b"rolling back interrupted transaction\n"))
2370 2372 vfsmap = {
2371 2373 b'': self.svfs,
2372 2374 b'plain': self.vfs,
2373 2375 }
2374 2376 transaction.rollback(
2375 2377 self.svfs,
2376 2378 vfsmap,
2377 2379 b"journal",
2378 2380 self.ui.warn,
2379 2381 checkambigfiles=_cachedfiles,
2380 2382 )
2381 2383 self.invalidate()
2382 2384 return True
2383 2385 else:
2384 2386 self.ui.warn(_(b"no interrupted transaction available\n"))
2385 2387 return False
2386 2388
2387 2389 def rollback(self, dryrun=False, force=False):
2388 2390 wlock = lock = dsguard = None
2389 2391 try:
2390 2392 wlock = self.wlock()
2391 2393 lock = self.lock()
2392 2394 if self.svfs.exists(b"undo"):
2393 2395 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2394 2396
2395 2397 return self._rollback(dryrun, force, dsguard)
2396 2398 else:
2397 2399 self.ui.warn(_(b"no rollback information available\n"))
2398 2400 return 1
2399 2401 finally:
2400 2402 release(dsguard, lock, wlock)
2401 2403
2402 2404 @unfilteredmethod # Until we get smarter cache management
2403 2405 def _rollback(self, dryrun, force, dsguard):
2404 2406 ui = self.ui
2405 2407 try:
2406 2408 args = self.vfs.read(b'undo.desc').splitlines()
2407 2409 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2408 2410 if len(args) >= 3:
2409 2411 detail = args[2]
2410 2412 oldtip = oldlen - 1
2411 2413
2412 2414 if detail and ui.verbose:
2413 2415 msg = _(
2414 2416 b'repository tip rolled back to revision %d'
2415 2417 b' (undo %s: %s)\n'
2416 2418 ) % (oldtip, desc, detail)
2417 2419 else:
2418 2420 msg = _(
2419 2421 b'repository tip rolled back to revision %d (undo %s)\n'
2420 2422 ) % (oldtip, desc)
2421 2423 except IOError:
2422 2424 msg = _(b'rolling back unknown transaction\n')
2423 2425 desc = None
2424 2426
2425 2427 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2426 2428 raise error.Abort(
2427 2429 _(
2428 2430 b'rollback of last commit while not checked out '
2429 2431 b'may lose data'
2430 2432 ),
2431 2433 hint=_(b'use -f to force'),
2432 2434 )
2433 2435
2434 2436 ui.status(msg)
2435 2437 if dryrun:
2436 2438 return 0
2437 2439
2438 2440 parents = self.dirstate.parents()
2439 2441 self.destroying()
2440 2442 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2441 2443 transaction.rollback(
2442 2444 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2443 2445 )
2444 2446 bookmarksvfs = bookmarks.bookmarksvfs(self)
2445 2447 if bookmarksvfs.exists(b'undo.bookmarks'):
2446 2448 bookmarksvfs.rename(
2447 2449 b'undo.bookmarks', b'bookmarks', checkambig=True
2448 2450 )
2449 2451 if self.svfs.exists(b'undo.phaseroots'):
2450 2452 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2451 2453 self.invalidate()
2452 2454
2453 2455 has_node = self.changelog.index.has_node
2454 2456 parentgone = any(not has_node(p) for p in parents)
2455 2457 if parentgone:
2456 2458 # prevent dirstateguard from overwriting already restored one
2457 2459 dsguard.close()
2458 2460
2459 2461 narrowspec.restorebackup(self, b'undo.narrowspec')
2460 2462 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2461 2463 self.dirstate.restorebackup(None, b'undo.dirstate')
2462 2464 try:
2463 2465 branch = self.vfs.read(b'undo.branch')
2464 2466 self.dirstate.setbranch(encoding.tolocal(branch))
2465 2467 except IOError:
2466 2468 ui.warn(
2467 2469 _(
2468 2470 b'named branch could not be reset: '
2469 2471 b'current branch is still \'%s\'\n'
2470 2472 )
2471 2473 % self.dirstate.branch()
2472 2474 )
2473 2475
2474 2476 parents = tuple([p.rev() for p in self[None].parents()])
2475 2477 if len(parents) > 1:
2476 2478 ui.status(
2477 2479 _(
2478 2480 b'working directory now based on '
2479 2481 b'revisions %d and %d\n'
2480 2482 )
2481 2483 % parents
2482 2484 )
2483 2485 else:
2484 2486 ui.status(
2485 2487 _(b'working directory now based on revision %d\n') % parents
2486 2488 )
2487 2489 mergestatemod.mergestate.clean(self, self[b'.'].node())
2488 2490
2489 2491 # TODO: if we know which new heads may result from this rollback, pass
2490 2492 # them to destroy(), which will prevent the branchhead cache from being
2491 2493 # invalidated.
2492 2494 self.destroyed()
2493 2495 return 0
2494 2496
2495 2497 def _buildcacheupdater(self, newtransaction):
2496 2498 """called during transaction to build the callback updating cache
2497 2499
2498 2500 Lives on the repository to help extension who might want to augment
2499 2501 this logic. For this purpose, the created transaction is passed to the
2500 2502 method.
2501 2503 """
2502 2504 # we must avoid cyclic reference between repo and transaction.
2503 2505 reporef = weakref.ref(self)
2504 2506
2505 2507 def updater(tr):
2506 2508 repo = reporef()
2507 2509 repo.updatecaches(tr)
2508 2510
2509 2511 return updater
2510 2512
2511 2513 @unfilteredmethod
2512 2514 def updatecaches(self, tr=None, full=False):
2513 2515 """warm appropriate caches
2514 2516
2515 2517 If this function is called after a transaction closed, the transaction
2516 2518 will be available in the 'tr' argument. This can be used to selectively
2517 2519 update caches relevant to the changes in that transaction.
2518 2520
2519 2521 If 'full' is set, make sure all caches the function knows about have
2520 2522 up-to-date data, even the ones usually loaded more lazily.
2521 2523 """
2522 2524 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2523 2525 # During strip, many caches are invalid but
2524 2526 # later call to `destroyed` will refresh them.
2525 2527 return
2526 2528
2527 2529 if tr is None or tr.changes[b'origrepolen'] < len(self):
2528 2530 # accessing the 'served' branchmap should refresh all the others.
2529 2531 self.ui.debug(b'updating the branch cache\n')
2530 2532 self.filtered(b'served').branchmap()
2531 2533 self.filtered(b'served.hidden').branchmap()
2532 2534
2533 2535 if full:
2534 2536 unfi = self.unfiltered()
2535 2537
2536 2538 self.changelog.update_caches(transaction=tr)
2537 2539 self.manifestlog.update_caches(transaction=tr)
2538 2540
2539 2541 rbc = unfi.revbranchcache()
2540 2542 for r in unfi.changelog:
2541 2543 rbc.branchinfo(r)
2542 2544 rbc.write()
2543 2545
2544 2546 # ensure the working copy parents are in the manifestfulltextcache
2545 2547 for ctx in self[b'.'].parents():
2546 2548 ctx.manifest() # accessing the manifest is enough
2547 2549
2548 2550 # accessing fnode cache warms the cache
2549 2551 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2550 2552 # accessing tags warm the cache
2551 2553 self.tags()
2552 2554 self.filtered(b'served').tags()
2553 2555
2554 2556 # The `full` arg is documented as updating even the lazily-loaded
2555 2557 # caches immediately, so we're forcing a write to cause these caches
2556 2558 # to be warmed up even if they haven't explicitly been requested
2557 2559 # yet (if they've never been used by hg, they won't ever have been
2558 2560 # written, even if they're a subset of another kind of cache that
2559 2561 # *has* been used).
2560 2562 for filt in repoview.filtertable.keys():
2561 2563 filtered = self.filtered(filt)
2562 2564 filtered.branchmap().write(filtered)
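
# Illustrative note: ``hg debugupdatecaches`` is the usual entry point for
# the ``full=True`` path above; extensions may also call it directly::
#
#   repo.updatecaches(full=True)  # warm every cache this repo knows about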
2563 2565
2564 2566 def invalidatecaches(self):
2565 2567
2566 2568 if '_tagscache' in vars(self):
2567 2569 # can't use delattr on proxy
2568 2570 del self.__dict__['_tagscache']
2569 2571
2570 2572 self._branchcaches.clear()
2571 2573 self.invalidatevolatilesets()
2572 2574 self._sparsesignaturecache.clear()
2573 2575
2574 2576 def invalidatevolatilesets(self):
2575 2577 self.filteredrevcache.clear()
2576 2578 obsolete.clearobscaches(self)
2577 2579 self._quick_access_changeid_invalidate()
2578 2580
2579 2581 def invalidatedirstate(self):
2580 2582 '''Invalidates the dirstate, causing the next call to dirstate
2581 2583 to check if it was modified since the last time it was read,
2582 2584 rereading it if it has.
2583 2585
2584 2586 This is different from dirstate.invalidate() in that it doesn't always
2585 2587 reread the dirstate. Use dirstate.invalidate() if you want to
2586 2588 explicitly read the dirstate again (i.e. restoring it to a previous
2587 2589 known good state).'''
2588 2590 if hasunfilteredcache(self, 'dirstate'):
2589 2591 for k in self.dirstate._filecache:
2590 2592 try:
2591 2593 delattr(self.dirstate, k)
2592 2594 except AttributeError:
2593 2595 pass
2594 2596 delattr(self.unfiltered(), 'dirstate')
2595 2597
2596 2598 def invalidate(self, clearfilecache=False):
2597 2599 '''Invalidates both store and non-store parts other than dirstate
2598 2600
2599 2601 If a transaction is running, invalidation of store is omitted,
2600 2602 because discarding in-memory changes might cause inconsistency
2601 2603 (e.g. an incomplete fncache causes unintentional failure, but
2602 2604 a redundant one doesn't).
2603 2605 '''
2604 2606 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2605 2607 for k in list(self._filecache.keys()):
2606 2608 # dirstate is invalidated separately in invalidatedirstate()
2607 2609 if k == b'dirstate':
2608 2610 continue
2609 2611 if (
2610 2612 k == b'changelog'
2611 2613 and self.currenttransaction()
2612 2614 and self.changelog._delayed
2613 2615 ):
2614 2616 # The changelog object may store unwritten revisions. We don't
2615 2617 # want to lose them.
2616 2618 # TODO: Solve the problem instead of working around it.
2617 2619 continue
2618 2620
2619 2621 if clearfilecache:
2620 2622 del self._filecache[k]
2621 2623 try:
2622 2624 delattr(unfiltered, k)
2623 2625 except AttributeError:
2624 2626 pass
2625 2627 self.invalidatecaches()
2626 2628 if not self.currenttransaction():
2627 2629 # TODO: Changing contents of store outside transaction
2628 2630 # causes inconsistency. We should make in-memory store
2629 2631 # changes detectable, and abort if changed.
2630 2632 self.store.invalidatecaches()
2631 2633
2632 2634 def invalidateall(self):
2633 2635 '''Fully invalidates both store and non-store parts, causing the
2634 2636 subsequent operation to reread any outside changes.'''
2635 2637 # extension should hook this to invalidate its caches
2636 2638 self.invalidate()
2637 2639 self.invalidatedirstate()
2638 2640
2639 2641 @unfilteredmethod
2640 2642 def _refreshfilecachestats(self, tr):
2641 2643 """Reload stats of cached files so that they are flagged as valid"""
2642 2644 for k, ce in self._filecache.items():
2643 2645 k = pycompat.sysstr(k)
2644 2646 if k == 'dirstate' or k not in self.__dict__:
2645 2647 continue
2646 2648 ce.refresh()
2647 2649
2648 2650 def _lock(
2649 2651 self,
2650 2652 vfs,
2651 2653 lockname,
2652 2654 wait,
2653 2655 releasefn,
2654 2656 acquirefn,
2655 2657 desc,
2656 2658 inheritchecker=None,
2657 2659 parentenvvar=None,
2658 2660 ):
2659 2661 parentlock = None
2660 2662 # the contents of parentenvvar are used by the underlying lock to
2661 2663 # determine whether it can be inherited
2662 2664 if parentenvvar is not None:
2663 2665 parentlock = encoding.environ.get(parentenvvar)
2664 2666
2665 2667 timeout = 0
2666 2668 warntimeout = 0
2667 2669 if wait:
2668 2670 timeout = self.ui.configint(b"ui", b"timeout")
2669 2671 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2670 2672 # internal config: ui.signal-safe-lock
2671 2673 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2672 2674
2673 2675 l = lockmod.trylock(
2674 2676 self.ui,
2675 2677 vfs,
2676 2678 lockname,
2677 2679 timeout,
2678 2680 warntimeout,
2679 2681 releasefn=releasefn,
2680 2682 acquirefn=acquirefn,
2681 2683 desc=desc,
2682 2684 inheritchecker=inheritchecker,
2683 2685 parentlock=parentlock,
2684 2686 signalsafe=signalsafe,
2685 2687 )
2686 2688 return l
2687 2689
2688 2690 def _afterlock(self, callback):
2689 2691 """add a callback to be run when the repository is fully unlocked
2690 2692
2691 2693 The callback will be executed when the outermost lock is released
2692 2694 (with wlock being higher level than 'lock')."""
2693 2695 for ref in (self._wlockref, self._lockref):
2694 2696 l = ref and ref()
2695 2697 if l and l.held:
2696 2698 l.postrelease.append(callback)
2697 2699 break
2698 2700 else: # no lock has been found.
2699 2701 callback(True)
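
# Illustrative sketch (``notify`` stands for a hypothetical callable that
# takes the usual ``success`` boolean)::
#
#   repo._afterlock(notify)  # runs when the outermost lock is released,
#                            # or right away if no lock is currently held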
2700 2702
2701 2703 def lock(self, wait=True):
2702 2704 '''Lock the repository store (.hg/store) and return a weak reference
2703 2705 to the lock. Use this before modifying the store (e.g. committing or
2704 2706 stripping). If you are opening a transaction, get a lock as well.
2705 2707
2706 2708 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2707 2709 'wlock' first to avoid a deadlock hazard.'''
2708 2710 l = self._currentlock(self._lockref)
2709 2711 if l is not None:
2710 2712 l.lock()
2711 2713 return l
2712 2714
2713 2715 l = self._lock(
2714 2716 vfs=self.svfs,
2715 2717 lockname=b"lock",
2716 2718 wait=wait,
2717 2719 releasefn=None,
2718 2720 acquirefn=self.invalidate,
2719 2721 desc=_(b'repository %s') % self.origroot,
2720 2722 )
2721 2723 self._lockref = weakref.ref(l)
2722 2724 return l
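
# Illustrative sketch of the required ordering (acquire wlock before
# lock; the body is hypothetical)::
#
#   with repo.wlock(), repo.lock():
#       ...  # safe to touch both the working copy and the store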
2723 2725
2724 2726 def _wlockchecktransaction(self):
2725 2727 if self.currenttransaction() is not None:
2726 2728 raise error.LockInheritanceContractViolation(
2727 2729 b'wlock cannot be inherited in the middle of a transaction'
2728 2730 )
2729 2731
2730 2732 def wlock(self, wait=True):
2731 2733 '''Lock the non-store parts of the repository (everything under
2732 2734 .hg except .hg/store) and return a weak reference to the lock.
2733 2735
2734 2736 Use this before modifying files in .hg.
2735 2737
2736 2738 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2737 2739 'wlock' first to avoid a deadlock hazard.'''
2738 2740 l = self._wlockref and self._wlockref()
2739 2741 if l is not None and l.held:
2740 2742 l.lock()
2741 2743 return l
2742 2744
2743 2745 # We do not need to check for non-waiting lock acquisition. Such
2744 2746 # acquisitions would not cause a deadlock as they would just fail.
2745 2747 if wait and (
2746 2748 self.ui.configbool(b'devel', b'all-warnings')
2747 2749 or self.ui.configbool(b'devel', b'check-locks')
2748 2750 ):
2749 2751 if self._currentlock(self._lockref) is not None:
2750 2752 self.ui.develwarn(b'"wlock" acquired after "lock"')
2751 2753
2752 2754 def unlock():
2753 2755 if self.dirstate.pendingparentchange():
2754 2756 self.dirstate.invalidate()
2755 2757 else:
2756 2758 self.dirstate.write(None)
2757 2759
2758 2760 self._filecache[b'dirstate'].refresh()
2759 2761
2760 2762 l = self._lock(
2761 2763 self.vfs,
2762 2764 b"wlock",
2763 2765 wait,
2764 2766 unlock,
2765 2767 self.invalidatedirstate,
2766 2768 _(b'working directory of %s') % self.origroot,
2767 2769 inheritchecker=self._wlockchecktransaction,
2768 2770 parentenvvar=b'HG_WLOCK_LOCKER',
2769 2771 )
2770 2772 self._wlockref = weakref.ref(l)
2771 2773 return l
2772 2774
2773 2775 def _currentlock(self, lockref):
2774 2776 """Returns the lock if it's held, or None if it's not."""
2775 2777 if lockref is None:
2776 2778 return None
2777 2779 l = lockref()
2778 2780 if l is None or not l.held:
2779 2781 return None
2780 2782 return l
2781 2783
2782 2784 def currentwlock(self):
2783 2785 """Returns the wlock if it's held, or None if it's not."""
2784 2786 return self._currentlock(self._wlockref)
2785 2787
2786 2788 def checkcommitpatterns(self, wctx, match, status, fail):
2787 2789 """check for commit arguments that aren't committable"""
2788 2790 if match.isexact() or match.prefix():
2789 2791 matched = set(status.modified + status.added + status.removed)
2790 2792
2791 2793 for f in match.files():
2792 2794 f = self.dirstate.normalize(f)
2793 2795 if f == b'.' or f in matched or f in wctx.substate:
2794 2796 continue
2795 2797 if f in status.deleted:
2796 2798 fail(f, _(b'file not found!'))
2797 2799 # Is it a directory that exists or used to exist?
2798 2800 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2799 2801 d = f + b'/'
2800 2802 for mf in matched:
2801 2803 if mf.startswith(d):
2802 2804 break
2803 2805 else:
2804 2806 fail(f, _(b"no match under directory!"))
2805 2807 elif f not in self.dirstate:
2806 2808 fail(f, _(b"file not tracked!"))
2807 2809
2808 2810 @unfilteredmethod
2809 2811 def commit(
2810 2812 self,
2811 2813 text=b"",
2812 2814 user=None,
2813 2815 date=None,
2814 2816 match=None,
2815 2817 force=False,
2816 2818 editor=None,
2817 2819 extra=None,
2818 2820 ):
2819 2821 """Add a new revision to current repository.
2820 2822
2821 2823 Revision information is gathered from the working directory,
2822 2824 match can be used to filter the committed files. If editor is
2823 2825 supplied, it is called to get a commit message.
2824 2826 """
2825 2827 if extra is None:
2826 2828 extra = {}
2827 2829
2828 2830 def fail(f, msg):
2829 2831 raise error.Abort(b'%s: %s' % (f, msg))
2830 2832
2831 2833 if not match:
2832 2834 match = matchmod.always()
2833 2835
2834 2836 if not force:
2835 2837 match.bad = fail
2836 2838
2837 2839 # lock() for recent changelog (see issue4368)
2838 2840 with self.wlock(), self.lock():
2839 2841 wctx = self[None]
2840 2842 merge = len(wctx.parents()) > 1
2841 2843
2842 2844 if not force and merge and not match.always():
2843 2845 raise error.Abort(
2844 2846 _(
2845 2847 b'cannot partially commit a merge '
2846 2848 b'(do not specify files or patterns)'
2847 2849 )
2848 2850 )
2849 2851
2850 2852 status = self.status(match=match, clean=force)
2851 2853 if force:
2852 2854 status.modified.extend(
2853 2855 status.clean
2854 2856 ) # mq may commit clean files
2855 2857
2856 2858 # check subrepos
2857 2859 subs, commitsubs, newstate = subrepoutil.precommit(
2858 2860 self.ui, wctx, status, match, force=force
2859 2861 )
2860 2862
2861 2863 # make sure all explicit patterns are matched
2862 2864 if not force:
2863 2865 self.checkcommitpatterns(wctx, match, status, fail)
2864 2866
2865 2867 cctx = context.workingcommitctx(
2866 2868 self, status, text, user, date, extra
2867 2869 )
2868 2870
2869 2871 ms = mergestatemod.mergestate.read(self)
2870 2872 mergeutil.checkunresolved(ms)
2871 2873
2872 2874 # internal config: ui.allowemptycommit
2873 2875 if cctx.isempty() and not self.ui.configbool(
2874 2876 b'ui', b'allowemptycommit'
2875 2877 ):
2876 2878 self.ui.debug(b'nothing to commit, clearing merge state\n')
2877 2879 ms.reset()
2878 2880 return None
2879 2881
2880 2882 if merge and cctx.deleted():
2881 2883 raise error.Abort(_(b"cannot commit merge with missing files"))
2882 2884
2883 2885 if editor:
2884 2886 cctx._text = editor(self, cctx, subs)
2885 2887 edited = text != cctx._text
2886 2888
2887 2889 # Save commit message in case this transaction gets rolled back
2888 2890 # (e.g. by a pretxncommit hook). Leave the content alone on
2889 2891 # the assumption that the user will use the same editor again.
2890 2892 msgfn = self.savecommitmessage(cctx._text)
2891 2893
2892 2894 # commit subs and write new state
2893 2895 if subs:
2894 2896 uipathfn = scmutil.getuipathfn(self)
2895 2897 for s in sorted(commitsubs):
2896 2898 sub = wctx.sub(s)
2897 2899 self.ui.status(
2898 2900 _(b'committing subrepository %s\n')
2899 2901 % uipathfn(subrepoutil.subrelpath(sub))
2900 2902 )
2901 2903 sr = sub.commit(cctx._text, user, date)
2902 2904 newstate[s] = (newstate[s][0], sr)
2903 2905 subrepoutil.writestate(self, newstate)
2904 2906
2905 2907 p1, p2 = self.dirstate.parents()
2906 2908 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2907 2909 try:
2908 2910 self.hook(
2909 2911 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2910 2912 )
2911 2913 with self.transaction(b'commit'):
2912 2914 ret = self.commitctx(cctx, True)
2913 2915 # update bookmarks, dirstate and mergestate
2914 2916 bookmarks.update(self, [p1, p2], ret)
2915 2917 cctx.markcommitted(ret)
2916 2918 ms.reset()
2917 2919 except: # re-raises
2918 2920 if edited:
2919 2921 self.ui.write(
2920 2922 _(b'note: commit message saved in %s\n') % msgfn
2921 2923 )
2922 2924 self.ui.write(
2923 2925 _(
2924 2926 b"note: use 'hg commit --logfile "
2925 2927 b".hg/last-message.txt --edit' to reuse it\n"
2926 2928 )
2927 2929 )
2928 2930 raise
2929 2931
2930 2932 def commithook(unused_success):
2931 2933 # hack for commands that use a temporary commit (eg: histedit)
2932 2934 # the temporary commit may already be stripped before the hook runs
2933 2935 if self.changelog.hasnode(ret):
2934 2936 self.hook(
2935 2937 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2936 2938 )
2937 2939
2938 2940 self._afterlock(commithook)
2939 2941 return ret
2940 2942
2941 2943 @unfilteredmethod
2942 2944 def commitctx(self, ctx, error=False, origctx=None):
2943 2945 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2944 2946
2945 2947 @unfilteredmethod
2946 2948 def destroying(self):
2947 2949 '''Inform the repository that nodes are about to be destroyed.
2948 2950 Intended for use by strip and rollback, so there's a common
2949 2951 place for anything that has to be done before destroying history.
2950 2952
2951 2953 This is mostly useful for saving state that is in memory and waiting
2952 2954 to be flushed when the current lock is released. Because a call to
2953 2955 destroyed is imminent, the repo will be invalidated causing those
2954 2956 changes to stay in memory (waiting for the next unlock), or vanish
2955 2957 completely.
2956 2958 '''
2957 2959 # When using the same lock to commit and strip, the phasecache is left
2958 2960 # dirty after committing. Then when we strip, the repo is invalidated,
2959 2961 # causing those changes to disappear.
2960 2962 if '_phasecache' in vars(self):
2961 2963 self._phasecache.write()
2962 2964
2963 2965 @unfilteredmethod
2964 2966 def destroyed(self):
2965 2967 '''Inform the repository that nodes have been destroyed.
2966 2968 Intended for use by strip and rollback, so there's a common
2967 2969 place for anything that has to be done after destroying history.
2968 2970 '''
2969 2971 # When one tries to:
2970 2972 # 1) destroy nodes thus calling this method (e.g. strip)
2971 2973 # 2) use phasecache somewhere (e.g. commit)
2972 2974 #
2973 2975 # then 2) will fail because the phasecache contains nodes that were
2974 2976 # removed. We can either remove phasecache from the filecache,
2975 2977 # causing it to reload next time it is accessed, or simply filter
2976 2978 # the removed nodes now and write the updated cache.
2977 2979 self._phasecache.filterunknown(self)
2978 2980 self._phasecache.write()
2979 2981
2980 2982 # refresh all repository caches
2981 2983 self.updatecaches()
2982 2984
2983 2985 # Ensure the persistent tag cache is updated. Doing it now
2984 2986 # means that the tag cache only has to worry about destroyed
2985 2987 # heads immediately after a strip/rollback. That in turn
2986 2988 # guarantees that "cachetip == currenttip" (comparing both rev
2987 2989 # and node) always means no nodes have been added or destroyed.
2988 2990
2989 2991 # XXX this is suboptimal when qrefresh'ing: we strip the current
2990 2992 # head, refresh the tag cache, then immediately add a new head.
2991 2993 # But I think doing it this way is necessary for the "instant
2992 2994 # tag cache retrieval" case to work.
2993 2995 self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
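
    # Hedged sketch of registering a post-dirstate-status callback from an
    # extension; the callback name is hypothetical:
    #
    #   def _fixup(wctx, status):
    #       # called with the wlock held, after status fixups
    #       dirstate = wctx.repo().dirstate  # never use a cached dirstate
    #
    #   repo.addpostdsstatus(_fixup)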

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
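        # For each (top, bottom) pair, walk first-parent ancestors from top
        # towards bottom and record the nodes met at exponentially growing
        # distances (1, 2, 4, 8, ...) from top; this feeds the legacy
        # 'between' wire protocol command.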
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        with a pushop (carrying repo, remote and outgoing attributes)
        before changesets are pushed.
        """
        return util.hooks()
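
    # Hedged sketch (extension-side, names hypothetical): register a check
    # that runs against the pushop before changesets are sent:
    #
    #   def _check(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort(b'refusing to push over 100 changesets')
    #
    #   repo.prepushoutgoinghooks.add(b'myext', _check)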

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
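
    # Hedged note: mutable metadata such as bookmarks and phases travels
    # over the wire as pushkey namespaces; pushing a bookmark boils down to
    # something like (illustrative values only):
    #
    #   repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))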

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
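
# Hedged usage sketch: callers normally let defaultcreateopts() fill in the
# backend before computing requirements, e.g.
#
#   createopts = defaultcreateopts(ui, createopts={b'lfs': True})
#   requirements = newreporequirements(ui, createopts)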


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
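    # The for/else below keeps the first engine from the configured list that
    # is actually available in this build; if none is available, abort with a
    # hint pointing at "hg debuginstall".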
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    return requirements


def checkrequirementscompat(ui, requirements):
    """Check compatibility of enabled and disabled repository requirements.

    Returns the set of requirements that need to be dropped because the
    requirements they depend on are not enabled. Also warns users about it.
    """

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
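
# Hedged sketch (names hypothetical): an extension that understands an extra
# creation option would wrap the function above and claim its key:
#
#   def _filtered(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myext-storage', None)
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filtered)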


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
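
# Hedged usage sketch (path illustrative): create a repository on disk, then
# obtain a repo object for it:
#
#   createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
#   repo = makelocalrepository(ui, b'/tmp/newrepo')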


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)