localrepo: refactor `.hg/requires` reading logic in separate function...
Pulkit Goyal
r45913:c4fe2262 default
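
This revision pulls the inline `.hg/requires` reading out of `makelocalrepository()` into a new module-level helper, `_readrequires(vfs, allowmissing)`, so the same logic can be applied to any vfs. A minimal standalone sketch of the same pattern follows; the plain file path and the `read_requires` name are assumptions made purely for illustration, since the real helper works through Mercurial's vfs layer:

import errno

def read_requires(path, allowmissing=True):
    # The requires file is a newline-delimited list of features the
    # opener must support. It was introduced in Mercurial 0.9.2, so
    # very old repositories may not have one at all.
    try:
        with open(path, 'rb') as fp:
            return set(fp.read().splitlines())
    except IOError as e:
        # Tolerate only a missing file, and only when the caller
        # explicitly allows it; re-raise every other I/O error.
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        return set()

With the helper in place, the call site in makelocalrepository() collapses to a single line, as seen in the diff below: requirements = _readrequires(hgvfs, True).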
@@ -1,3521 +1,3530 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 commit,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 mergestate as mergestatemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 rcutil,
58 58 repoview,
59 59 revset,
60 60 revsetlang,
61 61 scmutil,
62 62 sparse,
63 63 store as storemod,
64 64 subrepoutil,
65 65 tags as tagsmod,
66 66 transaction,
67 67 txnutil,
68 68 util,
69 69 vfs as vfsmod,
70 70 )
71 71
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76
77 77 from .utils import (
78 78 hashutil,
79 79 procutil,
80 80 stringutil,
81 81 )
82 82
83 83 from .revlogutils import constants as revlogconst
84 84
85 85 release = lockmod.release
86 86 urlerr = util.urlerr
87 87 urlreq = util.urlreq
88 88
89 89 # set of (path, vfs-location) tuples. vfs-location is:
90 90 # - 'plain' for vfs relative paths
91 91 # - '' for svfs relative paths
92 92 _cachedfiles = set()
93 93
94 94
95 95 class _basefilecache(scmutil.filecache):
96 96 """All filecache usage on repo are done for logic that should be unfiltered
97 97 """
98 98
99 99 def __get__(self, repo, type=None):
100 100 if repo is None:
101 101 return self
102 102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 103 unfi = repo.unfiltered()
104 104 try:
105 105 return unfi.__dict__[self.sname]
106 106 except KeyError:
107 107 pass
108 108 return super(_basefilecache, self).__get__(unfi, type)
109 109
110 110 def set(self, repo, value):
111 111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112 112
113 113
114 114 class repofilecache(_basefilecache):
115 115 """filecache for files in .hg but outside of .hg/store"""
116 116
117 117 def __init__(self, *paths):
118 118 super(repofilecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, b'plain'))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.vfs.join(fname)
124 124
125 125
126 126 class storecache(_basefilecache):
127 127 """filecache for files in the store"""
128 128
129 129 def __init__(self, *paths):
130 130 super(storecache, self).__init__(*paths)
131 131 for path in paths:
132 132 _cachedfiles.add((path, b''))
133 133
134 134 def join(self, obj, fname):
135 135 return obj.sjoin(fname)
136 136
137 137
138 138 class mixedrepostorecache(_basefilecache):
139 139 """filecache for a mix files in .hg/store and outside"""
140 140
141 141 def __init__(self, *pathsandlocations):
142 142 # scmutil.filecache only uses the path for passing back into our
143 143 # join(), so we can safely pass a list of paths and locations
144 144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 145 _cachedfiles.update(pathsandlocations)
146 146
147 147 def join(self, obj, fnameandlocation):
148 148 fname, location = fnameandlocation
149 149 if location == b'plain':
150 150 return obj.vfs.join(fname)
151 151 else:
152 152 if location != b'':
153 153 raise error.ProgrammingError(
154 154 b'unexpected location: %s' % location
155 155 )
156 156 return obj.sjoin(fname)
157 157
158 158
159 159 def isfilecached(repo, name):
160 160 """check if a repo has already cached "name" filecache-ed property
161 161
162 162 This returns (cachedobj-or-None, iscached) tuple.
163 163 """
164 164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 165 if not cacheentry:
166 166 return None, False
167 167 return cacheentry.obj, True
168 168
169 169
170 170 class unfilteredpropertycache(util.propertycache):
171 171 """propertycache that apply to unfiltered repo only"""
172 172
173 173 def __get__(self, repo, type=None):
174 174 unfi = repo.unfiltered()
175 175 if unfi is repo:
176 176 return super(unfilteredpropertycache, self).__get__(unfi)
177 177 return getattr(unfi, self.name)
178 178
179 179
180 180 class filteredpropertycache(util.propertycache):
181 181 """propertycache that must take filtering in account"""
182 182
183 183 def cachevalue(self, obj, value):
184 184 object.__setattr__(obj, self.name, value)
185 185
186 186
187 187 def hasunfilteredcache(repo, name):
188 188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 189 return name in vars(repo.unfiltered())
190 190
191 191
192 192 def unfilteredmethod(orig):
193 193 """decorate method that always need to be run on unfiltered version"""
194 194
195 195 def wrapper(repo, *args, **kwargs):
196 196 return orig(repo.unfiltered(), *args, **kwargs)
197 197
198 198 return wrapper
199 199
200 200
201 201 moderncaps = {
202 202 b'lookup',
203 203 b'branchmap',
204 204 b'pushkey',
205 205 b'known',
206 206 b'getbundle',
207 207 b'unbundle',
208 208 }
209 209 legacycaps = moderncaps.union({b'changegroupsubset'})
210 210
211 211
212 212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 213 class localcommandexecutor(object):
214 214 def __init__(self, peer):
215 215 self._peer = peer
216 216 self._sent = False
217 217 self._closed = False
218 218
219 219 def __enter__(self):
220 220 return self
221 221
222 222 def __exit__(self, exctype, excvalue, exctb):
223 223 self.close()
224 224
225 225 def callcommand(self, command, args):
226 226 if self._sent:
227 227 raise error.ProgrammingError(
228 228 b'callcommand() cannot be used after sendcommands()'
229 229 )
230 230
231 231 if self._closed:
232 232 raise error.ProgrammingError(
233 233 b'callcommand() cannot be used after close()'
234 234 )
235 235
236 236 # We don't need to support anything fancy. Just call the named
237 237 # method on the peer and return a resolved future.
238 238 fn = getattr(self._peer, pycompat.sysstr(command))
239 239
240 240 f = pycompat.futures.Future()
241 241
242 242 try:
243 243 result = fn(**pycompat.strkwargs(args))
244 244 except Exception:
245 245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 246 else:
247 247 f.set_result(result)
248 248
249 249 return f
250 250
251 251 def sendcommands(self):
252 252 self._sent = True
253 253
254 254 def close(self):
255 255 self._closed = True
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommands)
259 259 class localpeer(repository.peer):
260 260 '''peer for a local repo; reflects only the most recent API'''
261 261
262 262 def __init__(self, repo, caps=None):
263 263 super(localpeer, self).__init__()
264 264
265 265 if caps is None:
266 266 caps = moderncaps.copy()
267 267 self._repo = repo.filtered(b'served')
268 268 self.ui = repo.ui
269 269 self._caps = repo._restrictcapabilities(caps)
270 270
271 271 # Begin of _basepeer interface.
272 272
273 273 def url(self):
274 274 return self._repo.url()
275 275
276 276 def local(self):
277 277 return self._repo
278 278
279 279 def peer(self):
280 280 return self
281 281
282 282 def canpush(self):
283 283 return True
284 284
285 285 def close(self):
286 286 self._repo.close()
287 287
288 288 # End of _basepeer interface.
289 289
290 290 # Begin of _basewirecommands interface.
291 291
292 292 def branchmap(self):
293 293 return self._repo.branchmap()
294 294
295 295 def capabilities(self):
296 296 return self._caps
297 297
298 298 def clonebundles(self):
299 299 return self._repo.tryread(b'clonebundles.manifest')
300 300
301 301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 302 """Used to test argument passing over the wire"""
303 303 return b"%s %s %s %s %s" % (
304 304 one,
305 305 two,
306 306 pycompat.bytestr(three),
307 307 pycompat.bytestr(four),
308 308 pycompat.bytestr(five),
309 309 )
310 310
311 311 def getbundle(
312 312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 313 ):
314 314 chunks = exchange.getbundlechunks(
315 315 self._repo,
316 316 source,
317 317 heads=heads,
318 318 common=common,
319 319 bundlecaps=bundlecaps,
320 320 **kwargs
321 321 )[1]
322 322 cb = util.chunkbuffer(chunks)
323 323
324 324 if exchange.bundle2requested(bundlecaps):
325 325 # When requesting a bundle2, getbundle returns a stream to make the
326 326 # wire level function happier. We need to build a proper object
327 327 # from it in local peer.
328 328 return bundle2.getunbundler(self.ui, cb)
329 329 else:
330 330 return changegroup.getunbundler(b'01', cb, None)
331 331
332 332 def heads(self):
333 333 return self._repo.heads()
334 334
335 335 def known(self, nodes):
336 336 return self._repo.known(nodes)
337 337
338 338 def listkeys(self, namespace):
339 339 return self._repo.listkeys(namespace)
340 340
341 341 def lookup(self, key):
342 342 return self._repo.lookup(key)
343 343
344 344 def pushkey(self, namespace, key, old, new):
345 345 return self._repo.pushkey(namespace, key, old, new)
346 346
347 347 def stream_out(self):
348 348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349 349
350 350 def unbundle(self, bundle, heads, url):
351 351 """apply a bundle on a repo
352 352
353 353 This function handles the repo locking itself."""
354 354 try:
355 355 try:
356 356 bundle = exchange.readbundle(self.ui, bundle, None)
357 357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 358 if util.safehasattr(ret, b'getchunks'):
359 359 # This is a bundle20 object, turn it into an unbundler.
360 360 # This little dance should be dropped eventually when the
361 361 # API is finally improved.
362 362 stream = util.chunkbuffer(ret.getchunks())
363 363 ret = bundle2.getunbundler(self.ui, stream)
364 364 return ret
365 365 except Exception as exc:
366 366 # If the exception contains output salvaged from a bundle2
367 367 # reply, we need to make sure it is printed before continuing
368 368 # to fail. So we build a bundle2 with such output and consume
369 369 # it directly.
370 370 #
371 371 # This is not very elegant but allows a "simple" solution for
372 372 # issue4594
373 373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 374 if output:
375 375 bundler = bundle2.bundle20(self._repo.ui)
376 376 for out in output:
377 377 bundler.addpart(out)
378 378 stream = util.chunkbuffer(bundler.getchunks())
379 379 b = bundle2.getunbundler(self.ui, stream)
380 380 bundle2.processbundle(self._repo, b)
381 381 raise
382 382 except error.PushRaced as exc:
383 383 raise error.ResponseError(
384 384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 385 )
386 386
387 387 # End of _basewirecommands interface.
388 388
389 389 # Begin of peer interface.
390 390
391 391 def commandexecutor(self):
392 392 return localcommandexecutor(self)
393 393
394 394 # End of peer interface.
395 395
396 396
397 397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 398 class locallegacypeer(localpeer):
399 399 '''peer extension which implements legacy methods too; used for tests with
400 400 restricted capabilities'''
401 401
402 402 def __init__(self, repo):
403 403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404 404
405 405 # Begin of baselegacywirecommands interface.
406 406
407 407 def between(self, pairs):
408 408 return self._repo.between(pairs)
409 409
410 410 def branches(self, nodes):
411 411 return self._repo.branches(nodes)
412 412
413 413 def changegroup(self, nodes, source):
414 414 outgoing = discovery.outgoing(
415 415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 416 )
417 417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418 418
419 419 def changegroupsubset(self, bases, heads, source):
420 420 outgoing = discovery.outgoing(
421 421 self._repo, missingroots=bases, ancestorsof=heads
422 422 )
423 423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424 424
425 425 # End of baselegacywirecommands interface.
426 426
427 427
428 428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 429 # clients.
430 430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431 431
432 432 # A repository with the sparserevlog feature will have delta chains that
433 433 # can spread over a larger span. Sparse reading cuts these large spans into
434 434 # pieces, so that each piece isn't too big.
435 435 # Without the sparserevlog capability, reading from the repository could use
436 436 # huge amounts of memory, because the whole span would be read at once,
437 437 # including all the intermediate revisions that aren't pertinent for the chain.
438 438 # This is why once a repository has enabled sparse-read, it becomes required.
439 439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440 440
441 441 # A repository with the sidedataflag requirement will allow storing extra
442 442 # information for revisions without altering their original hashes.
443 443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444 444
445 445 # A repository with the copies-sidedata-changeset requirement will store
446 446 # copies related information in changeset's sidedata.
447 447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448 448
449 449 # The repository use persistent nodemap for the changelog and the manifest.
450 450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451 451
452 452 # Functions receiving (ui, features) that extensions can register to impact
453 453 # the ability to load repositories with custom requirements. Only
454 454 # functions defined in loaded extensions are called.
455 455 #
456 456 # The function receives a set of requirement strings that the repository
457 457 # is capable of opening. Functions will typically add elements to the
458 458 # set to reflect that the extension knows how to handle those requirements.
459 459 featuresetupfuncs = set()
460 460
461 461
462 462 def _getsharedvfs(hgvfs, requirements):
463 463 """ returns the vfs object pointing to root of shared source
464 464 repo for a shared repository
465 465
466 466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
467 467 requirements is a set of requirements of current repo (shared one)
468 468 """
469 469 # The ``shared`` or ``relshared`` requirements indicate the
470 470 # store lives in the path contained in the ``.hg/sharedpath`` file.
471 471 # This is an absolute path for ``shared`` and relative to
472 472 # ``.hg/`` for ``relshared``.
473 473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
474 474 if b'relshared' in requirements:
475 475 sharedpath = hgvfs.join(sharedpath)
476 476
477 477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
478 478
479 479 if not sharedvfs.exists():
480 480 raise error.RepoError(
481 481 _(b'.hg/sharedpath points to nonexistent directory %s')
482 482 % sharedvfs.base
483 483 )
484 484 return sharedvfs
485 485
486 486
487 def _readrequires(vfs, allowmissing):
488 """ reads the require file present at root of this vfs
489 and return a set of requirements
490
491 If allowmissing is True, we suppress ENOENT if raised"""
492 # requires file contains a newline-delimited list of
493 # features/capabilities the opener (us) must have in order to use
494 # the repository. This file was introduced in Mercurial 0.9.2,
495 # which means very old repositories may not have one. We assume
496 # a missing file translates to no requirements.
497 try:
498 requirements = set(vfs.read(b'requires').splitlines())
499 except IOError as e:
500 if not (allowmissing and e.errno == errno.ENOENT):
501 raise
502 requirements = set()
503 return requirements
504
505
487 506 def makelocalrepository(baseui, path, intents=None):
488 507 """Create a local repository object.
489 508
490 509 Given arguments needed to construct a local repository, this function
491 510 performs various early repository loading functionality (such as
492 511 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
493 512 the repository can be opened, derives a type suitable for representing
494 513 that repository, and returns an instance of it.
495 514
496 515 The returned object conforms to the ``repository.completelocalrepository``
497 516 interface.
498 517
499 518 The repository type is derived by calling a series of factory functions
500 519 for each aspect/interface of the final repository. These are defined by
501 520 ``REPO_INTERFACES``.
502 521
503 522 Each factory function is called to produce a type implementing a specific
504 523 interface. The cumulative list of returned types will be combined into a
505 524 new type and that type will be instantiated to represent the local
506 525 repository.
507 526
508 527 The factory functions each receive various state that may be consulted
509 528 as part of deriving a type.
510 529
511 530 Extensions should wrap these factory functions to customize repository type
512 531 creation. Note that an extension's wrapped function may be called even if
513 532 that extension is not loaded for the repo being constructed. Extensions
514 533 should check if their ``__name__`` appears in the
515 534 ``extensionmodulenames`` set passed to the factory function and no-op if
516 535 not.
517 536 """
518 537 ui = baseui.copy()
519 538 # Prevent copying repo configuration.
520 539 ui.copy = baseui.copy
521 540
522 541 # Working directory VFS rooted at repository root.
523 542 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
524 543
525 544 # Main VFS for .hg/ directory.
526 545 hgpath = wdirvfs.join(b'.hg')
527 546 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
529 548 # Whether this repository is a shared one or not
529 548 shared = False
530 549 # If this repository is shared, vfs pointing to shared repo
531 550 sharedvfs = None
532 551
533 552 # The .hg/ path should exist and should be a directory. All other
534 553 # cases are errors.
535 554 if not hgvfs.isdir():
536 555 try:
537 556 hgvfs.stat()
538 557 except OSError as e:
539 558 if e.errno != errno.ENOENT:
540 559 raise
541 560 except ValueError as e:
542 561 # Can be raised on Python 3.8 when path is invalid.
543 562 raise error.Abort(
544 563 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
545 564 )
546 565
547 566 raise error.RepoError(_(b'repository %s not found') % path)
548 567
549 # .hg/requires file contains a newline-delimited list of
550 # features/capabilities the opener (us) must have in order to use
551 # the repository. This file was introduced in Mercurial 0.9.2,
552 # which means very old repositories may not have one. We assume
553 # a missing file translates to no requirements.
554 try:
555 requirements = set(hgvfs.read(b'requires').splitlines())
556 except IOError as e:
557 if e.errno != errno.ENOENT:
558 raise
559 requirements = set()
568 requirements = _readrequires(hgvfs, True)
560 569
561 570 # The .hg/hgrc file may load extensions or contain config options
562 571 # that influence repository construction. Attempt to load it and
563 572 # process any new extensions that it may have pulled in.
564 573 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
565 574 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
566 575 extensions.loadall(ui)
567 576 extensions.populateui(ui)
568 577
569 578 # Set of module names of extensions loaded for this repository.
570 579 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
571 580
572 581 supportedrequirements = gathersupportedrequirements(ui)
573 582
574 583 # We first validate the requirements are known.
575 584 ensurerequirementsrecognized(requirements, supportedrequirements)
576 585
577 586 # Then we validate that the known set is reasonable to use together.
578 587 ensurerequirementscompatible(ui, requirements)
579 588
580 589 # TODO there are unhandled edge cases related to opening repositories with
581 590 # shared storage. If storage is shared, we should also test for requirements
582 591 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
583 592 # that repo, as that repo may load extensions needed to open it. This is a
584 593 # bit complicated because we don't want the other hgrc to overwrite settings
585 594 # in this hgrc.
586 595 #
587 596 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
588 597 # file when sharing repos. But if a requirement is added after the share is
589 598 # performed, thereby introducing a new requirement for the opener, we may
590 599 # not see that and could encounter a run-time error interacting with
591 600 # that shared store since it has an unknown-to-us requirement.
592 601
593 602 # At this point, we know we should be capable of opening the repository.
594 603 # Now get on with doing that.
595 604
596 605 features = set()
597 606
598 607 # The "store" part of the repository holds versioned data. How it is
599 608 # accessed is determined by various requirements. If `shared` or
600 609 # `relshared` requirements are present, this indicates the current repository
601 610 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
602 611 shared = b'shared' in requirements or b'relshared' in requirements
603 612 if shared:
604 613 sharedvfs = _getsharedvfs(hgvfs, requirements)
605 614 storebasepath = sharedvfs.base
606 615 cachepath = sharedvfs.join(b'cache')
607 616 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
608 617 else:
609 618 storebasepath = hgvfs.base
610 619 cachepath = hgvfs.join(b'cache')
611 620 wcachepath = hgvfs.join(b'wcache')
612 621
613 622 # The store has changed over time and the exact layout is dictated by
614 623 # requirements. The store interface abstracts differences across all
615 624 # of them.
616 625 store = makestore(
617 626 requirements,
618 627 storebasepath,
619 628 lambda base: vfsmod.vfs(base, cacheaudited=True),
620 629 )
621 630 hgvfs.createmode = store.createmode
622 631
623 632 storevfs = store.vfs
624 633 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
625 634
626 635 # The cache vfs is used to manage cache files.
627 636 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
628 637 cachevfs.createmode = store.createmode
629 638 # The cache vfs is used to manage cache files related to the working copy
630 639 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
631 640 wcachevfs.createmode = store.createmode
632 641
633 642 # Now resolve the type for the repository object. We do this by repeatedly
634 643 # calling a factory function to produce types for specific aspects of the
635 644 # repo's operation. The aggregate returned types are used as base classes
636 645 # for a dynamically-derived type, which will represent our new repository.
637 646
638 647 bases = []
639 648 extrastate = {}
640 649
641 650 for iface, fn in REPO_INTERFACES:
642 651 # We pass all potentially useful state to give extensions tons of
643 652 # flexibility.
644 653 typ = fn()(
645 654 ui=ui,
646 655 intents=intents,
647 656 requirements=requirements,
648 657 features=features,
649 658 wdirvfs=wdirvfs,
650 659 hgvfs=hgvfs,
651 660 store=store,
652 661 storevfs=storevfs,
653 662 storeoptions=storevfs.options,
654 663 cachevfs=cachevfs,
655 664 wcachevfs=wcachevfs,
656 665 extensionmodulenames=extensionmodulenames,
657 666 extrastate=extrastate,
658 667 baseclasses=bases,
659 668 )
660 669
661 670 if not isinstance(typ, type):
662 671 raise error.ProgrammingError(
663 672 b'unable to construct type for %s' % iface
664 673 )
665 674
666 675 bases.append(typ)
667 676
668 677 # type() allows you to use characters in type names that wouldn't be
669 678 # recognized as Python symbols in source code. We abuse that to add
670 679 # rich information about our constructed repo.
671 680 name = pycompat.sysstr(
672 681 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
673 682 )
674 683
675 684 cls = type(name, tuple(bases), {})
676 685
677 686 return cls(
678 687 baseui=baseui,
679 688 ui=ui,
680 689 origroot=path,
681 690 wdirvfs=wdirvfs,
682 691 hgvfs=hgvfs,
683 692 requirements=requirements,
684 693 supportedrequirements=supportedrequirements,
685 694 sharedpath=storebasepath,
686 695 store=store,
687 696 cachevfs=cachevfs,
688 697 wcachevfs=wcachevfs,
689 698 features=features,
690 699 intents=intents,
691 700 )
692 701
693 702
694 703 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
695 704 """Load hgrc files/content into a ui instance.
696 705
697 706 This is called during repository opening to load any additional
698 707 config files or settings relevant to the current repository.
699 708
700 709 Returns a bool indicating whether any additional configs were loaded.
701 710
702 711 Extensions should monkeypatch this function to modify how per-repo
703 712 configs are loaded. For example, an extension may wish to pull in
704 713 configs from alternate files or sources.
705 714 """
706 715 if not rcutil.use_repo_hgrc():
707 716 return False
708 717 try:
709 718 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
710 719 return True
711 720 except IOError:
712 721 return False
713 722
714 723
715 724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
716 725 """Perform additional actions after .hg/hgrc is loaded.
717 726
718 727 This function is called during repository loading immediately after
719 728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
720 729
721 730 The function can be used to validate configs, automatically add
722 731 options (including extensions) based on requirements, etc.
723 732 """
724 733
725 734 # Map of requirements to list of extensions to load automatically when
726 735 # requirement is present.
727 736 autoextensions = {
728 737 b'git': [b'git'],
729 738 b'largefiles': [b'largefiles'],
730 739 b'lfs': [b'lfs'],
731 740 }
732 741
733 742 for requirement, names in sorted(autoextensions.items()):
734 743 if requirement not in requirements:
735 744 continue
736 745
737 746 for name in names:
738 747 if not ui.hasconfig(b'extensions', name):
739 748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
740 749
741 750
742 751 def gathersupportedrequirements(ui):
743 752 """Determine the complete set of recognized requirements."""
744 753 # Start with all requirements supported by this file.
745 754 supported = set(localrepository._basesupported)
746 755
747 756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
748 757 # relevant to this ui instance.
749 758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
750 759
751 760 for fn in featuresetupfuncs:
752 761 if fn.__module__ in modules:
753 762 fn(ui, supported)
754 763
755 764 # Add derived requirements from registered compression engines.
756 765 for name in util.compengines:
757 766 engine = util.compengines[name]
758 767 if engine.available() and engine.revlogheader():
759 768 supported.add(b'exp-compression-%s' % name)
760 769 if engine.name() == b'zstd':
761 770 supported.add(b'revlog-compression-zstd')
762 771
763 772 return supported
764 773
765 774
766 775 def ensurerequirementsrecognized(requirements, supported):
767 776 """Validate that a set of local requirements is recognized.
768 777
769 778 Receives a set of requirements. Raises an ``error.RepoError`` if there
770 779 exists any requirement in that set that currently loaded code doesn't
771 780 recognize.
772 781
773 782 Returns a set of supported requirements.
774 783 """
775 784 missing = set()
776 785
777 786 for requirement in requirements:
778 787 if requirement in supported:
779 788 continue
780 789
781 790 if not requirement or not requirement[0:1].isalnum():
782 791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
783 792
784 793 missing.add(requirement)
785 794
786 795 if missing:
787 796 raise error.RequirementError(
788 797 _(b'repository requires features unknown to this Mercurial: %s')
789 798 % b' '.join(sorted(missing)),
790 799 hint=_(
791 800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
792 801 b'for more information'
793 802 ),
794 803 )
795 804
796 805
797 806 def ensurerequirementscompatible(ui, requirements):
798 807 """Validates that a set of recognized requirements is mutually compatible.
799 808
800 809 Some requirements may not be compatible with others or require
801 810 config options that aren't enabled. This function is called during
802 811 repository opening to ensure that the set of requirements needed
803 812 to open a repository is sane and compatible with config options.
804 813
805 814 Extensions can monkeypatch this function to perform additional
806 815 checking.
807 816
808 817 ``error.RepoError`` should be raised on failure.
809 818 """
810 819 if b'exp-sparse' in requirements and not sparse.enabled:
811 820 raise error.RepoError(
812 821 _(
813 822 b'repository is using sparse feature but '
814 823 b'sparse is not enabled; enable the '
815 824 b'"sparse" extensions to access'
816 825 )
817 826 )
818 827
819 828
820 829 def makestore(requirements, path, vfstype):
821 830 """Construct a storage object for a repository."""
822 831 if b'store' in requirements:
823 832 if b'fncache' in requirements:
824 833 return storemod.fncachestore(
825 834 path, vfstype, b'dotencode' in requirements
826 835 )
827 836
828 837 return storemod.encodedstore(path, vfstype)
829 838
830 839 return storemod.basicstore(path, vfstype)
831 840
832 841
833 842 def resolvestorevfsoptions(ui, requirements, features):
834 843 """Resolve the options to pass to the store vfs opener.
835 844
836 845 The returned dict is used to influence behavior of the storage layer.
837 846 """
838 847 options = {}
839 848
840 849 if b'treemanifest' in requirements:
841 850 options[b'treemanifest'] = True
842 851
843 852 # experimental config: format.manifestcachesize
844 853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
845 854 if manifestcachesize is not None:
846 855 options[b'manifestcachesize'] = manifestcachesize
847 856
848 857 # In the absence of another requirement superseding a revlog-related
849 858 # requirement, we have to assume the repo is using revlog version 0.
850 859 # This revlog format is super old and we don't bother trying to parse
851 860 # opener options for it because those options wouldn't do anything
852 861 # meaningful on such old repos.
853 862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
854 863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
855 864 else: # explicitly mark repo as using revlogv0
856 865 options[b'revlogv0'] = True
857 866
858 867 if COPIESSDC_REQUIREMENT in requirements:
859 868 options[b'copies-storage'] = b'changeset-sidedata'
860 869 else:
861 870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
862 871 copiesextramode = (b'changeset-only', b'compatibility')
863 872 if writecopiesto in copiesextramode:
864 873 options[b'copies-storage'] = b'extra'
865 874
866 875 return options
867 876
868 877
869 878 def resolverevlogstorevfsoptions(ui, requirements, features):
870 879 """Resolve opener options specific to revlogs."""
871 880
872 881 options = {}
873 882 options[b'flagprocessors'] = {}
874 883
875 884 if b'revlogv1' in requirements:
876 885 options[b'revlogv1'] = True
877 886 if REVLOGV2_REQUIREMENT in requirements:
878 887 options[b'revlogv2'] = True
879 888
880 889 if b'generaldelta' in requirements:
881 890 options[b'generaldelta'] = True
882 891
883 892 # experimental config: format.chunkcachesize
884 893 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
885 894 if chunkcachesize is not None:
886 895 options[b'chunkcachesize'] = chunkcachesize
887 896
888 897 deltabothparents = ui.configbool(
889 898 b'storage', b'revlog.optimize-delta-parent-choice'
890 899 )
891 900 options[b'deltabothparents'] = deltabothparents
892 901
893 902 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
894 903 lazydeltabase = False
895 904 if lazydelta:
896 905 lazydeltabase = ui.configbool(
897 906 b'storage', b'revlog.reuse-external-delta-parent'
898 907 )
899 908 if lazydeltabase is None:
900 909 lazydeltabase = not scmutil.gddeltaconfig(ui)
901 910 options[b'lazydelta'] = lazydelta
902 911 options[b'lazydeltabase'] = lazydeltabase
903 912
904 913 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
905 914 if 0 <= chainspan:
906 915 options[b'maxdeltachainspan'] = chainspan
907 916
908 917 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
909 918 if mmapindexthreshold is not None:
910 919 options[b'mmapindexthreshold'] = mmapindexthreshold
911 920
912 921 withsparseread = ui.configbool(b'experimental', b'sparse-read')
913 922 srdensitythres = float(
914 923 ui.config(b'experimental', b'sparse-read.density-threshold')
915 924 )
916 925 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
917 926 options[b'with-sparse-read'] = withsparseread
918 927 options[b'sparse-read-density-threshold'] = srdensitythres
919 928 options[b'sparse-read-min-gap-size'] = srmingapsize
920 929
921 930 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
922 931 options[b'sparse-revlog'] = sparserevlog
923 932 if sparserevlog:
924 933 options[b'generaldelta'] = True
925 934
926 935 sidedata = SIDEDATA_REQUIREMENT in requirements
927 936 options[b'side-data'] = sidedata
928 937
929 938 maxchainlen = None
930 939 if sparserevlog:
931 940 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
932 941 # experimental config: format.maxchainlen
933 942 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
934 943 if maxchainlen is not None:
935 944 options[b'maxchainlen'] = maxchainlen
936 945
937 946 for r in requirements:
938 947 # we allow multiple compression engine requirements to co-exist because
939 948 # strictly speaking, revlog seems to support mixed compression styles.
940 949 #
941 950 # The compression used for new entries will be "the last one"
942 951 prefix = r.startswith
943 952 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
944 953 options[b'compengine'] = r.split(b'-', 2)[2]
945 954
946 955 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
947 956 if options[b'zlib.level'] is not None:
948 957 if not (0 <= options[b'zlib.level'] <= 9):
949 958 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
950 959 raise error.Abort(msg % options[b'zlib.level'])
951 960 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
952 961 if options[b'zstd.level'] is not None:
953 962 if not (0 <= options[b'zstd.level'] <= 22):
954 963 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
955 964 raise error.Abort(msg % options[b'zstd.level'])
956 965
957 966 if repository.NARROW_REQUIREMENT in requirements:
958 967 options[b'enableellipsis'] = True
959 968
960 969 if ui.configbool(b'experimental', b'rust.index'):
961 970 options[b'rust.index'] = True
962 971 if NODEMAP_REQUIREMENT in requirements:
963 972 options[b'persistent-nodemap'] = True
964 973 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
965 974 options[b'persistent-nodemap.mmap'] = True
966 975 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
967 976 options[b'persistent-nodemap.mode'] = epnm
968 977 if ui.configbool(b'devel', b'persistent-nodemap'):
969 978 options[b'devel-force-nodemap'] = True
970 979
971 980 return options
972 981
973 982
974 983 def makemain(**kwargs):
975 984 """Produce a type conforming to ``ilocalrepositorymain``."""
976 985 return localrepository
977 986
978 987
979 988 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
980 989 class revlogfilestorage(object):
981 990 """File storage when using revlogs."""
982 991
983 992 def file(self, path):
984 993 if path[0] == b'/':
985 994 path = path[1:]
986 995
987 996 return filelog.filelog(self.svfs, path)
988 997
989 998
990 999 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
991 1000 class revlognarrowfilestorage(object):
992 1001 """File storage when using revlogs and narrow files."""
993 1002
994 1003 def file(self, path):
995 1004 if path[0] == b'/':
996 1005 path = path[1:]
997 1006
998 1007 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
999 1008
1000 1009
1001 1010 def makefilestorage(requirements, features, **kwargs):
1002 1011 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1003 1012 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1004 1013 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1005 1014
1006 1015 if repository.NARROW_REQUIREMENT in requirements:
1007 1016 return revlognarrowfilestorage
1008 1017 else:
1009 1018 return revlogfilestorage
1010 1019
1011 1020
1012 1021 # List of repository interfaces and factory functions for them. Each
1013 1022 # will be called in order during ``makelocalrepository()`` to iteratively
1014 1023 # derive the final type for a local repository instance. We capture the
1015 1024 # function as a lambda so we don't hold a reference and the module-level
1016 1025 # functions can be wrapped.
1017 1026 REPO_INTERFACES = [
1018 1027 (repository.ilocalrepositorymain, lambda: makemain),
1019 1028 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1020 1029 ]
1021 1030
1022 1031
1023 1032 @interfaceutil.implementer(repository.ilocalrepositorymain)
1024 1033 class localrepository(object):
1025 1034 """Main class for representing local repositories.
1026 1035
1027 1036 All local repositories are instances of this class.
1028 1037
1029 1038 Constructed on its own, instances of this class are not usable as
1030 1039 repository objects. To obtain a usable repository object, call
1031 1040 ``hg.repository()``, ``localrepo.instance()``, or
1032 1041 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1033 1042 ``instance()`` adds support for creating new repositories.
1034 1043 ``hg.repository()`` adds more extension integration, including calling
1035 1044 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1036 1045 used.
1037 1046 """
1038 1047
1039 1048 # obsolete experimental requirements:
1040 1049 # - manifestv2: An experimental new manifest format that allowed
1041 1050 # for stem compression of long paths. Experiment ended up not
1042 1051 # being successful (repository sizes went up due to worse delta
1043 1052 # chains), and the code was deleted in 4.6.
1044 1053 supportedformats = {
1045 1054 b'revlogv1',
1046 1055 b'generaldelta',
1047 1056 b'treemanifest',
1048 1057 COPIESSDC_REQUIREMENT,
1049 1058 REVLOGV2_REQUIREMENT,
1050 1059 SIDEDATA_REQUIREMENT,
1051 1060 SPARSEREVLOG_REQUIREMENT,
1052 1061 NODEMAP_REQUIREMENT,
1053 1062 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1054 1063 }
1055 1064 _basesupported = supportedformats | {
1056 1065 b'store',
1057 1066 b'fncache',
1058 1067 b'shared',
1059 1068 b'relshared',
1060 1069 b'dotencode',
1061 1070 b'exp-sparse',
1062 1071 b'internal-phase',
1063 1072 }
1064 1073
1065 1074 # list of prefixes for files which can be written without 'wlock'
1066 1075 # Extensions should extend this list when needed
1067 1076 _wlockfreeprefix = {
1068 1077 # We might consider requiring 'wlock' for the next
1069 1078 # two, but pretty much all the existing code assume
1070 1079 # wlock is not needed so we keep them excluded for
1071 1080 # now.
1072 1081 b'hgrc',
1073 1082 b'requires',
1074 1083 # XXX cache is a complicated business; someone
1075 1084 # should investigate this in depth at some point
1076 1085 b'cache/',
1077 1086 # XXX shouldn't dirstate be covered by the wlock?
1078 1087 b'dirstate',
1079 1088 # XXX bisect was still a bit too messy at the time
1080 1089 # this changeset was introduced. Someone should fix
1081 1090 # the remaining bit and drop this line
1082 1091 b'bisect.state',
1083 1092 }
1084 1093
1085 1094 def __init__(
1086 1095 self,
1087 1096 baseui,
1088 1097 ui,
1089 1098 origroot,
1090 1099 wdirvfs,
1091 1100 hgvfs,
1092 1101 requirements,
1093 1102 supportedrequirements,
1094 1103 sharedpath,
1095 1104 store,
1096 1105 cachevfs,
1097 1106 wcachevfs,
1098 1107 features,
1099 1108 intents=None,
1100 1109 ):
1101 1110 """Create a new local repository instance.
1102 1111
1103 1112 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1104 1113 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1105 1114 object.
1106 1115
1107 1116 Arguments:
1108 1117
1109 1118 baseui
1110 1119 ``ui.ui`` instance that ``ui`` argument was based off of.
1111 1120
1112 1121 ui
1113 1122 ``ui.ui`` instance for use by the repository.
1114 1123
1115 1124 origroot
1116 1125 ``bytes`` path to working directory root of this repository.
1117 1126
1118 1127 wdirvfs
1119 1128 ``vfs.vfs`` rooted at the working directory.
1120 1129
1121 1130 hgvfs
1122 1131 ``vfs.vfs`` rooted at .hg/
1123 1132
1124 1133 requirements
1125 1134 ``set`` of bytestrings representing repository opening requirements.
1126 1135
1127 1136 supportedrequirements
1128 1137 ``set`` of bytestrings representing repository requirements that we
1129 1138 know how to open. May be a superset of ``requirements``.
1130 1139
1131 1140 sharedpath
1132 1141 ``bytes`` Defining path to storage base directory. Points to a
1133 1142 ``.hg/`` directory somewhere.
1134 1143
1135 1144 store
1136 1145 ``store.basicstore`` (or derived) instance providing access to
1137 1146 versioned storage.
1138 1147
1139 1148 cachevfs
1140 1149 ``vfs.vfs`` used for cache files.
1141 1150
1142 1151 wcachevfs
1143 1152 ``vfs.vfs`` used for cache files related to the working copy.
1144 1153
1145 1154 features
1146 1155 ``set`` of bytestrings defining features/capabilities of this
1147 1156 instance.
1148 1157
1149 1158 intents
1150 1159 ``set`` of system strings indicating what this repo will be used
1151 1160 for.
1152 1161 """
1153 1162 self.baseui = baseui
1154 1163 self.ui = ui
1155 1164 self.origroot = origroot
1156 1165 # vfs rooted at working directory.
1157 1166 self.wvfs = wdirvfs
1158 1167 self.root = wdirvfs.base
1159 1168 # vfs rooted at .hg/. Used to access most non-store paths.
1160 1169 self.vfs = hgvfs
1161 1170 self.path = hgvfs.base
1162 1171 self.requirements = requirements
1163 1172 self.supported = supportedrequirements
1164 1173 self.sharedpath = sharedpath
1165 1174 self.store = store
1166 1175 self.cachevfs = cachevfs
1167 1176 self.wcachevfs = wcachevfs
1168 1177 self.features = features
1169 1178
1170 1179 self.filtername = None
1171 1180
1172 1181 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1173 1182 b'devel', b'check-locks'
1174 1183 ):
1175 1184 self.vfs.audit = self._getvfsward(self.vfs.audit)
1176 1185 # A list of callbacks to shape the phase if no data were found.
1177 1186 # Callbacks are in the form: func(repo, roots) --> processed root.
1178 1187 # This list is to be filled by extensions during repo setup
1179 1188 self._phasedefaults = []
1180 1189
1181 1190 color.setup(self.ui)
1182 1191
1183 1192 self.spath = self.store.path
1184 1193 self.svfs = self.store.vfs
1185 1194 self.sjoin = self.store.join
1186 1195 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1187 1196 b'devel', b'check-locks'
1188 1197 ):
1189 1198 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1190 1199 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1191 1200 else: # standard vfs
1192 1201 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1193 1202
1194 1203 self._dirstatevalidatewarned = False
1195 1204
1196 1205 self._branchcaches = branchmap.BranchMapCache()
1197 1206 self._revbranchcache = None
1198 1207 self._filterpats = {}
1199 1208 self._datafilters = {}
1200 1209 self._transref = self._lockref = self._wlockref = None
1201 1210
1202 1211 # A cache for various files under .hg/ that tracks file changes,
1203 1212 # (used by the filecache decorator)
1204 1213 #
1205 1214 # Maps a property name to its util.filecacheentry
1206 1215 self._filecache = {}
1207 1216
1208 1217 # hold sets of revisions to be filtered
1209 1218 # should be cleared when something might have changed the filter value:
1210 1219 # - new changesets,
1211 1220 # - phase change,
1212 1221 # - new obsolescence marker,
1213 1222 # - working directory parent change,
1214 1223 # - bookmark changes
1215 1224 self.filteredrevcache = {}
1216 1225
1217 1226 # post-dirstate-status hooks
1218 1227 self._postdsstatus = []
1219 1228
1220 1229 # generic mapping between names and nodes
1221 1230 self.names = namespaces.namespaces()
1222 1231
1223 1232 # Key to signature value.
1224 1233 self._sparsesignaturecache = {}
1225 1234 # Signature to cached matcher instance.
1226 1235 self._sparsematchercache = {}
1227 1236
1228 1237 self._extrafilterid = repoview.extrafilter(ui)
1229 1238
1230 1239 self.filecopiesmode = None
1231 1240 if COPIESSDC_REQUIREMENT in self.requirements:
1232 1241 self.filecopiesmode = b'changeset-sidedata'
1233 1242
1234 1243 def _getvfsward(self, origfunc):
1235 1244 """build a ward for self.vfs"""
1236 1245 rref = weakref.ref(self)
1237 1246
1238 1247 def checkvfs(path, mode=None):
1239 1248 ret = origfunc(path, mode=mode)
1240 1249 repo = rref()
1241 1250 if (
1242 1251 repo is None
1243 1252 or not util.safehasattr(repo, b'_wlockref')
1244 1253 or not util.safehasattr(repo, b'_lockref')
1245 1254 ):
1246 1255 return
1247 1256 if mode in (None, b'r', b'rb'):
1248 1257 return
1249 1258 if path.startswith(repo.path):
1250 1259 # truncate name relative to the repository (.hg)
1251 1260 path = path[len(repo.path) + 1 :]
1252 1261 if path.startswith(b'cache/'):
1253 1262 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1254 1263 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1255 1264 # path prefixes covered by 'lock'
1256 1265 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1257 1266 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1258 1267 if repo._currentlock(repo._lockref) is None:
1259 1268 repo.ui.develwarn(
1260 1269 b'write with no lock: "%s"' % path,
1261 1270 stacklevel=3,
1262 1271 config=b'check-locks',
1263 1272 )
1264 1273 elif repo._currentlock(repo._wlockref) is None:
1265 1274 # rest of vfs files are covered by 'wlock'
1266 1275 #
1267 1276 # exclude special files
1268 1277 for prefix in self._wlockfreeprefix:
1269 1278 if path.startswith(prefix):
1270 1279 return
1271 1280 repo.ui.develwarn(
1272 1281 b'write with no wlock: "%s"' % path,
1273 1282 stacklevel=3,
1274 1283 config=b'check-locks',
1275 1284 )
1276 1285 return ret
1277 1286
1278 1287 return checkvfs
1279 1288
1280 1289 def _getsvfsward(self, origfunc):
1281 1290 """build a ward for self.svfs"""
1282 1291 rref = weakref.ref(self)
1283 1292
1284 1293 def checksvfs(path, mode=None):
1285 1294 ret = origfunc(path, mode=mode)
1286 1295 repo = rref()
1287 1296 if repo is None or not util.safehasattr(repo, b'_lockref'):
1288 1297 return
1289 1298 if mode in (None, b'r', b'rb'):
1290 1299 return
1291 1300 if path.startswith(repo.sharedpath):
1292 1301 # truncate name relative to the repository (.hg)
1293 1302 path = path[len(repo.sharedpath) + 1 :]
1294 1303 if repo._currentlock(repo._lockref) is None:
1295 1304 repo.ui.develwarn(
1296 1305 b'write with no lock: "%s"' % path, stacklevel=4
1297 1306 )
1298 1307 return ret
1299 1308
1300 1309 return checksvfs
1301 1310
1302 1311 def close(self):
1303 1312 self._writecaches()
1304 1313
1305 1314 def _writecaches(self):
1306 1315 if self._revbranchcache:
1307 1316 self._revbranchcache.write()
1308 1317
1309 1318 def _restrictcapabilities(self, caps):
1310 1319 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1311 1320 caps = set(caps)
1312 1321 capsblob = bundle2.encodecaps(
1313 1322 bundle2.getrepocaps(self, role=b'client')
1314 1323 )
1315 1324 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1316 1325 return caps
1317 1326
1318 1327 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1319 1328 # self -> auditor -> self._checknested -> self
1320 1329
1321 1330 @property
1322 1331 def auditor(self):
1323 1332 # This is only used by context.workingctx.match in order to
1324 1333 # detect files in subrepos.
1325 1334 return pathutil.pathauditor(self.root, callback=self._checknested)
1326 1335
1327 1336 @property
1328 1337 def nofsauditor(self):
1329 1338 # This is only used by context.basectx.match in order to detect
1330 1339 # files in subrepos.
1331 1340 return pathutil.pathauditor(
1332 1341 self.root, callback=self._checknested, realfs=False, cached=True
1333 1342 )
1334 1343
1335 1344 def _checknested(self, path):
1336 1345 """Determine if path is a legal nested repository."""
1337 1346 if not path.startswith(self.root):
1338 1347 return False
1339 1348 subpath = path[len(self.root) + 1 :]
1340 1349 normsubpath = util.pconvert(subpath)
1341 1350
1342 1351 # XXX: Checking against the current working copy is wrong in
1343 1352 # the sense that it can reject things like
1344 1353 #
1345 1354 # $ hg cat -r 10 sub/x.txt
1346 1355 #
1347 1356 # if sub/ is no longer a subrepository in the working copy
1348 1357 # parent revision.
1349 1358 #
1350 1359 # However, it can of course also allow things that would have
1351 1360 # been rejected before, such as the above cat command if sub/
1352 1361 # is a subrepository now, but was a normal directory before.
1353 1362 # The old path auditor would have rejected by mistake since it
1354 1363 # panics when it sees sub/.hg/.
1355 1364 #
1356 1365 # All in all, checking against the working copy seems sensible
1357 1366 # since we want to prevent access to nested repositories on
1358 1367 # the filesystem *now*.
1359 1368 ctx = self[None]
1360 1369 parts = util.splitpath(subpath)
1361 1370 while parts:
1362 1371 prefix = b'/'.join(parts)
1363 1372 if prefix in ctx.substate:
1364 1373 if prefix == normsubpath:
1365 1374 return True
1366 1375 else:
1367 1376 sub = ctx.sub(prefix)
1368 1377 return sub.checknested(subpath[len(prefix) + 1 :])
1369 1378 else:
1370 1379 parts.pop()
1371 1380 return False
1372 1381
1373 1382 def peer(self):
1374 1383 return localpeer(self) # not cached to avoid reference cycle
1375 1384
1376 1385 def unfiltered(self):
1377 1386 """Return unfiltered version of the repository
1378 1387
1379 1388 Intended to be overwritten by filtered repo."""
1380 1389 return self
1381 1390
1382 1391 def filtered(self, name, visibilityexceptions=None):
1383 1392 """Return a filtered version of a repository
1384 1393
1385 1394 The `name` parameter is the identifier of the requested view. This
1386 1395 will return a repoview object set "exactly" to the specified view.
1387 1396
1388 1397 This function does not apply recursive filtering to a repository. For
1389 1398 example calling `repo.filtered("served")` will return a repoview using
1390 1399 the "served" view, regardless of the initial view used by `repo`.
1391 1400
1392 1401 In other words, there is always only one level of `repoview` "filtering".
1393 1402 """
1394 1403 if self._extrafilterid is not None and b'%' not in name:
1395 1404 name = name + b'%' + self._extrafilterid
1396 1405
1397 1406 cls = repoview.newtype(self.unfiltered().__class__)
1398 1407 return cls(self, name, visibilityexceptions)
1399 1408
1400 1409 @mixedrepostorecache(
1401 1410 (b'bookmarks', b'plain'),
1402 1411 (b'bookmarks.current', b'plain'),
1403 1412 (b'bookmarks', b''),
1404 1413 (b'00changelog.i', b''),
1405 1414 )
1406 1415 def _bookmarks(self):
1407 1416 # Since the multiple files involved in the transaction cannot be
1408 1417 # written atomically (with current repository format), there is a race
1409 1418 # condition here.
1410 1419 #
1411 1420 # 1) changelog content A is read
1412 1421 # 2) outside transaction update changelog to content B
1413 1422 # 3) outside transaction update bookmark file referring to content B
1414 1423 # 4) bookmarks file content is read and filtered against changelog-A
1415 1424 #
1416 1425 # When this happens, bookmarks against nodes missing from A are dropped.
1417 1426 #
1418 1427 # Having this happen during read is not great, but it becomes worse
1419 1428 # when this happens during write because the bookmarks to the "unknown"
1420 1429 # nodes will be dropped for good. However, writes happen within locks.
1421 1430 # This locking makes it possible to have a race free consistent read.
1422 1431 # For this purpose, data read from disk before locking is
1423 1432 # "invalidated" right after the locks are taken. These invalidations are
1424 1433 # "light": the `filecache` mechanism keeps the data in memory and will
1425 1434 # reuse it if the underlying files did not change. Not parsing the
1426 1435 # same data multiple times helps performance.
1427 1436 #
1428 1437 # Unfortunately in the case described above, the files tracked by the
1429 1438 # bookmarks file cache might not have changed, but the in-memory
1430 1439 # content is still "wrong" because we used an older changelog content
1431 1440 # to process the on-disk data. So after locking, the changelog would be
1432 1441 # refreshed but `_bookmarks` would be preserved.
1433 1442 # Adding `00changelog.i` to the list of tracked files is not
1434 1443 # enough, because at the time we build the content for `_bookmarks` in
1435 1444 # (4), the changelog file has already diverged from the content used
1436 1445 # for loading `changelog` in (1)
1437 1446 #
1438 1447 # To prevent the issue, we force the changelog to be explicitly
1439 1448 # reloaded while computing `_bookmarks`. The data race can still happen
1440 1449 # without the lock (with a narrower window), but it would no longer go
1441 1450 # undetected during the lock time refresh.
1442 1451 #
1443 1452 # The new schedule is as follows
1444 1453 #
1445 1454 # 1) filecache logic detect that `_bookmarks` needs to be computed
1446 1455 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1447 1456 # 3) We force `changelog` filecache to be tested
1448 1457 # 4) cachestat for `changelog` are captured (for changelog)
1449 1458 # 5) `_bookmarks` is computed and cached
1450 1459 #
1451 1460 # The step in (3) ensures we have a changelog at least as recent as the
1452 1461 # cache stat computed in (1). As a result at locking time:
1453 1462 # * if the changelog did not change since (1) -> we can reuse the data
1454 1463 # * otherwise -> the bookmarks get refreshed.
1455 1464 self._refreshchangelog()
1456 1465 return bookmarks.bmstore(self)
1457 1466
1458 1467 def _refreshchangelog(self):
1459 1468 """make sure the in memory changelog match the on-disk one"""
1460 1469 if 'changelog' in vars(self) and self.currenttransaction() is None:
1461 1470 del self.changelog
1462 1471
1463 1472 @property
1464 1473 def _activebookmark(self):
1465 1474 return self._bookmarks.active
1466 1475
1467 1476 # _phasesets depend on changelog. what we need is to call
1468 1477 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1469 1478 # can't be easily expressed in filecache mechanism.
1470 1479 @storecache(b'phaseroots', b'00changelog.i')
1471 1480 def _phasecache(self):
1472 1481 return phases.phasecache(self, self._phasedefaults)
1473 1482
1474 1483 @storecache(b'obsstore')
1475 1484 def obsstore(self):
1476 1485 return obsolete.makestore(self.ui, self)
1477 1486
1478 1487 @storecache(b'00changelog.i')
1479 1488 def changelog(self):
1480 1489 # load dirstate before changelog to avoid race see issue6303
1481 1490 self.dirstate.prefetch_parents()
1482 1491 return self.store.changelog(txnutil.mayhavepending(self.root))
1483 1492
1484 1493 @storecache(b'00manifest.i')
1485 1494 def manifestlog(self):
1486 1495 return self.store.manifestlog(self, self._storenarrowmatch)
1487 1496
1488 1497 @repofilecache(b'dirstate')
1489 1498 def dirstate(self):
1490 1499 return self._makedirstate()
1491 1500
1492 1501 def _makedirstate(self):
1493 1502 """Extension point for wrapping the dirstate per-repo."""
1494 1503 sparsematchfn = lambda: sparse.matcher(self)
1495 1504
1496 1505 return dirstate.dirstate(
1497 1506 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1498 1507 )
1499 1508
1500 1509 def _dirstatevalidate(self, node):
1501 1510 try:
1502 1511 self.changelog.rev(node)
1503 1512 return node
1504 1513 except error.LookupError:
1505 1514 if not self._dirstatevalidatewarned:
1506 1515 self._dirstatevalidatewarned = True
1507 1516 self.ui.warn(
1508 1517 _(b"warning: ignoring unknown working parent %s!\n")
1509 1518 % short(node)
1510 1519 )
1511 1520 return nullid
1512 1521
1513 1522 @storecache(narrowspec.FILENAME)
1514 1523 def narrowpats(self):
1515 1524 """matcher patterns for this repository's narrowspec
1516 1525
1517 1526 A tuple of (includes, excludes).
1518 1527 """
1519 1528 return narrowspec.load(self)
1520 1529
1521 1530 @storecache(narrowspec.FILENAME)
1522 1531 def _storenarrowmatch(self):
1523 1532 if repository.NARROW_REQUIREMENT not in self.requirements:
1524 1533 return matchmod.always()
1525 1534 include, exclude = self.narrowpats
1526 1535 return narrowspec.match(self.root, include=include, exclude=exclude)
1527 1536
1528 1537 @storecache(narrowspec.FILENAME)
1529 1538 def _narrowmatch(self):
1530 1539 if repository.NARROW_REQUIREMENT not in self.requirements:
1531 1540 return matchmod.always()
1532 1541 narrowspec.checkworkingcopynarrowspec(self)
1533 1542 include, exclude = self.narrowpats
1534 1543 return narrowspec.match(self.root, include=include, exclude=exclude)
1535 1544
1536 1545 def narrowmatch(self, match=None, includeexact=False):
1537 1546 """matcher corresponding the the repo's narrowspec
1538 1547
1539 1548 If `match` is given, then that will be intersected with the narrow
1540 1549 matcher.
1541 1550
1542 1551 If `includeexact` is True, then any exact matches from `match` will
1543 1552 be included even if they're outside the narrowspec.
1544 1553 """
1545 1554 if match:
1546 1555 if includeexact and not self._narrowmatch.always():
1547 1556 # do not exclude explicitly-specified paths so that they can
1548 1557 # be warned later on
1549 1558 em = matchmod.exact(match.files())
1550 1559 nm = matchmod.unionmatcher([self._narrowmatch, em])
1551 1560 return matchmod.intersectmatchers(match, nm)
1552 1561 return matchmod.intersectmatchers(match, self._narrowmatch)
1553 1562 return self._narrowmatch
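# Matchers returned here are callable on repo-relative paths, e.g. (a
# sketch, assuming ``repo`` is a narrow clone and the paths are made up):
#
#     m = repo.narrowmatch()
#     m(b'included/dir/file.c')   # -> True, inside the narrowspec
#     m(b'excluded/file.c')       # -> False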
1554 1563
1555 1564 def setnarrowpats(self, newincludes, newexcludes):
1556 1565 narrowspec.save(self, newincludes, newexcludes)
1557 1566 self.invalidate(clearfilecache=True)
1558 1567
1559 1568 @unfilteredpropertycache
1560 1569 def _quick_access_changeid_null(self):
1561 1570 return {
1562 1571 b'null': (nullrev, nullid),
1563 1572 nullrev: (nullrev, nullid),
1564 1573 nullid: (nullrev, nullid),
1565 1574 }
1566 1575
1567 1576 @unfilteredpropertycache
1568 1577 def _quick_access_changeid_wc(self):
1569 1578 # also fast path access to the working copy parents
1570 1579 # however, only do it for filters that ensure the wc is visible.
1571 1580 quick = {}
1572 1581 cl = self.unfiltered().changelog
1573 1582 for node in self.dirstate.parents():
1574 1583 if node == nullid:
1575 1584 continue
1576 1585 rev = cl.index.get_rev(node)
1577 1586 if rev is None:
1578 1587 # unknown working copy parent case:
1579 1588 #
1580 1589 # skip the fast path and let higher code deal with it
1581 1590 continue
1582 1591 pair = (rev, node)
1583 1592 quick[rev] = pair
1584 1593 quick[node] = pair
1585 1594 # also add the parents of the parents
1586 1595 for r in cl.parentrevs(rev):
1587 1596 if r == nullrev:
1588 1597 continue
1589 1598 n = cl.node(r)
1590 1599 pair = (r, n)
1591 1600 quick[r] = pair
1592 1601 quick[n] = pair
1593 1602 p1node = self.dirstate.p1()
1594 1603 if p1node != nullid:
1595 1604 quick[b'.'] = quick[p1node]
1596 1605 return quick
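# Illustrative shape of the resulting mapping (hypothetical revisions 3
# and 5 with binary nodes abbreviated as <n3>, <n5>): each entry maps both
# the integer revision and the node to the same pair, so __getitem__ can
# resolve either form with a single dict lookup.
#
#     {5: (5, <n5>), <n5>: (5, <n5>),
#      3: (3, <n3>), <n3>: (3, <n3>),
#      b'.': (5, <n5>)}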
1597 1606
1598 1607 @unfilteredmethod
1599 1608 def _quick_access_changeid_invalidate(self):
1600 1609 if '_quick_access_changeid_wc' in vars(self):
1601 1610 del self.__dict__['_quick_access_changeid_wc']
1602 1611
1603 1612 @property
1604 1613 def _quick_access_changeid(self):
1605 1614 """an helper dictionnary for __getitem__ calls
1606 1615
1607 1616 This contains a list of symbol we can recognise right away without
1608 1617 further processing.
1609 1618 """
1610 1619 mapping = self._quick_access_changeid_null
1611 1620 if self.filtername in repoview.filter_has_wc:
1612 1621 mapping = mapping.copy()
1613 1622 mapping.update(self._quick_access_changeid_wc)
1614 1623 return mapping
1615 1624
1616 1625 def __getitem__(self, changeid):
1617 1626 # dealing with special cases
1618 1627 if changeid is None:
1619 1628 return context.workingctx(self)
1620 1629 if isinstance(changeid, context.basectx):
1621 1630 return changeid
1622 1631
1623 1632 # dealing with multiple revisions
1624 1633 if isinstance(changeid, slice):
1625 1634 # wdirrev isn't contiguous so the slice shouldn't include it
1626 1635 return [
1627 1636 self[i]
1628 1637 for i in pycompat.xrange(*changeid.indices(len(self)))
1629 1638 if i not in self.changelog.filteredrevs
1630 1639 ]
1631 1640
1632 1641 # dealing with some special values
1633 1642 quick_access = self._quick_access_changeid.get(changeid)
1634 1643 if quick_access is not None:
1635 1644 rev, node = quick_access
1636 1645 return context.changectx(self, rev, node, maybe_filtered=False)
1637 1646 if changeid == b'tip':
1638 1647 node = self.changelog.tip()
1639 1648 rev = self.changelog.rev(node)
1640 1649 return context.changectx(self, rev, node)
1641 1650
1642 1651 # dealing with arbitrary values
1643 1652 try:
1644 1653 if isinstance(changeid, int):
1645 1654 node = self.changelog.node(changeid)
1646 1655 rev = changeid
1647 1656 elif changeid == b'.':
1648 1657 # this is a hack to delay/avoid loading obsmarkers
1649 1658 # when we know that '.' won't be hidden
1650 1659 node = self.dirstate.p1()
1651 1660 rev = self.unfiltered().changelog.rev(node)
1652 1661 elif len(changeid) == 20:
1653 1662 try:
1654 1663 node = changeid
1655 1664 rev = self.changelog.rev(changeid)
1656 1665 except error.FilteredLookupError:
1657 1666 changeid = hex(changeid) # for the error message
1658 1667 raise
1659 1668 except LookupError:
1660 1669 # check if it might have come from a damaged dirstate
1661 1670 #
1662 1671 # XXX we could avoid the unfiltered if we had a recognizable
1663 1672 # exception for filtered changeset access
1664 1673 if (
1665 1674 self.local()
1666 1675 and changeid in self.unfiltered().dirstate.parents()
1667 1676 ):
1668 1677 msg = _(b"working directory has unknown parent '%s'!")
1669 1678 raise error.Abort(msg % short(changeid))
1670 1679 changeid = hex(changeid) # for the error message
1671 1680 raise
1672 1681
1673 1682 elif len(changeid) == 40:
1674 1683 node = bin(changeid)
1675 1684 rev = self.changelog.rev(node)
1676 1685 else:
1677 1686 raise error.ProgrammingError(
1678 1687 b"unsupported changeid '%s' of type %s"
1679 1688 % (changeid, pycompat.bytestr(type(changeid)))
1680 1689 )
1681 1690
1682 1691 return context.changectx(self, rev, node)
1683 1692
1684 1693 except (error.FilteredIndexError, error.FilteredLookupError):
1685 1694 raise error.FilteredRepoLookupError(
1686 1695 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1687 1696 )
1688 1697 except (IndexError, LookupError):
1689 1698 raise error.RepoLookupError(
1690 1699 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1691 1700 )
1692 1701 except error.WdirUnsupported:
1693 1702 return context.workingctx(self)
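# A few illustrative lookups (assuming ``repo`` is a localrepository and
# the identifiers exist):
#
#     repo[None]       # workingctx for the working directory
#     repo[b'.']       # changectx of the first working-copy parent
#     repo[b'tip']     # changectx of the repository tip
#     repo[0]          # changectx by integer revision
#     repo[b'a' * 40]  # changectx by 40-byte hex node
#     repo[0:2]        # list of changectx, filtered revisions skipped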
1694 1703
1695 1704 def __contains__(self, changeid):
1696 1705 """True if the given changeid exists
1697 1706
1698 1707 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1699 1708 specified.
1700 1709 """
1701 1710 try:
1702 1711 self[changeid]
1703 1712 return True
1704 1713 except error.RepoLookupError:
1705 1714 return False
1706 1715
1707 1716 def __nonzero__(self):
1708 1717 return True
1709 1718
1710 1719 __bool__ = __nonzero__
1711 1720
1712 1721 def __len__(self):
1713 1722 # no need to pay the cost of repoview.changelog
1714 1723 unfi = self.unfiltered()
1715 1724 return len(unfi.changelog)
1716 1725
1717 1726 def __iter__(self):
1718 1727 return iter(self.changelog)
1719 1728
1720 1729 def revs(self, expr, *args):
1721 1730 '''Find revisions matching a revset.
1722 1731
1723 1732 The revset is specified as a string ``expr`` that may contain
1724 1733 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1725 1734
1726 1735 Revset aliases from the configuration are not expanded. To expand
1727 1736 user aliases, consider calling ``scmutil.revrange()`` or
1728 1737 ``repo.anyrevs([expr], user=True)``.
1729 1738
1730 1739 Returns a smartset.abstractsmartset, which is a list-like interface
1731 1740 that contains integer revisions.
1732 1741 '''
1733 1742 tree = revsetlang.spectree(expr, *args)
1734 1743 return revset.makematcher(tree)(self)
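# Example (a sketch, assuming ``repo`` is an open localrepository):
#
#     for r in repo.revs(b'branch(%s) and not obsolete()', b'default'):
#         ...
#
# The %s placeholder is escaped via ``revsetlang.formatspec``, so the
# argument cannot be misparsed as revset syntax.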
1735 1744
1736 1745 def set(self, expr, *args):
1737 1746 '''Find revisions matching a revset and emit changectx instances.
1738 1747
1739 1748 This is a convenience wrapper around ``revs()`` that iterates the
1740 1749 result and is a generator of changectx instances.
1741 1750
1742 1751 Revset aliases from the configuration are not expanded. To expand
1743 1752 user aliases, consider calling ``scmutil.revrange()``.
1744 1753 '''
1745 1754 for r in self.revs(expr, *args):
1746 1755 yield self[r]
1747 1756
1748 1757 def anyrevs(self, specs, user=False, localalias=None):
1749 1758 '''Find revisions matching one of the given revsets.
1750 1759
1751 1760 Revset aliases from the configuration are not expanded by default. To
1752 1761 expand user aliases, specify ``user=True``. To provide some local
1753 1762 definitions overriding user aliases, set ``localalias`` to
1754 1763 ``{name: definitionstring}``.
1755 1764 '''
1756 1765 if specs == [b'null']:
1757 1766 return revset.baseset([nullrev])
1758 1767 if specs == [b'.']:
1759 1768 quick_data = self._quick_access_changeid.get(b'.')
1760 1769 if quick_data is not None:
1761 1770 return revset.baseset([quick_data[0]])
1762 1771 if user:
1763 1772 m = revset.matchany(
1764 1773 self.ui,
1765 1774 specs,
1766 1775 lookup=revset.lookupfn(self),
1767 1776 localalias=localalias,
1768 1777 )
1769 1778 else:
1770 1779 m = revset.matchany(None, specs, localalias=localalias)
1771 1780 return m(self)
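# Example with a local alias override (hypothetical alias name):
#
#     revs = repo.anyrevs(
#         [b'mine'], user=True, localalias={b'mine': b'author("alice")'}
#     )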
1772 1781
1773 1782 def url(self):
1774 1783 return b'file:' + self.root
1775 1784
1776 1785 def hook(self, name, throw=False, **args):
1777 1786 """Call a hook, passing this repo instance.
1778 1787
1779 1788 This is a convenience method to aid invoking hooks. Extensions likely
1780 1789 won't call this unless they have registered a custom hook or are
1781 1790 replacing code that is expected to call a hook.
1782 1791 """
1783 1792 return hook.hook(self.ui, self, name, throw, **args)
1784 1793
1785 1794 @filteredpropertycache
1786 1795 def _tagscache(self):
1787 1796 '''Returns a tagscache object that contains various tags-related
1788 1797 caches.'''
1789 1798
1790 1799 # This simplifies its cache management by having one decorated
1791 1800 # function (this one) and the rest simply fetch things from it.
1792 1801 class tagscache(object):
1793 1802 def __init__(self):
1794 1803 # These two define the set of tags for this repository. tags
1795 1804 # maps tag name to node; tagtypes maps tag name to 'global' or
1796 1805 # 'local'. (Global tags are defined by .hgtags across all
1797 1806 # heads, and local tags are defined in .hg/localtags.)
1798 1807 # They constitute the in-memory cache of tags.
1799 1808 self.tags = self.tagtypes = None
1800 1809
1801 1810 self.nodetagscache = self.tagslist = None
1802 1811
1803 1812 cache = tagscache()
1804 1813 cache.tags, cache.tagtypes = self._findtags()
1805 1814
1806 1815 return cache
1807 1816
1808 1817 def tags(self):
1809 1818 '''return a mapping of tag to node'''
1810 1819 t = {}
1811 1820 if self.changelog.filteredrevs:
1812 1821 tags, tt = self._findtags()
1813 1822 else:
1814 1823 tags = self._tagscache.tags
1815 1824 rev = self.changelog.rev
1816 1825 for k, v in pycompat.iteritems(tags):
1817 1826 try:
1818 1827 # ignore tags to unknown nodes
1819 1828 rev(v)
1820 1829 t[k] = v
1821 1830 except (error.LookupError, ValueError):
1822 1831 pass
1823 1832 return t
1824 1833
1825 1834 def _findtags(self):
1826 1835 '''Do the hard work of finding tags. Return a pair of dicts
1827 1836 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1828 1837 maps tag name to a string like \'global\' or \'local\'.
1829 1838 Subclasses or extensions are free to add their own tags, but
1830 1839 should be aware that the returned dicts will be retained for the
1831 1840 duration of the localrepo object.'''
1832 1841
1833 1842 # XXX what tagtype should subclasses/extensions use? Currently
1834 1843 # mq and bookmarks add tags, but do not set the tagtype at all.
1835 1844 # Should each extension invent its own tag type? Should there
1836 1845 # be one tagtype for all such "virtual" tags? Or is the status
1837 1846 # quo fine?
1838 1847
1839 1848 # map tag name to (node, hist)
1840 1849 alltags = tagsmod.findglobaltags(self.ui, self)
1841 1850 # map tag name to tag type
1842 1851 tagtypes = {tag: b'global' for tag in alltags}
1843 1852
1844 1853 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1845 1854
1846 1855 # Build the return dicts. Have to re-encode tag names because
1847 1856 # the tags module always uses UTF-8 (in order not to lose info
1848 1857 # writing to the cache), but the rest of Mercurial wants them in
1849 1858 # local encoding.
1850 1859 tags = {}
1851 1860 for (name, (node, hist)) in pycompat.iteritems(alltags):
1852 1861 if node != nullid:
1853 1862 tags[encoding.tolocal(name)] = node
1854 1863 tags[b'tip'] = self.changelog.tip()
1855 1864 tagtypes = {
1856 1865 encoding.tolocal(name): value
1857 1866 for (name, value) in pycompat.iteritems(tagtypes)
1858 1867 }
1859 1868 return (tags, tagtypes)
1860 1869
1861 1870 def tagtype(self, tagname):
1862 1871 '''
1863 1872 return the type of the given tag. result can be:
1864 1873
1865 1874 'local' : a local tag
1866 1875 'global' : a global tag
1867 1876 None : tag does not exist
1868 1877 '''
1869 1878
1870 1879 return self._tagscache.tagtypes.get(tagname)
1871 1880
1872 1881 def tagslist(self):
1873 1882 '''return a list of tags ordered by revision'''
1874 1883 if not self._tagscache.tagslist:
1875 1884 l = []
1876 1885 for t, n in pycompat.iteritems(self.tags()):
1877 1886 l.append((self.changelog.rev(n), t, n))
1878 1887 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1879 1888
1880 1889 return self._tagscache.tagslist
1881 1890
1882 1891 def nodetags(self, node):
1883 1892 '''return the tags associated with a node'''
1884 1893 if not self._tagscache.nodetagscache:
1885 1894 nodetagscache = {}
1886 1895 for t, n in pycompat.iteritems(self._tagscache.tags):
1887 1896 nodetagscache.setdefault(n, []).append(t)
1888 1897 for tags in pycompat.itervalues(nodetagscache):
1889 1898 tags.sort()
1890 1899 self._tagscache.nodetagscache = nodetagscache
1891 1900 return self._tagscache.nodetagscache.get(node, [])
1892 1901
1893 1902 def nodebookmarks(self, node):
1894 1903 """return the list of bookmarks pointing to the specified node"""
1895 1904 return self._bookmarks.names(node)
1896 1905
1897 1906 def branchmap(self):
1898 1907 '''returns a dictionary {branch: [branchheads]} with branchheads
1899 1908 ordered by increasing revision number'''
1900 1909 return self._branchcaches[self]
1901 1910
1902 1911 @unfilteredmethod
1903 1912 def revbranchcache(self):
1904 1913 if not self._revbranchcache:
1905 1914 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1906 1915 return self._revbranchcache
1907 1916
1908 1917 def branchtip(self, branch, ignoremissing=False):
1909 1918 '''return the tip node for a given branch
1910 1919
1911 1920 If ignoremissing is True, then this method will not raise an error.
1912 1921 This is helpful for callers that only expect None for a missing branch
1913 1922 (e.g. namespace).
1914 1923
1915 1924 '''
1916 1925 try:
1917 1926 return self.branchmap().branchtip(branch)
1918 1927 except KeyError:
1919 1928 if not ignoremissing:
1920 1929 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1921 1930 else:
1922 1931 pass
1923 1932
1924 1933 def lookup(self, key):
1925 1934 node = scmutil.revsymbol(self, key).node()
1926 1935 if node is None:
1927 1936 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1928 1937 return node
1929 1938
1930 1939 def lookupbranch(self, key):
1931 1940 if self.branchmap().hasbranch(key):
1932 1941 return key
1933 1942
1934 1943 return scmutil.revsymbol(self, key).branch()
1935 1944
1936 1945 def known(self, nodes):
1937 1946 cl = self.changelog
1938 1947 get_rev = cl.index.get_rev
1939 1948 filtered = cl.filteredrevs
1940 1949 result = []
1941 1950 for n in nodes:
1942 1951 r = get_rev(n)
1943 1952 resp = not (r is None or r in filtered)
1944 1953 result.append(resp)
1945 1954 return result
1946 1955
1947 1956 def local(self):
1948 1957 return self
1949 1958
1950 1959 def publishing(self):
1951 1960 # it's safe (and desirable) to trust the publish flag unconditionally
1952 1961 # so that we don't finalize changes shared between users via ssh or nfs
1953 1962 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1954 1963
1955 1964 def cancopy(self):
1956 1965 # so statichttprepo's override of local() works
1957 1966 if not self.local():
1958 1967 return False
1959 1968 if not self.publishing():
1960 1969 return True
1961 1970 # if publishing we can't copy if there is filtered content
1962 1971 return not self.filtered(b'visible').changelog.filteredrevs
1963 1972
1964 1973 def shared(self):
1965 1974 '''the type of shared repository (None if not shared)'''
1966 1975 if self.sharedpath != self.path:
1967 1976 return b'store'
1968 1977 return None
1969 1978
1970 1979 def wjoin(self, f, *insidef):
1971 1980 return self.vfs.reljoin(self.root, f, *insidef)
1972 1981
1973 1982 def setparents(self, p1, p2=nullid):
1974 1983 self[None].setparents(p1, p2)
1975 1984 self._quick_access_changeid_invalidate()
1976 1985
1977 1986 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1978 1987 """changeid must be a changeset revision, if specified.
1979 1988 fileid can be a file revision or node."""
1980 1989 return context.filectx(
1981 1990 self, path, changeid, fileid, changectx=changectx
1982 1991 )
1983 1992
1984 1993 def getcwd(self):
1985 1994 return self.dirstate.getcwd()
1986 1995
1987 1996 def pathto(self, f, cwd=None):
1988 1997 return self.dirstate.pathto(f, cwd)
1989 1998
1990 1999 def _loadfilter(self, filter):
1991 2000 if filter not in self._filterpats:
1992 2001 l = []
1993 2002 for pat, cmd in self.ui.configitems(filter):
1994 2003 if cmd == b'!':
1995 2004 continue
1996 2005 mf = matchmod.match(self.root, b'', [pat])
1997 2006 fn = None
1998 2007 params = cmd
1999 2008 for name, filterfn in pycompat.iteritems(self._datafilters):
2000 2009 if cmd.startswith(name):
2001 2010 fn = filterfn
2002 2011 params = cmd[len(name) :].lstrip()
2003 2012 break
2004 2013 if not fn:
2005 2014 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2006 2015 fn.__name__ = 'commandfilter'
2007 2016 # Wrap old filters not supporting keyword arguments
2008 2017 if not pycompat.getargspec(fn)[2]:
2009 2018 oldfn = fn
2010 2019 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2011 2020 fn.__name__ = 'compat-' + oldfn.__name__
2012 2021 l.append((mf, fn, params))
2013 2022 self._filterpats[filter] = l
2014 2023 return self._filterpats[filter]
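# The patterns and commands come from the [encode] and [decode] hgrc
# sections; an illustrative configuration in the spirit of the hgrc
# documentation:
#
#     [encode]
#     # uncompress gzip files on checkin
#     *.gz = pipe: gunzip
#
#     [decode]
#     # recompress gzip files when writing to the working directory
#     *.gz = pipe: gzip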
2015 2024
2016 2025 def _filter(self, filterpats, filename, data):
2017 2026 for mf, fn, cmd in filterpats:
2018 2027 if mf(filename):
2019 2028 self.ui.debug(
2020 2029 b"filtering %s through %s\n"
2021 2030 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2022 2031 )
2023 2032 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2024 2033 break
2025 2034
2026 2035 return data
2027 2036
2028 2037 @unfilteredpropertycache
2029 2038 def _encodefilterpats(self):
2030 2039 return self._loadfilter(b'encode')
2031 2040
2032 2041 @unfilteredpropertycache
2033 2042 def _decodefilterpats(self):
2034 2043 return self._loadfilter(b'decode')
2035 2044
2036 2045 def adddatafilter(self, name, filter):
2037 2046 self._datafilters[name] = filter
2038 2047
2039 2048 def wread(self, filename):
2040 2049 if self.wvfs.islink(filename):
2041 2050 data = self.wvfs.readlink(filename)
2042 2051 else:
2043 2052 data = self.wvfs.read(filename)
2044 2053 return self._filter(self._encodefilterpats, filename, data)
2045 2054
2046 2055 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2047 2056 """write ``data`` into ``filename`` in the working directory
2048 2057
2049 2058 This returns the length of the written (possibly decoded) data.
2050 2059 """
2051 2060 data = self._filter(self._decodefilterpats, filename, data)
2052 2061 if b'l' in flags:
2053 2062 self.wvfs.symlink(data, filename)
2054 2063 else:
2055 2064 self.wvfs.write(
2056 2065 filename, data, backgroundclose=backgroundclose, **kwargs
2057 2066 )
2058 2067 if b'x' in flags:
2059 2068 self.wvfs.setflags(filename, False, True)
2060 2069 else:
2061 2070 self.wvfs.setflags(filename, False, False)
2062 2071 return len(data)
2063 2072
2064 2073 def wwritedata(self, filename, data):
2065 2074 return self._filter(self._decodefilterpats, filename, data)
2066 2075
2067 2076 def currenttransaction(self):
2068 2077 """return the current transaction or None if non exists"""
2069 2078 if self._transref:
2070 2079 tr = self._transref()
2071 2080 else:
2072 2081 tr = None
2073 2082
2074 2083 if tr and tr.running():
2075 2084 return tr
2076 2085 return None
2077 2086
2078 2087 def transaction(self, desc, report=None):
2079 2088 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2080 2089 b'devel', b'check-locks'
2081 2090 ):
2082 2091 if self._currentlock(self._lockref) is None:
2083 2092 raise error.ProgrammingError(b'transaction requires locking')
2084 2093 tr = self.currenttransaction()
2085 2094 if tr is not None:
2086 2095 return tr.nest(name=desc)
2087 2096
2088 2097 # abort here if the journal already exists
2089 2098 if self.svfs.exists(b"journal"):
2090 2099 raise error.RepoError(
2091 2100 _(b"abandoned transaction found"),
2092 2101 hint=_(b"run 'hg recover' to clean up transaction"),
2093 2102 )
2094 2103
2095 2104 idbase = b"%.40f#%f" % (random.random(), time.time())
2096 2105 ha = hex(hashutil.sha1(idbase).digest())
2097 2106 txnid = b'TXN:' + ha
2098 2107 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2099 2108
2100 2109 self._writejournal(desc)
2101 2110 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2102 2111 if report:
2103 2112 rp = report
2104 2113 else:
2105 2114 rp = self.ui.warn
2106 2115 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2107 2116 # we must avoid cyclic reference between repo and transaction.
2108 2117 reporef = weakref.ref(self)
2109 2118 # Code to track tag movement
2110 2119 #
2111 2120 # Since tags are all handled as file content, it is actually quite hard
2112 2121 # to track these movements from a code perspective. So we fall back to
2113 2122 # tracking at the repository level. One could envision tracking changes
2114 2123 # to the '.hgtags' file through changegroup apply but that fails to
2115 2124 # cope with cases where a transaction exposes new heads without a
2116 2125 # changegroup being involved (e.g. phase movement).
2117 2126 #
2118 2127 # For now, we gate the feature behind a flag since this likely comes
2119 2128 # with performance impacts. The current code runs more often than needed
2120 2129 # and does not use caches as much as it could. The current focus is on
2121 2130 # the behavior of the feature so we disable it by default. The flag
2122 2131 # will be removed when we are happy with the performance impact.
2123 2132 #
2124 2133 # Once this feature is no longer experimental move the following
2125 2134 # documentation to the appropriate help section:
2126 2135 #
2127 2136 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2128 2137 # tags (new or changed or deleted tags). In addition the details of
2129 2138 # these changes are made available in a file at:
2130 2139 # ``REPOROOT/.hg/changes/tags.changes``.
2131 2140 # Make sure you check for HG_TAG_MOVED before reading that file as it
2132 2141 # might exist from a previous transaction even if no tags were touched
2133 2142 # in this one. Changes are recorded in a line-based format::
2134 2143 #
2135 2144 # <action> <hex-node> <tag-name>\n
2136 2145 #
2137 2146 # Actions are defined as follows:
2138 2147 # "-R": tag is removed,
2139 2148 # "+A": tag is added,
2140 2149 # "-M": tag is moved (old value),
2141 2150 # "+M": tag is moved (new value),
2142 2151 tracktags = lambda x: None
2143 2152 # experimental config: experimental.hook-track-tags
2144 2153 shouldtracktags = self.ui.configbool(
2145 2154 b'experimental', b'hook-track-tags'
2146 2155 )
2147 2156 if desc != b'strip' and shouldtracktags:
2148 2157 oldheads = self.changelog.headrevs()
2149 2158
2150 2159 def tracktags(tr2):
2151 2160 repo = reporef()
2152 2161 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2153 2162 newheads = repo.changelog.headrevs()
2154 2163 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2155 2164 # note: we compare lists here.
2156 2165 # As we do it only once, building a set would not be cheaper.
2157 2166 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2158 2167 if changes:
2159 2168 tr2.hookargs[b'tag_moved'] = b'1'
2160 2169 with repo.vfs(
2161 2170 b'changes/tags.changes', b'w', atomictemp=True
2162 2171 ) as changesfile:
2163 2172 # note: we do not register the file with the transaction
2164 2173 # because we need it to still exist when the transaction
2165 2174 # is closed (for txnclose hooks)
2166 2175 tagsmod.writediff(changesfile, changes)
2167 2176
2168 2177 def validate(tr2):
2169 2178 """will run pre-closing hooks"""
2170 2179 # XXX the transaction API is a bit lacking here so we take a hacky
2171 2180 # path for now
2172 2181 #
2173 2182 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2174 2183 # dict is copied before these run. In addition we need the data
2175 2184 # available to in-memory hooks too.
2176 2185 #
2177 2186 # Moreover, we also need to make sure this runs before txnclose
2178 2187 # hooks and there is no "pending" mechanism that would execute
2179 2188 # logic only if hooks are about to run.
2180 2189 #
2181 2190 # Fixing this limitation of the transaction is also needed to track
2182 2191 # other families of changes (bookmarks, phases, obsolescence).
2183 2192 #
2184 2193 # This will have to be fixed before we remove the experimental
2185 2194 # gating.
2186 2195 tracktags(tr2)
2187 2196 repo = reporef()
2188 2197
2189 2198 singleheadopt = (b'experimental', b'single-head-per-branch')
2190 2199 singlehead = repo.ui.configbool(*singleheadopt)
2191 2200 if singlehead:
2192 2201 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2193 2202 accountclosed = singleheadsub.get(
2194 2203 b"account-closed-heads", False
2195 2204 )
2196 2205 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2197 2206 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2198 2207 for name, (old, new) in sorted(
2199 2208 tr.changes[b'bookmarks'].items()
2200 2209 ):
2201 2210 args = tr.hookargs.copy()
2202 2211 args.update(bookmarks.preparehookargs(name, old, new))
2203 2212 repo.hook(
2204 2213 b'pretxnclose-bookmark',
2205 2214 throw=True,
2206 2215 **pycompat.strkwargs(args)
2207 2216 )
2208 2217 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2209 2218 cl = repo.unfiltered().changelog
2210 2219 for revs, (old, new) in tr.changes[b'phases']:
2211 2220 for rev in revs:
2212 2221 args = tr.hookargs.copy()
2213 2222 node = hex(cl.node(rev))
2214 2223 args.update(phases.preparehookargs(node, old, new))
2215 2224 repo.hook(
2216 2225 b'pretxnclose-phase',
2217 2226 throw=True,
2218 2227 **pycompat.strkwargs(args)
2219 2228 )
2220 2229
2221 2230 repo.hook(
2222 2231 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2223 2232 )
2224 2233
2225 2234 def releasefn(tr, success):
2226 2235 repo = reporef()
2227 2236 if repo is None:
2228 2237 # If the repo has been GC'd (and this release function is being
2229 2238 # called from transaction.__del__), there's not much we can do,
2230 2239 # so just leave the unfinished transaction there and let the
2231 2240 # user run `hg recover`.
2232 2241 return
2233 2242 if success:
2234 2243 # this should be explicitly invoked here, because
2235 2244 # in-memory changes aren't written out when closing
2236 2245 # the transaction if tr.addfilegenerator (via
2237 2246 # dirstate.write or so) wasn't invoked while the
2238 2247 # transaction was running
2239 2248 repo.dirstate.write(None)
2240 2249 else:
2241 2250 # discard all changes (including ones already written
2242 2251 # out) in this transaction
2243 2252 narrowspec.restorebackup(self, b'journal.narrowspec')
2244 2253 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2245 2254 repo.dirstate.restorebackup(None, b'journal.dirstate')
2246 2255
2247 2256 repo.invalidate(clearfilecache=True)
2248 2257
2249 2258 tr = transaction.transaction(
2250 2259 rp,
2251 2260 self.svfs,
2252 2261 vfsmap,
2253 2262 b"journal",
2254 2263 b"undo",
2255 2264 aftertrans(renames),
2256 2265 self.store.createmode,
2257 2266 validator=validate,
2258 2267 releasefn=releasefn,
2259 2268 checkambigfiles=_cachedfiles,
2260 2269 name=desc,
2261 2270 )
2262 2271 tr.changes[b'origrepolen'] = len(self)
2263 2272 tr.changes[b'obsmarkers'] = set()
2264 2273 tr.changes[b'phases'] = []
2265 2274 tr.changes[b'bookmarks'] = {}
2266 2275
2267 2276 tr.hookargs[b'txnid'] = txnid
2268 2277 tr.hookargs[b'txnname'] = desc
2269 2278 tr.hookargs[b'changes'] = tr.changes
2270 2279 # note: writing the fncache only during finalize means that the file is
2271 2280 # outdated when running hooks. As fncache is used for streaming clones,
2272 2281 # this is not expected to break anything that happens during the hooks.
2273 2282 tr.addfinalize(b'flush-fncache', self.store.write)
2274 2283
2275 2284 def txnclosehook(tr2):
2276 2285 """To be run if transaction is successful, will schedule a hook run
2277 2286 """
2278 2287 # Don't reference tr2 in hook() so we don't hold a reference.
2279 2288 # This reduces memory consumption when there are multiple
2280 2289 # transactions per lock. This can likely go away if issue5045
2281 2290 # fixes the function accumulation.
2282 2291 hookargs = tr2.hookargs
2283 2292
2284 2293 def hookfunc(unused_success):
2285 2294 repo = reporef()
2286 2295 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2287 2296 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2288 2297 for name, (old, new) in bmchanges:
2289 2298 args = tr.hookargs.copy()
2290 2299 args.update(bookmarks.preparehookargs(name, old, new))
2291 2300 repo.hook(
2292 2301 b'txnclose-bookmark',
2293 2302 throw=False,
2294 2303 **pycompat.strkwargs(args)
2295 2304 )
2296 2305
2297 2306 if hook.hashook(repo.ui, b'txnclose-phase'):
2298 2307 cl = repo.unfiltered().changelog
2299 2308 phasemv = sorted(
2300 2309 tr.changes[b'phases'], key=lambda r: r[0][0]
2301 2310 )
2302 2311 for revs, (old, new) in phasemv:
2303 2312 for rev in revs:
2304 2313 args = tr.hookargs.copy()
2305 2314 node = hex(cl.node(rev))
2306 2315 args.update(phases.preparehookargs(node, old, new))
2307 2316 repo.hook(
2308 2317 b'txnclose-phase',
2309 2318 throw=False,
2310 2319 **pycompat.strkwargs(args)
2311 2320 )
2312 2321
2313 2322 repo.hook(
2314 2323 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2315 2324 )
2316 2325
2317 2326 reporef()._afterlock(hookfunc)
2318 2327
2319 2328 tr.addfinalize(b'txnclose-hook', txnclosehook)
2320 2329 # Include a leading "-" to make it happen before the transaction summary
2321 2330 # reports registered via scmutil.registersummarycallback() whose names
2322 2331 # are 00-txnreport etc. That way, the caches will be warm when the
2323 2332 # callbacks run.
2324 2333 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2325 2334
2326 2335 def txnaborthook(tr2):
2327 2336 """To be run if transaction is aborted
2328 2337 """
2329 2338 reporef().hook(
2330 2339 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2331 2340 )
2332 2341
2333 2342 tr.addabort(b'txnabort-hook', txnaborthook)
2334 2343 # avoid eager cache invalidation. in-memory data should be identical
2335 2344 # to stored data if the transaction has no error.
2336 2345 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2337 2346 self._transref = weakref.ref(tr)
2338 2347 scmutil.registersummarycallback(self, tr, desc)
2339 2348 return tr
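# Typical usage (a sketch; the store lock must already be held, and the
# transaction name is made up):
#
#     with repo.lock():
#         with repo.transaction(b'my-operation') as tr:
#             ...  # write store data; an exception aborts the transaction
#
# A nested call simply returns ``tr.nest()``, so only the outermost
# transaction actually closes or aborts the journal.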
2340 2349
2341 2350 def _journalfiles(self):
2342 2351 return (
2343 2352 (self.svfs, b'journal'),
2344 2353 (self.svfs, b'journal.narrowspec'),
2345 2354 (self.vfs, b'journal.narrowspec.dirstate'),
2346 2355 (self.vfs, b'journal.dirstate'),
2347 2356 (self.vfs, b'journal.branch'),
2348 2357 (self.vfs, b'journal.desc'),
2349 2358 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2350 2359 (self.svfs, b'journal.phaseroots'),
2351 2360 )
2352 2361
2353 2362 def undofiles(self):
2354 2363 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2355 2364
2356 2365 @unfilteredmethod
2357 2366 def _writejournal(self, desc):
2358 2367 self.dirstate.savebackup(None, b'journal.dirstate')
2359 2368 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2360 2369 narrowspec.savebackup(self, b'journal.narrowspec')
2361 2370 self.vfs.write(
2362 2371 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2363 2372 )
2364 2373 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2365 2374 bookmarksvfs = bookmarks.bookmarksvfs(self)
2366 2375 bookmarksvfs.write(
2367 2376 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2368 2377 )
2369 2378 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2370 2379
2371 2380 def recover(self):
2372 2381 with self.lock():
2373 2382 if self.svfs.exists(b"journal"):
2374 2383 self.ui.status(_(b"rolling back interrupted transaction\n"))
2375 2384 vfsmap = {
2376 2385 b'': self.svfs,
2377 2386 b'plain': self.vfs,
2378 2387 }
2379 2388 transaction.rollback(
2380 2389 self.svfs,
2381 2390 vfsmap,
2382 2391 b"journal",
2383 2392 self.ui.warn,
2384 2393 checkambigfiles=_cachedfiles,
2385 2394 )
2386 2395 self.invalidate()
2387 2396 return True
2388 2397 else:
2389 2398 self.ui.warn(_(b"no interrupted transaction available\n"))
2390 2399 return False
2391 2400
2392 2401 def rollback(self, dryrun=False, force=False):
2393 2402 wlock = lock = dsguard = None
2394 2403 try:
2395 2404 wlock = self.wlock()
2396 2405 lock = self.lock()
2397 2406 if self.svfs.exists(b"undo"):
2398 2407 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2399 2408
2400 2409 return self._rollback(dryrun, force, dsguard)
2401 2410 else:
2402 2411 self.ui.warn(_(b"no rollback information available\n"))
2403 2412 return 1
2404 2413 finally:
2405 2414 release(dsguard, lock, wlock)
2406 2415
2407 2416 @unfilteredmethod # Until we get smarter cache management
2408 2417 def _rollback(self, dryrun, force, dsguard):
2409 2418 ui = self.ui
2410 2419 try:
2411 2420 args = self.vfs.read(b'undo.desc').splitlines()
2412 2421 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2413 2422 if len(args) >= 3:
2414 2423 detail = args[2]
2415 2424 oldtip = oldlen - 1
2416 2425
2417 2426 if detail and ui.verbose:
2418 2427 msg = _(
2419 2428 b'repository tip rolled back to revision %d'
2420 2429 b' (undo %s: %s)\n'
2421 2430 ) % (oldtip, desc, detail)
2422 2431 else:
2423 2432 msg = _(
2424 2433 b'repository tip rolled back to revision %d (undo %s)\n'
2425 2434 ) % (oldtip, desc)
2426 2435 except IOError:
2427 2436 msg = _(b'rolling back unknown transaction\n')
2428 2437 desc = None
2429 2438
2430 2439 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2431 2440 raise error.Abort(
2432 2441 _(
2433 2442 b'rollback of last commit while not checked out '
2434 2443 b'may lose data'
2435 2444 ),
2436 2445 hint=_(b'use -f to force'),
2437 2446 )
2438 2447
2439 2448 ui.status(msg)
2440 2449 if dryrun:
2441 2450 return 0
2442 2451
2443 2452 parents = self.dirstate.parents()
2444 2453 self.destroying()
2445 2454 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2446 2455 transaction.rollback(
2447 2456 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2448 2457 )
2449 2458 bookmarksvfs = bookmarks.bookmarksvfs(self)
2450 2459 if bookmarksvfs.exists(b'undo.bookmarks'):
2451 2460 bookmarksvfs.rename(
2452 2461 b'undo.bookmarks', b'bookmarks', checkambig=True
2453 2462 )
2454 2463 if self.svfs.exists(b'undo.phaseroots'):
2455 2464 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2456 2465 self.invalidate()
2457 2466
2458 2467 has_node = self.changelog.index.has_node
2459 2468 parentgone = any(not has_node(p) for p in parents)
2460 2469 if parentgone:
2461 2470 # prevent dirstateguard from overwriting the already restored one
2462 2471 dsguard.close()
2463 2472
2464 2473 narrowspec.restorebackup(self, b'undo.narrowspec')
2465 2474 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2466 2475 self.dirstate.restorebackup(None, b'undo.dirstate')
2467 2476 try:
2468 2477 branch = self.vfs.read(b'undo.branch')
2469 2478 self.dirstate.setbranch(encoding.tolocal(branch))
2470 2479 except IOError:
2471 2480 ui.warn(
2472 2481 _(
2473 2482 b'named branch could not be reset: '
2474 2483 b'current branch is still \'%s\'\n'
2475 2484 )
2476 2485 % self.dirstate.branch()
2477 2486 )
2478 2487
2479 2488 parents = tuple([p.rev() for p in self[None].parents()])
2480 2489 if len(parents) > 1:
2481 2490 ui.status(
2482 2491 _(
2483 2492 b'working directory now based on '
2484 2493 b'revisions %d and %d\n'
2485 2494 )
2486 2495 % parents
2487 2496 )
2488 2497 else:
2489 2498 ui.status(
2490 2499 _(b'working directory now based on revision %d\n') % parents
2491 2500 )
2492 2501 mergestatemod.mergestate.clean(self, self[b'.'].node())
2493 2502
2494 2503 # TODO: if we know which new heads may result from this rollback, pass
2495 2504 # them to destroy(), which will prevent the branchhead cache from being
2496 2505 # invalidated.
2497 2506 self.destroyed()
2498 2507 return 0
2499 2508
2500 2509 def _buildcacheupdater(self, newtransaction):
2501 2510 """called during transaction to build the callback updating cache
2502 2511
2503 2512 Lives on the repository to help extensions that might want to augment
2504 2513 this logic. For this purpose, the created transaction is passed to the
2505 2514 method.
2506 2515 """
2507 2516 # we must avoid cyclic reference between repo and transaction.
2508 2517 reporef = weakref.ref(self)
2509 2518
2510 2519 def updater(tr):
2511 2520 repo = reporef()
2512 2521 repo.updatecaches(tr)
2513 2522
2514 2523 return updater
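# Standalone illustration of the weakref pattern used above (hypothetical
# names):
#
#     import weakref
#
#     def make_updater(repo):
#         reporef = weakref.ref(repo)  # keep no strong reference
#
#         def updater(tr):
#             repo = reporef()
#             if repo is not None:  # repo may have been collected
#                 repo.updatecaches(tr)
#
#         return updater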
2515 2524
2516 2525 @unfilteredmethod
2517 2526 def updatecaches(self, tr=None, full=False):
2518 2527 """warm appropriate caches
2519 2528
2520 2529 If this function is called after a transaction has closed, the transaction
2521 2530 will be available in the 'tr' argument. This can be used to selectively
2522 2531 update caches relevant to the changes in that transaction.
2523 2532
2524 2533 If 'full' is set, make sure all caches the function knows about have
2525 2534 up-to-date data, even the ones usually loaded more lazily.
2526 2535 """
2527 2536 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2528 2537 # During strip, many caches are invalid but
2529 2538 # a later call to `destroyed` will refresh them.
2530 2539 return
2531 2540
2532 2541 if tr is None or tr.changes[b'origrepolen'] < len(self):
2533 2542 # accessing the 'served' branchmap should refresh all the others,
2534 2543 self.ui.debug(b'updating the branch cache\n')
2535 2544 self.filtered(b'served').branchmap()
2536 2545 self.filtered(b'served.hidden').branchmap()
2537 2546
2538 2547 if full:
2539 2548 unfi = self.unfiltered()
2540 2549
2541 2550 self.changelog.update_caches(transaction=tr)
2542 2551 self.manifestlog.update_caches(transaction=tr)
2543 2552
2544 2553 rbc = unfi.revbranchcache()
2545 2554 for r in unfi.changelog:
2546 2555 rbc.branchinfo(r)
2547 2556 rbc.write()
2548 2557
2549 2558 # ensure the working copy parents are in the manifestfulltextcache
2550 2559 for ctx in self[b'.'].parents():
2551 2560 ctx.manifest() # accessing the manifest is enough
2552 2561
2553 2562 # accessing fnode cache warms the cache
2554 2563 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2555 2564 # accessing tags warms the cache
2556 2565 self.tags()
2557 2566 self.filtered(b'served').tags()
2558 2567
2559 2568 # The `full` arg is documented as updating even the lazily-loaded
2560 2569 # caches immediately, so we're forcing a write to cause these caches
2561 2570 # to be warmed up even if they haven't explicitly been requested
2562 2571 # yet (if they've never been used by hg, they won't ever have been
2563 2572 # written, even if they're a subset of another kind of cache that
2564 2573 # *has* been used).
2565 2574 for filt in repoview.filtertable.keys():
2566 2575 filtered = self.filtered(filt)
2567 2576 filtered.branchmap().write(filtered)
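# The ``full`` mode is what ``hg debugupdatecaches`` requests, roughly:
#
#     $ hg debugupdatecaches    # ends up in updatecaches(full=True)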
2568 2577
2569 2578 def invalidatecaches(self):
2570 2579
2571 2580 if '_tagscache' in vars(self):
2572 2581 # can't use delattr on proxy
2573 2582 del self.__dict__['_tagscache']
2574 2583
2575 2584 self._branchcaches.clear()
2576 2585 self.invalidatevolatilesets()
2577 2586 self._sparsesignaturecache.clear()
2578 2587
2579 2588 def invalidatevolatilesets(self):
2580 2589 self.filteredrevcache.clear()
2581 2590 obsolete.clearobscaches(self)
2582 2591 self._quick_access_changeid_invalidate()
2583 2592
2584 2593 def invalidatedirstate(self):
2585 2594 '''Invalidates the dirstate, causing the next call to dirstate
2586 2595 to check if it was modified since the last time it was read,
2587 2596 rereading it if it has.
2588 2597
2589 2598 This differs from dirstate.invalidate() in that it doesn't always
2590 2599 reread the dirstate. Use dirstate.invalidate() if you want to
2591 2600 explicitly read the dirstate again (i.e. restoring it to a previous
2592 2601 known good state).'''
2593 2602 if hasunfilteredcache(self, 'dirstate'):
2594 2603 for k in self.dirstate._filecache:
2595 2604 try:
2596 2605 delattr(self.dirstate, k)
2597 2606 except AttributeError:
2598 2607 pass
2599 2608 delattr(self.unfiltered(), 'dirstate')
2600 2609
2601 2610 def invalidate(self, clearfilecache=False):
2602 2611 '''Invalidates both store and non-store parts other than dirstate
2603 2612
2604 2613 If a transaction is running, invalidation of store is omitted,
2605 2614 because discarding in-memory changes might cause inconsistency
2606 2615 (e.g. incomplete fncache causes unintentional failure, but
2607 2616 a redundant one doesn't).
2608 2617 '''
2609 2618 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2610 2619 for k in list(self._filecache.keys()):
2611 2620 # dirstate is invalidated separately in invalidatedirstate()
2612 2621 if k == b'dirstate':
2613 2622 continue
2614 2623 if (
2615 2624 k == b'changelog'
2616 2625 and self.currenttransaction()
2617 2626 and self.changelog._delayed
2618 2627 ):
2619 2628 # The changelog object may store unwritten revisions. We don't
2620 2629 # want to lose them.
2621 2630 # TODO: Solve the problem instead of working around it.
2622 2631 continue
2623 2632
2624 2633 if clearfilecache:
2625 2634 del self._filecache[k]
2626 2635 try:
2627 2636 delattr(unfiltered, k)
2628 2637 except AttributeError:
2629 2638 pass
2630 2639 self.invalidatecaches()
2631 2640 if not self.currenttransaction():
2632 2641 # TODO: Changing contents of store outside transaction
2633 2642 # causes inconsistency. We should make in-memory store
2634 2643 # changes detectable, and abort if changed.
2635 2644 self.store.invalidatecaches()
2636 2645
2637 2646 def invalidateall(self):
2638 2647 '''Fully invalidates both store and non-store parts, causing the
2639 2648 subsequent operation to reread any outside changes.'''
2640 2649 # extension should hook this to invalidate its caches
2641 2650 self.invalidate()
2642 2651 self.invalidatedirstate()
2643 2652
2644 2653 @unfilteredmethod
2645 2654 def _refreshfilecachestats(self, tr):
2646 2655 """Reload stats of cached files so that they are flagged as valid"""
2647 2656 for k, ce in self._filecache.items():
2648 2657 k = pycompat.sysstr(k)
2649 2658 if k == 'dirstate' or k not in self.__dict__:
2650 2659 continue
2651 2660 ce.refresh()
2652 2661
2653 2662 def _lock(
2654 2663 self,
2655 2664 vfs,
2656 2665 lockname,
2657 2666 wait,
2658 2667 releasefn,
2659 2668 acquirefn,
2660 2669 desc,
2661 2670 inheritchecker=None,
2662 2671 parentenvvar=None,
2663 2672 ):
2664 2673 parentlock = None
2665 2674 # the contents of parentenvvar are used by the underlying lock to
2666 2675 # determine whether it can be inherited
2667 2676 if parentenvvar is not None:
2668 2677 parentlock = encoding.environ.get(parentenvvar)
2669 2678
2670 2679 timeout = 0
2671 2680 warntimeout = 0
2672 2681 if wait:
2673 2682 timeout = self.ui.configint(b"ui", b"timeout")
2674 2683 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2675 2684 # internal config: ui.signal-safe-lock
2676 2685 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2677 2686
2678 2687 l = lockmod.trylock(
2679 2688 self.ui,
2680 2689 vfs,
2681 2690 lockname,
2682 2691 timeout,
2683 2692 warntimeout,
2684 2693 releasefn=releasefn,
2685 2694 acquirefn=acquirefn,
2686 2695 desc=desc,
2687 2696 inheritchecker=inheritchecker,
2688 2697 parentlock=parentlock,
2689 2698 signalsafe=signalsafe,
2690 2699 )
2691 2700 return l
2692 2701
2693 2702 def _afterlock(self, callback):
2694 2703 """add a callback to be run when the repository is fully unlocked
2695 2704
2696 2705 The callback will be executed when the outermost lock is released
2697 2706 (with wlock being higher level than 'lock')."""
2698 2707 for ref in (self._wlockref, self._lockref):
2699 2708 l = ref and ref()
2700 2709 if l and l.held:
2701 2710 l.postrelease.append(callback)
2702 2711 break
2703 2712 else: # no lock has been found.
2704 2713 callback(True)
2705 2714
2706 2715 def lock(self, wait=True):
2707 2716 '''Lock the repository store (.hg/store) and return a weak reference
2708 2717 to the lock. Use this before modifying the store (e.g. committing or
2709 2718 stripping). If you are opening a transaction, get a lock as well.
2710 2719
2711 2720 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2712 2721 'wlock' first to avoid a deadlock hazard.'''
2713 2722 l = self._currentlock(self._lockref)
2714 2723 if l is not None:
2715 2724 l.lock()
2716 2725 return l
2717 2726
2718 2727 l = self._lock(
2719 2728 vfs=self.svfs,
2720 2729 lockname=b"lock",
2721 2730 wait=wait,
2722 2731 releasefn=None,
2723 2732 acquirefn=self.invalidate,
2724 2733 desc=_(b'repository %s') % self.origroot,
2725 2734 )
2726 2735 self._lockref = weakref.ref(l)
2727 2736 return l
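# Correct acquisition order when both locks are needed (see ``commit``
# below for a real caller):
#
#     with repo.wlock(), repo.lock():
#         ...  # modify both the working copy and the store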
2728 2737
2729 2738 def _wlockchecktransaction(self):
2730 2739 if self.currenttransaction() is not None:
2731 2740 raise error.LockInheritanceContractViolation(
2732 2741 b'wlock cannot be inherited in the middle of a transaction'
2733 2742 )
2734 2743
2735 2744 def wlock(self, wait=True):
2736 2745 '''Lock the non-store parts of the repository (everything under
2737 2746 .hg except .hg/store) and return a weak reference to the lock.
2738 2747
2739 2748 Use this before modifying files in .hg.
2740 2749
2741 2750 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2742 2751 'wlock' first to avoid a deadlock hazard.'''
2743 2752 l = self._wlockref and self._wlockref()
2744 2753 if l is not None and l.held:
2745 2754 l.lock()
2746 2755 return l
2747 2756
2748 2757 # We do not need to check for non-waiting lock acquisition. Such
2749 2758 # acquisition would not cause a deadlock as it would just fail.
2750 2759 if wait and (
2751 2760 self.ui.configbool(b'devel', b'all-warnings')
2752 2761 or self.ui.configbool(b'devel', b'check-locks')
2753 2762 ):
2754 2763 if self._currentlock(self._lockref) is not None:
2755 2764 self.ui.develwarn(b'"wlock" acquired after "lock"')
2756 2765
2757 2766 def unlock():
2758 2767 if self.dirstate.pendingparentchange():
2759 2768 self.dirstate.invalidate()
2760 2769 else:
2761 2770 self.dirstate.write(None)
2762 2771
2763 2772 self._filecache[b'dirstate'].refresh()
2764 2773
2765 2774 l = self._lock(
2766 2775 self.vfs,
2767 2776 b"wlock",
2768 2777 wait,
2769 2778 unlock,
2770 2779 self.invalidatedirstate,
2771 2780 _(b'working directory of %s') % self.origroot,
2772 2781 inheritchecker=self._wlockchecktransaction,
2773 2782 parentenvvar=b'HG_WLOCK_LOCKER',
2774 2783 )
2775 2784 self._wlockref = weakref.ref(l)
2776 2785 return l
2777 2786
2778 2787 def _currentlock(self, lockref):
2779 2788 """Returns the lock if it's held, or None if it's not."""
2780 2789 if lockref is None:
2781 2790 return None
2782 2791 l = lockref()
2783 2792 if l is None or not l.held:
2784 2793 return None
2785 2794 return l
2786 2795
2787 2796 def currentwlock(self):
2788 2797 """Returns the wlock if it's held, or None if it's not."""
2789 2798 return self._currentlock(self._wlockref)
2790 2799
2791 2800 def checkcommitpatterns(self, wctx, match, status, fail):
2792 2801 """check for commit arguments that aren't committable"""
2793 2802 if match.isexact() or match.prefix():
2794 2803 matched = set(status.modified + status.added + status.removed)
2795 2804
2796 2805 for f in match.files():
2797 2806 f = self.dirstate.normalize(f)
2798 2807 if f == b'.' or f in matched or f in wctx.substate:
2799 2808 continue
2800 2809 if f in status.deleted:
2801 2810 fail(f, _(b'file not found!'))
2802 2811 # Is it a directory that exists or used to exist?
2803 2812 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2804 2813 d = f + b'/'
2805 2814 for mf in matched:
2806 2815 if mf.startswith(d):
2807 2816 break
2808 2817 else:
2809 2818 fail(f, _(b"no match under directory!"))
2810 2819 elif f not in self.dirstate:
2811 2820 fail(f, _(b"file not tracked!"))
2812 2821
2813 2822 @unfilteredmethod
2814 2823 def commit(
2815 2824 self,
2816 2825 text=b"",
2817 2826 user=None,
2818 2827 date=None,
2819 2828 match=None,
2820 2829 force=False,
2821 2830 editor=None,
2822 2831 extra=None,
2823 2832 ):
2824 2833 """Add a new revision to current repository.
2825 2834
2826 2835 Revision information is gathered from the working directory,
2827 2836 match can be used to filter the committed files. If editor is
2828 2837 supplied, it is called to get a commit message.
2829 2838 """
2830 2839 if extra is None:
2831 2840 extra = {}
2832 2841
2833 2842 def fail(f, msg):
2834 2843 raise error.Abort(b'%s: %s' % (f, msg))
2835 2844
2836 2845 if not match:
2837 2846 match = matchmod.always()
2838 2847
2839 2848 if not force:
2840 2849 match.bad = fail
2841 2850
2842 2851 # lock() for recent changelog (see issue4368)
2843 2852 with self.wlock(), self.lock():
2844 2853 wctx = self[None]
2845 2854 merge = len(wctx.parents()) > 1
2846 2855
2847 2856 if not force and merge and not match.always():
2848 2857 raise error.Abort(
2849 2858 _(
2850 2859 b'cannot partially commit a merge '
2851 2860 b'(do not specify files or patterns)'
2852 2861 )
2853 2862 )
2854 2863
2855 2864 status = self.status(match=match, clean=force)
2856 2865 if force:
2857 2866 status.modified.extend(
2858 2867 status.clean
2859 2868 ) # mq may commit clean files
2860 2869
2861 2870 # check subrepos
2862 2871 subs, commitsubs, newstate = subrepoutil.precommit(
2863 2872 self.ui, wctx, status, match, force=force
2864 2873 )
2865 2874
2866 2875 # make sure all explicit patterns are matched
2867 2876 if not force:
2868 2877 self.checkcommitpatterns(wctx, match, status, fail)
2869 2878
2870 2879 cctx = context.workingcommitctx(
2871 2880 self, status, text, user, date, extra
2872 2881 )
2873 2882
2874 2883 ms = mergestatemod.mergestate.read(self)
2875 2884 mergeutil.checkunresolved(ms)
2876 2885
2877 2886 # internal config: ui.allowemptycommit
2878 2887 if cctx.isempty() and not self.ui.configbool(
2879 2888 b'ui', b'allowemptycommit'
2880 2889 ):
2881 2890 self.ui.debug(b'nothing to commit, clearing merge state\n')
2882 2891 ms.reset()
2883 2892 return None
2884 2893
2885 2894 if merge and cctx.deleted():
2886 2895 raise error.Abort(_(b"cannot commit merge with missing files"))
2887 2896
2888 2897 if editor:
2889 2898 cctx._text = editor(self, cctx, subs)
2890 2899 edited = text != cctx._text
2891 2900
2892 2901 # Save commit message in case this transaction gets rolled back
2893 2902 # (e.g. by a pretxncommit hook). Leave the content alone on
2894 2903 # the assumption that the user will use the same editor again.
2895 2904 msgfn = self.savecommitmessage(cctx._text)
2896 2905
2897 2906 # commit subs and write new state
2898 2907 if subs:
2899 2908 uipathfn = scmutil.getuipathfn(self)
2900 2909 for s in sorted(commitsubs):
2901 2910 sub = wctx.sub(s)
2902 2911 self.ui.status(
2903 2912 _(b'committing subrepository %s\n')
2904 2913 % uipathfn(subrepoutil.subrelpath(sub))
2905 2914 )
2906 2915 sr = sub.commit(cctx._text, user, date)
2907 2916 newstate[s] = (newstate[s][0], sr)
2908 2917 subrepoutil.writestate(self, newstate)
2909 2918
2910 2919 p1, p2 = self.dirstate.parents()
2911 2920 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2912 2921 try:
2913 2922 self.hook(
2914 2923 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2915 2924 )
2916 2925 with self.transaction(b'commit'):
2917 2926 ret = self.commitctx(cctx, True)
2918 2927 # update bookmarks, dirstate and mergestate
2919 2928 bookmarks.update(self, [p1, p2], ret)
2920 2929 cctx.markcommitted(ret)
2921 2930 ms.reset()
2922 2931 except: # re-raises
2923 2932 if edited:
2924 2933 self.ui.write(
2925 2934 _(b'note: commit message saved in %s\n') % msgfn
2926 2935 )
2927 2936 self.ui.write(
2928 2937 _(
2929 2938 b"note: use 'hg commit --logfile "
2930 2939 b".hg/last-message.txt --edit' to reuse it\n"
2931 2940 )
2932 2941 )
2933 2942 raise
2934 2943
2935 2944 def commithook(unused_success):
2936 2945 # hack for commands that use a temporary commit (e.g. histedit):
2937 2946 # the temporary commit may have been stripped before the hook runs
2938 2947 if self.changelog.hasnode(ret):
2939 2948 self.hook(
2940 2949 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2941 2950 )
2942 2951
2943 2952 self._afterlock(commithook)
2944 2953 return ret
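# Programmatic use (a sketch; the message and user are made up):
#
#     node = repo.commit(
#         text=b'fix the frobnicator',
#         user=b'Alice <alice@example.com>',
#     )
#     if node is None:
#         repo.ui.status(b'nothing changed\n')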
2945 2954
2946 2955 @unfilteredmethod
2947 2956 def commitctx(self, ctx, error=False, origctx=None):
2948 2957 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2949 2958
2950 2959 @unfilteredmethod
2951 2960 def destroying(self):
2952 2961 '''Inform the repository that nodes are about to be destroyed.
2953 2962 Intended for use by strip and rollback, so there's a common
2954 2963 place for anything that has to be done before destroying history.
2955 2964
2956 2965 This is mostly useful for saving state that is in memory and waiting
2957 2966 to be flushed when the current lock is released. Because a call to
2958 2967 destroyed is imminent, the repo will be invalidated causing those
2959 2968 changes to stay in memory (waiting for the next unlock), or vanish
2960 2969 completely.
2961 2970 '''
2962 2971 # When using the same lock to commit and strip, the phasecache is left
2963 2972 # dirty after committing. Then when we strip, the repo is invalidated,
2964 2973 # causing those changes to disappear.
2965 2974 if '_phasecache' in vars(self):
2966 2975 self._phasecache.write()
2967 2976
2968 2977 @unfilteredmethod
2969 2978 def destroyed(self):
2970 2979 '''Inform the repository that nodes have been destroyed.
2971 2980 Intended for use by strip and rollback, so there's a common
2972 2981 place for anything that has to be done after destroying history.
2973 2982 '''
2974 2983 # When one tries to:
2975 2984 # 1) destroy nodes thus calling this method (e.g. strip)
2976 2985 # 2) use phasecache somewhere (e.g. commit)
2977 2986 #
2978 2987 # then 2) will fail because the phasecache contains nodes that were
2979 2988 # removed. We can either remove phasecache from the filecache,
2980 2989 # causing it to reload next time it is accessed, or simply filter
2981 2990 # the removed nodes now and write the updated cache.
2982 2991 self._phasecache.filterunknown(self)
2983 2992 self._phasecache.write()
2984 2993
2985 2994 # refresh all repository caches
2986 2995 self.updatecaches()
2987 2996
2988 2997 # Ensure the persistent tag cache is updated. Doing it now
2989 2998 # means that the tag cache only has to worry about destroyed
2990 2999 # heads immediately after a strip/rollback. That in turn
2991 3000 # guarantees that "cachetip == currenttip" (comparing both rev
2992 3001 # and node) always means no nodes have been added or destroyed.
2993 3002
2994 3003 # XXX this is suboptimal when qrefresh'ing: we strip the current
2995 3004 # head, refresh the tag cache, then immediately add a new head.
2996 3005 # But I think doing it this way is necessary for the "instant
2997 3006 # tag cache retrieval" case to work.
2998 3007 self.invalidate()
2999 3008
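# A minimal sketch of the calling convention the two methods above imply
# (hypothetical caller; the real users are strip and rollback, per the
# docstrings):
#
#   with repo.lock():
#       repo.destroying()        # flush pending in-memory state first
#       ...remove revisions...   # the destructive operation itself
#       repo.destroyed()         # filter stale caches and refresh them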
3000 3009 def status(
3001 3010 self,
3002 3011 node1=b'.',
3003 3012 node2=None,
3004 3013 match=None,
3005 3014 ignored=False,
3006 3015 clean=False,
3007 3016 unknown=False,
3008 3017 listsubrepos=False,
3009 3018 ):
3010 3019 '''a convenience method that calls node1.status(node2)'''
3011 3020 return self[node1].status(
3012 3021 node2, match, ignored, clean, unknown, listsubrepos
3013 3022 )
3014 3023
3015 3024 def addpostdsstatus(self, ps):
3016 3025 """Add a callback to run within the wlock, at the point at which status
3017 3026 fixups happen.
3018 3027
3019 3028 On status completion, callback(wctx, status) will be called with the
3020 3029 wlock held, unless the dirstate has changed from underneath or the wlock
3021 3030 couldn't be grabbed.
3022 3031
3023 3032 Callbacks should not capture and use a cached copy of the dirstate --
3024 3033 it might change in the meantime. Instead, they should access the
3025 3034 dirstate via wctx.repo().dirstate.
3026 3035
3027 3036 This list is emptied out after each status run -- extensions should
3028 3037 make sure they add to this list each time dirstate.status is called.
3029 3038 Extensions should also make sure they don't call this for statuses
3030 3039 that don't involve the dirstate.
3031 3040 """
3032 3041
3033 3042 # The list is located here for uniqueness reasons -- it is actually
3034 3043 # managed by the workingctx, but that isn't unique per-repo.
3035 3044 self._postdsstatus.append(ps)
3036 3045
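# A minimal sketch of registering a callback (hypothetical extension code;
# the signature follows the docstring above):
#
#   def fixup(wctx, status):
#       # runs with the wlock held; read via wctx.repo().dirstate, not a
#       # cached dirstate reference
#       ...
#
#   repo.addpostdsstatus(fixup)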
3037 3046 def postdsstatus(self):
3038 3047 """Used by workingctx to get the list of post-dirstate-status hooks."""
3039 3048 return self._postdsstatus
3040 3049
3041 3050 def clearpostdsstatus(self):
3042 3051 """Used by workingctx to clear post-dirstate-status hooks."""
3043 3052 del self._postdsstatus[:]
3044 3053
3045 3054 def heads(self, start=None):
3046 3055 if start is None:
3047 3056 cl = self.changelog
3048 3057 headrevs = reversed(cl.headrevs())
3049 3058 return [cl.node(rev) for rev in headrevs]
3050 3059
3051 3060 heads = self.changelog.heads(start)
3052 3061 # sort the output in rev descending order
3053 3062 return sorted(heads, key=self.changelog.rev, reverse=True)
3054 3063
3055 3064 def branchheads(self, branch=None, start=None, closed=False):
3056 3065 '''return a (possibly filtered) list of heads for the given branch
3057 3066
3058 3067 Heads are returned in topological order, from newest to oldest.
3059 3068 If branch is None, use the dirstate branch.
3060 3069 If start is not None, return only heads reachable from start.
3061 3070 If closed is True, return heads that are marked as closed as well.
3062 3071 '''
3063 3072 if branch is None:
3064 3073 branch = self[None].branch()
3065 3074 branches = self.branchmap()
3066 3075 if not branches.hasbranch(branch):
3067 3076 return []
3068 3077 # the cache returns heads ordered lowest to highest
3069 3078 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3070 3079 if start is not None:
3071 3080 # filter out the heads that cannot be reached from startrev
3072 3081 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3073 3082 bheads = [h for h in bheads if h in fbheads]
3074 3083 return bheads
3075 3084
3076 3085 def branches(self, nodes):
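# For each starting node, follow first parents down to the first
# ancestor that is a merge or a root, and report the tuple
# (start, that ancestor, its p1, its p2); like between() below, this
# historically backs a legacy wire-protocol command.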
3077 3086 if not nodes:
3078 3087 nodes = [self.changelog.tip()]
3079 3088 b = []
3080 3089 for n in nodes:
3081 3090 t = n
3082 3091 while True:
3083 3092 p = self.changelog.parents(n)
3084 3093 if p[1] != nullid or p[0] == nullid:
3085 3094 b.append((t, n, p[0], p[1]))
3086 3095 break
3087 3096 n = p[0]
3088 3097 return b
3089 3098
3090 3099 def between(self, pairs):
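# For each (top, bottom) pair, walk the first-parent chain from top
# toward bottom and collect the nodes at exponentially growing
# distances 1, 2, 4, 8, ... from top, so a range can be narrowed in
# O(log n) round trips (historically this backs the legacy 'between'
# wire-protocol command).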
3091 3100 r = []
3092 3101
3093 3102 for top, bottom in pairs:
3094 3103 n, l, i = top, [], 0
3095 3104 f = 1
3096 3105
3097 3106 while n != bottom and n != nullid:
3098 3107 p = self.changelog.parents(n)[0]
3099 3108 if i == f:
3100 3109 l.append(n)
3101 3110 f = f * 2
3102 3111 n = p
3103 3112 i += 1
3104 3113
3105 3114 r.append(l)
3106 3115
3107 3116 return r
3108 3117
3109 3118 def checkpush(self, pushop):
3110 3119 """Extensions can override this function if additional checks have
3111 3120 to be performed before pushing, or call it if they override the push
3112 3121 command.
3113 3122 """
3114 3123
3115 3124 @unfilteredpropertycache
3116 3125 def prepushoutgoinghooks(self):
3117 3126 """Return a util.hooks instance whose hooks are called, before
3118 3127 changesets are pushed, with a pushop providing repo, remote and outgoing.
3119 3128 """
3120 3129 return util.hooks()
3121 3130
3122 3131 def pushkey(self, namespace, key, old, new):
3123 3132 try:
3124 3133 tr = self.currenttransaction()
3125 3134 hookargs = {}
3126 3135 if tr is not None:
3127 3136 hookargs.update(tr.hookargs)
3128 3137 hookargs = pycompat.strkwargs(hookargs)
3129 3138 hookargs['namespace'] = namespace
3130 3139 hookargs['key'] = key
3131 3140 hookargs['old'] = old
3132 3141 hookargs['new'] = new
3133 3142 self.hook(b'prepushkey', throw=True, **hookargs)
3134 3143 except error.HookAbort as exc:
3135 3144 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3136 3145 if exc.hint:
3137 3146 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3138 3147 return False
3139 3148 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3140 3149 ret = pushkey.push(self, namespace, key, old, new)
3141 3150
3142 3151 def runhook(unused_success):
3143 3152 self.hook(
3144 3153 b'pushkey',
3145 3154 namespace=namespace,
3146 3155 key=key,
3147 3156 old=old,
3148 3157 new=new,
3149 3158 ret=ret,
3150 3159 )
3151 3160
3152 3161 self._afterlock(runhook)
3153 3162 return ret
3154 3163
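# A note on the ordering above: 'prepushkey' runs first and may veto the
# change (error.HookAbort makes this method return False); the 'pushkey'
# hook is deferred through _afterlock, so it fires only once the lock
# protecting the change is released, with ret set to the pushkey.push()
# result.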
3155 3164 def listkeys(self, namespace):
3156 3165 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3157 3166 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3158 3167 values = pushkey.list(self, namespace)
3159 3168 self.hook(b'listkeys', namespace=namespace, values=values)
3160 3169 return values
3161 3170
3162 3171 def debugwireargs(self, one, two, three=None, four=None, five=None):
3163 3172 '''used to test argument passing over the wire'''
3164 3173 return b"%s %s %s %s %s" % (
3165 3174 one,
3166 3175 two,
3167 3176 pycompat.bytestr(three),
3168 3177 pycompat.bytestr(four),
3169 3178 pycompat.bytestr(five),
3170 3179 )
3171 3180
3172 3181 def savecommitmessage(self, text):
3173 3182 fp = self.vfs(b'last-message.txt', b'wb')
3174 3183 try:
3175 3184 fp.write(text)
3176 3185 finally:
3177 3186 fp.close()
3178 3187 return self.pathto(fp.name[len(self.root) + 1 :])
3179 3188
3180 3189
3181 3190 # used to avoid circular references so destructors work
3182 3191 def aftertrans(files):
3183 3192 renamefiles = [tuple(t) for t in files]
3184 3193
3185 3194 def a():
3186 3195 for vfs, src, dest in renamefiles:
3187 3196 # if src and dest refer to the same file, vfs.rename is a no-op,
3188 3197 # leaving both src and dest on disk. delete dest to make sure
3189 3198 # the rename can't be such a no-op.
3190 3199 vfs.tryunlink(dest)
3191 3200 try:
3192 3201 vfs.rename(src, dest)
3193 3202 except OSError: # journal file does not yet exist
3194 3203 pass
3195 3204
3196 3205 return a
3197 3206
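# An illustrative sketch (an assumption drawn from the name and the rename
# tuples): the closure returned here is handed to a transaction as its
# post-close callback, e.g.
#
#   aftertrans([(self.svfs, b'journal', b'undo')])
#
# so that journal files are renamed to their undo counterparts once the
# transaction closes successfully.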
3198 3207
3199 3208 def undoname(fn):
3200 3209 base, name = os.path.split(fn)
3201 3210 assert name.startswith(b'journal')
3202 3211 return os.path.join(base, name.replace(b'journal', b'undo', 1))
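# For example: undoname(b'.hg/store/journal.phaseroots') returns
# b'.hg/store/undo.phaseroots'.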
3203 3212
3204 3213
3205 3214 def instance(ui, path, create, intents=None, createopts=None):
3206 3215 localpath = util.urllocalpath(path)
3207 3216 if create:
3208 3217 createrepository(ui, localpath, createopts=createopts)
3209 3218
3210 3219 return makelocalrepository(ui, localpath, intents=intents)
3211 3220
3212 3221
3213 3222 def islocal(path):
3214 3223 return True
3215 3224
3216 3225
3217 3226 def defaultcreateopts(ui, createopts=None):
3218 3227 """Populate the default creation options for a repository.
3219 3228
3220 3229 A dictionary of explicitly requested creation options can be passed
3221 3230 in. Missing keys will be populated.
3222 3231 """
3223 3232 createopts = dict(createopts or {})
3224 3233
3225 3234 if b'backend' not in createopts:
3226 3235 # experimental config: storage.new-repo-backend
3227 3236 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3228 3237
3229 3238 return createopts
3230 3239
3231 3240
3232 3241 def newreporequirements(ui, createopts):
3233 3242 """Determine the set of requirements for a new local repository.
3234 3243
3235 3244 Extensions can wrap this function to specify custom requirements for
3236 3245 new repositories.
3237 3246 """
3238 3247 # If the repo is being created from a shared repository, we copy
3239 3248 # its requirements.
3240 3249 if b'sharedrepo' in createopts:
3241 3250 requirements = set(createopts[b'sharedrepo'].requirements)
3242 3251 if createopts.get(b'sharedrelative'):
3243 3252 requirements.add(b'relshared')
3244 3253 else:
3245 3254 requirements.add(b'shared')
3246 3255
3247 3256 return requirements
3248 3257
3249 3258 if b'backend' not in createopts:
3250 3259 raise error.ProgrammingError(
3251 3260 b'backend key not present in createopts; '
3252 3261 b'was defaultcreateopts() called?'
3253 3262 )
3254 3263
3255 3264 if createopts[b'backend'] != b'revlogv1':
3256 3265 raise error.Abort(
3257 3266 _(
3258 3267 b'unable to determine repository requirements for '
3259 3268 b'storage backend: %s'
3260 3269 )
3261 3270 % createopts[b'backend']
3262 3271 )
3263 3272
3264 3273 requirements = {b'revlogv1'}
3265 3274 if ui.configbool(b'format', b'usestore'):
3266 3275 requirements.add(b'store')
3267 3276 if ui.configbool(b'format', b'usefncache'):
3268 3277 requirements.add(b'fncache')
3269 3278 if ui.configbool(b'format', b'dotencode'):
3270 3279 requirements.add(b'dotencode')
3271 3280
3272 3281 compengines = ui.configlist(b'format', b'revlog-compression')
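# for/else: pick the first configured engine that is actually available;
# the else branch runs only when none of them matched.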
3273 3282 for compengine in compengines:
3274 3283 if compengine in util.compengines:
3275 3284 break
3276 3285 else:
3277 3286 raise error.Abort(
3278 3287 _(
3279 3288 b'compression engines %s defined by '
3280 3289 b'format.revlog-compression not available'
3281 3290 )
3282 3291 % b', '.join(b'"%s"' % e for e in compengines),
3283 3292 hint=_(
3284 3293 b'run "hg debuginstall" to list available '
3285 3294 b'compression engines'
3286 3295 ),
3287 3296 )
3288 3297
3289 3298 # zlib is the historical default and doesn't need an explicit requirement.
3290 3299 if compengine == b'zstd':
3291 3300 requirements.add(b'revlog-compression-zstd')
3292 3301 elif compengine != b'zlib':
3293 3302 requirements.add(b'exp-compression-%s' % compengine)
3294 3303
3295 3304 if scmutil.gdinitconfig(ui):
3296 3305 requirements.add(b'generaldelta')
3297 3306 if ui.configbool(b'format', b'sparse-revlog'):
3298 3307 requirements.add(SPARSEREVLOG_REQUIREMENT)
3299 3308
3300 3309 # experimental config: format.exp-use-side-data
3301 3310 if ui.configbool(b'format', b'exp-use-side-data'):
3302 3311 requirements.add(SIDEDATA_REQUIREMENT)
3303 3312 # experimental config: format.exp-use-copies-side-data-changeset
3304 3313 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3305 3314 requirements.add(SIDEDATA_REQUIREMENT)
3306 3315 requirements.add(COPIESSDC_REQUIREMENT)
3307 3316 if ui.configbool(b'experimental', b'treemanifest'):
3308 3317 requirements.add(b'treemanifest')
3309 3318
3310 3319 revlogv2 = ui.config(b'experimental', b'revlogv2')
3311 3320 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3312 3321 requirements.remove(b'revlogv1')
3313 3322 # generaldelta is implied by revlogv2.
3314 3323 requirements.discard(b'generaldelta')
3315 3324 requirements.add(REVLOGV2_REQUIREMENT)
3316 3325 # experimental config: format.internal-phase
3317 3326 if ui.configbool(b'format', b'internal-phase'):
3318 3327 requirements.add(b'internal-phase')
3319 3328
3320 3329 if createopts.get(b'narrowfiles'):
3321 3330 requirements.add(repository.NARROW_REQUIREMENT)
3322 3331
3323 3332 if createopts.get(b'lfs'):
3324 3333 requirements.add(b'lfs')
3325 3334
3326 3335 if ui.configbool(b'format', b'bookmarks-in-store'):
3327 3336 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3328 3337
3329 3338 if ui.configbool(b'format', b'use-persistent-nodemap'):
3330 3339 requirements.add(NODEMAP_REQUIREMENT)
3331 3340
3332 3341 return requirements
3333 3342
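# A minimal sketch of the outcome for stock settings (hedged: the exact set
# depends on the format.* configuration in effect, and the defaults assumed
# here may differ between releases):
#
#   >>> sorted(newreporequirements(ui, defaultcreateopts(ui)))
#   [b'dotencode', b'fncache', b'generaldelta', b'revlogv1',
#    b'sparserevlog', b'store']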
3334 3343
3335 3344 def checkrequirementscompat(ui, requirements):
3336 3345 """Check compatibility of enabled and disabled repository requirements.
3337 3346 
3338 3347 Returns the set of requirements that need to be dropped because the
3339 3348 requirements they depend on are not enabled. Also warns the user about it."""
3340 3349
3341 3350 dropped = set()
3342 3351
3343 3352 if b'store' not in requirements:
3344 3353 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3345 3354 ui.warn(
3346 3355 _(
3347 3356 b'ignoring enabled \'format.bookmarks-in-store\' config '
3348 3357 b'because it is incompatible with disabled '
3349 3358 b'\'format.usestore\' config\n'
3350 3359 )
3351 3360 )
3352 3361 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3353 3362
3354 3363 if b'shared' in requirements or b'relshared' in requirements:
3355 3364 raise error.Abort(
3356 3365 _(
3357 3366 b"cannot create shared repository as source was created"
3358 3367 b" with 'format.usestore' config disabled"
3359 3368 )
3360 3369 )
3361 3370
3362 3371 return dropped
3363 3372
3364 3373
3365 3374 def filterknowncreateopts(ui, createopts):
3366 3375 """Filters a dict of repo creation options against options that are known.
3367 3376
3368 3377 Receives a dict of repo creation options and returns a dict of those
3369 3378 options that we don't know how to handle.
3370 3379
3371 3380 This function is called as part of repository creation. If the
3372 3381 returned dict contains any items, repository creation will not
3373 3382 be allowed, as it means there was a request to create a repository
3374 3383 with options not recognized by loaded code.
3375 3384
3376 3385 Extensions can wrap this function to filter out creation options
3377 3386 they know how to handle.
3378 3387 """
3379 3388 known = {
3380 3389 b'backend',
3381 3390 b'lfs',
3382 3391 b'narrowfiles',
3383 3392 b'sharedrepo',
3384 3393 b'sharedrelative',
3385 3394 b'shareditems',
3386 3395 b'shallowfilestore',
3387 3396 }
3388 3397
3389 3398 return {k: v for k, v in createopts.items() if k not in known}
3390 3399
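# A minimal sketch of an extension teaching this filter about its own
# creation option (hypothetical extension code; b'myopt' is an invented
# key):
#
#   def _filterknown(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myopt', None)  # claim the option we know how to handle
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)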
3391 3400
3392 3401 def createrepository(ui, path, createopts=None):
3393 3402 """Create a new repository in a vfs.
3394 3403
3395 3404 ``path`` path to the new repo's working directory.
3396 3405 ``createopts`` options for the new repository.
3397 3406
3398 3407 The following keys for ``createopts`` are recognized:
3399 3408
3400 3409 backend
3401 3410 The storage backend to use.
3402 3411 lfs
3403 3412 Repository will be created with ``lfs`` requirement. The lfs extension
3404 3413 will automatically be loaded when the repository is accessed.
3405 3414 narrowfiles
3406 3415 Set up repository to support narrow file storage.
3407 3416 sharedrepo
3408 3417 Repository object from which storage should be shared.
3409 3418 sharedrelative
3410 3419 Boolean indicating if the path to the shared repo should be
3411 3420 stored as relative. By default, the pointer to the "parent" repo
3412 3421 is stored as an absolute path.
3413 3422 shareditems
3414 3423 Set of items to share to the new repository (in addition to storage).
3415 3424 shallowfilestore
3416 3425 Indicates that storage for files should be shallow (not all ancestor
3417 3426 revisions are known).
3418 3427 """
3419 3428 createopts = defaultcreateopts(ui, createopts=createopts)
3420 3429
3421 3430 unknownopts = filterknowncreateopts(ui, createopts)
3422 3431
3423 3432 if not isinstance(unknownopts, dict):
3424 3433 raise error.ProgrammingError(
3425 3434 b'filterknowncreateopts() did not return a dict'
3426 3435 )
3427 3436
3428 3437 if unknownopts:
3429 3438 raise error.Abort(
3430 3439 _(
3431 3440 b'unable to create repository because of unknown '
3432 3441 b'creation option: %s'
3433 3442 )
3434 3443 % b', '.join(sorted(unknownopts)),
3435 3444 hint=_(b'is a required extension not loaded?'),
3436 3445 )
3437 3446
3438 3447 requirements = newreporequirements(ui, createopts=createopts)
3439 3448 requirements -= checkrequirementscompat(ui, requirements)
3440 3449
3441 3450 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3442 3451
3443 3452 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3444 3453 if hgvfs.exists():
3445 3454 raise error.RepoError(_(b'repository %s already exists') % path)
3446 3455
3447 3456 if b'sharedrepo' in createopts:
3448 3457 sharedpath = createopts[b'sharedrepo'].sharedpath
3449 3458
3450 3459 if createopts.get(b'sharedrelative'):
3451 3460 try:
3452 3461 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3453 3462 except (IOError, ValueError) as e:
3454 3463 # ValueError is raised on Windows if the drive letters differ
3455 3464 # on each path.
3456 3465 raise error.Abort(
3457 3466 _(b'cannot calculate relative path'),
3458 3467 hint=stringutil.forcebytestr(e),
3459 3468 )
3460 3469
3461 3470 if not wdirvfs.exists():
3462 3471 wdirvfs.makedirs()
3463 3472
3464 3473 hgvfs.makedir(notindexed=True)
3465 3474 if b'sharedrepo' not in createopts:
3466 3475 hgvfs.mkdir(b'cache')
3467 3476 hgvfs.mkdir(b'wcache')
3468 3477
3469 3478 if b'store' in requirements and b'sharedrepo' not in createopts:
3470 3479 hgvfs.mkdir(b'store')
3471 3480
3472 3481 # We create an invalid changelog outside the store so very old
3473 3482 # Mercurial versions (which didn't know about the requirements
3474 3483 # file) encounter an error on reading the changelog. This
3475 3484 # effectively locks out old clients and prevents them from
3476 3485 # mucking with a repo in an unknown format.
3477 3486 #
3478 3487 # The revlog header has version 2, which won't be recognized by
3479 3488 # such old clients.
3480 3489 hgvfs.append(
3481 3490 b'00changelog.i',
3482 3491 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3483 3492 b'layout',
3484 3493 )
3485 3494
3486 3495 scmutil.writerequires(hgvfs, requirements)
3487 3496
3488 3497 # Write out file telling readers where to find the shared store.
3489 3498 if b'sharedrepo' in createopts:
3490 3499 hgvfs.write(b'sharedpath', sharedpath)
3491 3500
3492 3501 if createopts.get(b'shareditems'):
3493 3502 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3494 3503 hgvfs.write(b'shared', shared)
3495 3504
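# A minimal sketch of direct use (hedged; callers normally arrive here via
# instance() above, and the path is illustrative):
#
#   createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
#
# which writes .hg/requires containing, among other entries, b'lfs'.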
3496 3505
3497 3506 def poisonrepository(repo):
3498 3507 """Poison a repository instance so it can no longer be used."""
3499 3508 # Perform any cleanup on the instance.
3500 3509 repo.close()
3501 3510
3502 3511 # Our strategy is to replace the type of the object with one that
3503 3512 # has all attribute lookups result in error.
3504 3513 #
3505 3514 # But we have to allow the close() method because some constructors
3506 3515 # of repos call close() on repo references.
3507 3516 class poisonedrepository(object):
3508 3517 def __getattribute__(self, item):
3509 3518 if item == 'close':
3510 3519 return object.__getattribute__(self, item)
3511 3520
3512 3521 raise error.ProgrammingError(
3513 3522 b'repo instances should not be used after unshare'
3514 3523 )
3515 3524
3516 3525 def close(self):
3517 3526 pass
3518 3527
3519 3528 # We may have a repoview, which intercepts __setattr__. So be sure
3520 3529 # we operate at the lowest level possible.
3521 3530 object.__setattr__(repo, '__class__', poisonedrepository)
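# Illustrative effect of the class swap above (hypothetical session):
#
#   poisonrepository(repo)
#   repo.close()        # still permitted
#   repo.changelog      # raises error.ProgrammingError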