localrepo: refactor logic to calculate sharedvfs in separate fn...
Pulkit Goyal - r45912:665e9115 (default)
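
In short, this change lifts the shared-store resolution out of makelocalrepository() into a module-level helper. Below is a condensed sketch of the new shape, reproduced from the diff that follows (hgvfs, requirements, vfsmod, error, and _ are Mercurial internals already in scope in localrepo.py):

    def _getsharedvfs(hgvfs, requirements):
        # 'sharedpath' holds an absolute path for the 'shared' requirement
        # and a path relative to .hg/ for 'relshared'.
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)
        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )
        return sharedvfs

    # Call site inside makelocalrepository(), slightly simplified:
    shared = b'shared' in requirements or b'relshared' in requirements
    sharedvfs = _getsharedvfs(hgvfs, requirements) if shared else None

Behavior is unchanged; the diff only adds the `shared` flag and a `sharedvfs = None` default alongside the extracted helper, presumably so that follow-up patches can reuse both.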
@@ -1,3504 +1,3521 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 commit,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 mergestate as mergestatemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 rcutil,
58 58 repoview,
59 59 revset,
60 60 revsetlang,
61 61 scmutil,
62 62 sparse,
63 63 store as storemod,
64 64 subrepoutil,
65 65 tags as tagsmod,
66 66 transaction,
67 67 txnutil,
68 68 util,
69 69 vfs as vfsmod,
70 70 )
71 71
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76
77 77 from .utils import (
78 78 hashutil,
79 79 procutil,
80 80 stringutil,
81 81 )
82 82
83 83 from .revlogutils import constants as revlogconst
84 84
85 85 release = lockmod.release
86 86 urlerr = util.urlerr
87 87 urlreq = util.urlreq
88 88
89 89 # set of (path, vfs-location) tuples. vfs-location is:
90 90 # - 'plain' for vfs relative paths
91 91 # - '' for svfs relative paths
92 92 _cachedfiles = set()
93 93
94 94
95 95 class _basefilecache(scmutil.filecache):
96 96 """All filecache usage on repo are done for logic that should be unfiltered
97 97 """
98 98
99 99 def __get__(self, repo, type=None):
100 100 if repo is None:
101 101 return self
102 102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 103 unfi = repo.unfiltered()
104 104 try:
105 105 return unfi.__dict__[self.sname]
106 106 except KeyError:
107 107 pass
108 108 return super(_basefilecache, self).__get__(unfi, type)
109 109
110 110 def set(self, repo, value):
111 111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112 112
113 113
114 114 class repofilecache(_basefilecache):
115 115 """filecache for files in .hg but outside of .hg/store"""
116 116
117 117 def __init__(self, *paths):
118 118 super(repofilecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, b'plain'))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.vfs.join(fname)
124 124
125 125
126 126 class storecache(_basefilecache):
127 127 """filecache for files in the store"""
128 128
129 129 def __init__(self, *paths):
130 130 super(storecache, self).__init__(*paths)
131 131 for path in paths:
132 132 _cachedfiles.add((path, b''))
133 133
134 134 def join(self, obj, fname):
135 135 return obj.sjoin(fname)
136 136
137 137
138 138 class mixedrepostorecache(_basefilecache):
139 139 """filecache for a mix files in .hg/store and outside"""
140 140
141 141 def __init__(self, *pathsandlocations):
142 142 # scmutil.filecache only uses the path for passing back into our
143 143 # join(), so we can safely pass a list of paths and locations
144 144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 145 _cachedfiles.update(pathsandlocations)
146 146
147 147 def join(self, obj, fnameandlocation):
148 148 fname, location = fnameandlocation
149 149 if location == b'plain':
150 150 return obj.vfs.join(fname)
151 151 else:
152 152 if location != b'':
153 153 raise error.ProgrammingError(
154 154 b'unexpected location: %s' % location
155 155 )
156 156 return obj.sjoin(fname)
157 157
158 158
159 159 def isfilecached(repo, name):
160 160 """check if a repo has already cached "name" filecache-ed property
161 161
162 162 This returns a (cachedobj-or-None, iscached) tuple.
163 163 """
164 164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 165 if not cacheentry:
166 166 return None, False
167 167 return cacheentry.obj, True
168 168
169 169
170 170 class unfilteredpropertycache(util.propertycache):
171 171 """propertycache that apply to unfiltered repo only"""
172 172
173 173 def __get__(self, repo, type=None):
174 174 unfi = repo.unfiltered()
175 175 if unfi is repo:
176 176 return super(unfilteredpropertycache, self).__get__(unfi)
177 177 return getattr(unfi, self.name)
178 178
179 179
180 180 class filteredpropertycache(util.propertycache):
181 181 """propertycache that must take filtering in account"""
182 182
183 183 def cachevalue(self, obj, value):
184 184 object.__setattr__(obj, self.name, value)
185 185
186 186
187 187 def hasunfilteredcache(repo, name):
188 188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 189 return name in vars(repo.unfiltered())
190 190
191 191
192 192 def unfilteredmethod(orig):
193 193 """decorate method that always need to be run on unfiltered version"""
194 194
195 195 def wrapper(repo, *args, **kwargs):
196 196 return orig(repo.unfiltered(), *args, **kwargs)
197 197
198 198 return wrapper
199 199
200 200
201 201 moderncaps = {
202 202 b'lookup',
203 203 b'branchmap',
204 204 b'pushkey',
205 205 b'known',
206 206 b'getbundle',
207 207 b'unbundle',
208 208 }
209 209 legacycaps = moderncaps.union({b'changegroupsubset'})
210 210
211 211
212 212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 213 class localcommandexecutor(object):
214 214 def __init__(self, peer):
215 215 self._peer = peer
216 216 self._sent = False
217 217 self._closed = False
218 218
219 219 def __enter__(self):
220 220 return self
221 221
222 222 def __exit__(self, exctype, excvalue, exctb):
223 223 self.close()
224 224
225 225 def callcommand(self, command, args):
226 226 if self._sent:
227 227 raise error.ProgrammingError(
228 228 b'callcommand() cannot be used after sendcommands()'
229 229 )
230 230
231 231 if self._closed:
232 232 raise error.ProgrammingError(
233 233 b'callcommand() cannot be used after close()'
234 234 )
235 235
236 236 # We don't need to support anything fancy. Just call the named
237 237 # method on the peer and return a resolved future.
238 238 fn = getattr(self._peer, pycompat.sysstr(command))
239 239
240 240 f = pycompat.futures.Future()
241 241
242 242 try:
243 243 result = fn(**pycompat.strkwargs(args))
244 244 except Exception:
245 245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 246 else:
247 247 f.set_result(result)
248 248
249 249 return f
250 250
251 251 def sendcommands(self):
252 252 self._sent = True
253 253
254 254 def close(self):
255 255 self._closed = True
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommands)
259 259 class localpeer(repository.peer):
260 260 '''peer for a local repo; reflects only the most recent API'''
261 261
262 262 def __init__(self, repo, caps=None):
263 263 super(localpeer, self).__init__()
264 264
265 265 if caps is None:
266 266 caps = moderncaps.copy()
267 267 self._repo = repo.filtered(b'served')
268 268 self.ui = repo.ui
269 269 self._caps = repo._restrictcapabilities(caps)
270 270
271 271 # Begin of _basepeer interface.
272 272
273 273 def url(self):
274 274 return self._repo.url()
275 275
276 276 def local(self):
277 277 return self._repo
278 278
279 279 def peer(self):
280 280 return self
281 281
282 282 def canpush(self):
283 283 return True
284 284
285 285 def close(self):
286 286 self._repo.close()
287 287
288 288 # End of _basepeer interface.
289 289
290 290 # Begin of _basewirecommands interface.
291 291
292 292 def branchmap(self):
293 293 return self._repo.branchmap()
294 294
295 295 def capabilities(self):
296 296 return self._caps
297 297
298 298 def clonebundles(self):
299 299 return self._repo.tryread(b'clonebundles.manifest')
300 300
301 301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 302 """Used to test argument passing over the wire"""
303 303 return b"%s %s %s %s %s" % (
304 304 one,
305 305 two,
306 306 pycompat.bytestr(three),
307 307 pycompat.bytestr(four),
308 308 pycompat.bytestr(five),
309 309 )
310 310
311 311 def getbundle(
312 312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 313 ):
314 314 chunks = exchange.getbundlechunks(
315 315 self._repo,
316 316 source,
317 317 heads=heads,
318 318 common=common,
319 319 bundlecaps=bundlecaps,
320 320 **kwargs
321 321 )[1]
322 322 cb = util.chunkbuffer(chunks)
323 323
324 324 if exchange.bundle2requested(bundlecaps):
325 325 # When requesting a bundle2, getbundle returns a stream to make the
326 326 # wire level function happier. We need to build a proper object
327 327 # from it in local peer.
328 328 return bundle2.getunbundler(self.ui, cb)
329 329 else:
330 330 return changegroup.getunbundler(b'01', cb, None)
331 331
332 332 def heads(self):
333 333 return self._repo.heads()
334 334
335 335 def known(self, nodes):
336 336 return self._repo.known(nodes)
337 337
338 338 def listkeys(self, namespace):
339 339 return self._repo.listkeys(namespace)
340 340
341 341 def lookup(self, key):
342 342 return self._repo.lookup(key)
343 343
344 344 def pushkey(self, namespace, key, old, new):
345 345 return self._repo.pushkey(namespace, key, old, new)
346 346
347 347 def stream_out(self):
348 348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349 349
350 350 def unbundle(self, bundle, heads, url):
351 351 """apply a bundle on a repo
352 352
353 353 This function handles the repo locking itself."""
354 354 try:
355 355 try:
356 356 bundle = exchange.readbundle(self.ui, bundle, None)
357 357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 358 if util.safehasattr(ret, b'getchunks'):
359 359 # This is a bundle20 object, turn it into an unbundler.
360 360 # This little dance should be dropped eventually when the
361 361 # API is finally improved.
362 362 stream = util.chunkbuffer(ret.getchunks())
363 363 ret = bundle2.getunbundler(self.ui, stream)
364 364 return ret
365 365 except Exception as exc:
366 366 # If the exception contains output salvaged from a bundle2
367 367 # reply, we need to make sure it is printed before continuing
368 368 # to fail. So we build a bundle2 with such output and consume
369 369 # it directly.
370 370 #
371 371 # This is not very elegant but allows a "simple" solution for
372 372 # issue4594
373 373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 374 if output:
375 375 bundler = bundle2.bundle20(self._repo.ui)
376 376 for out in output:
377 377 bundler.addpart(out)
378 378 stream = util.chunkbuffer(bundler.getchunks())
379 379 b = bundle2.getunbundler(self.ui, stream)
380 380 bundle2.processbundle(self._repo, b)
381 381 raise
382 382 except error.PushRaced as exc:
383 383 raise error.ResponseError(
384 384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 385 )
386 386
387 387 # End of _basewirecommands interface.
388 388
389 389 # Begin of peer interface.
390 390
391 391 def commandexecutor(self):
392 392 return localcommandexecutor(self)
393 393
394 394 # End of peer interface.
395 395
396 396
397 397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 398 class locallegacypeer(localpeer):
399 399 '''peer extension which implements legacy methods too; used for tests with
400 400 restricted capabilities'''
401 401
402 402 def __init__(self, repo):
403 403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404 404
405 405 # Begin of baselegacywirecommands interface.
406 406
407 407 def between(self, pairs):
408 408 return self._repo.between(pairs)
409 409
410 410 def branches(self, nodes):
411 411 return self._repo.branches(nodes)
412 412
413 413 def changegroup(self, nodes, source):
414 414 outgoing = discovery.outgoing(
415 415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 416 )
417 417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418 418
419 419 def changegroupsubset(self, bases, heads, source):
420 420 outgoing = discovery.outgoing(
421 421 self._repo, missingroots=bases, ancestorsof=heads
422 422 )
423 423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424 424
425 425 # End of baselegacywirecommands interface.
426 426
427 427
428 428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 429 # clients.
430 430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431 431
432 432 # A repository with the sparserevlog feature will have delta chains that
433 433 # can spread over a larger span. Sparse reading cuts these large spans into
434 434 # pieces, so that each piece isn't too big.
435 435 # Without the sparserevlog capability, reading from the repository could use
436 436 # huge amounts of memory, because the whole span would be read at once,
437 437 # including all the intermediate revisions that aren't pertinent for the chain.
438 438 # This is why once a repository has enabled sparse-read, it becomes required.
439 439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440 440
441 441 # A repository with the sidedataflag requirement will allow storing extra
442 442 # information for revisions without altering their original hashes.
443 443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444 444
445 445 # A repository with the copies-sidedata-changeset requirement will store
446 446 # copies related information in changeset's sidedata.
447 447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448 448
449 449 # The repository uses a persistent nodemap for the changelog and the manifest.
450 450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451 451
452 452 # Functions receiving (ui, features) that extensions can register to impact
453 453 # the ability to load repositories with custom requirements. Only
454 454 # functions defined in loaded extensions are called.
455 455 #
456 456 # The function receives a set of requirement strings that the repository
457 457 # is capable of opening. Functions will typically add elements to the
458 458 # set to reflect that the extension knows how to handle those requirements.
459 459 featuresetupfuncs = set()
460 460
461 461
462 def _getsharedvfs(hgvfs, requirements):
463 """ returns the vfs object pointing to root of shared source
464 repo for a shared repository
465
466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
467 requirements is a set of requirements of current repo (shared one)
468 """
469 # The ``shared`` or ``relshared`` requirements indicate the
470 # store lives in the path contained in the ``.hg/sharedpath`` file.
471 # This is an absolute path for ``shared`` and relative to
472 # ``.hg/`` for ``relshared``.
473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
474 if b'relshared' in requirements:
475 sharedpath = hgvfs.join(sharedpath)
476
477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
478
479 if not sharedvfs.exists():
480 raise error.RepoError(
481 _(b'.hg/sharedpath points to nonexistent directory %s')
482 % sharedvfs.base
483 )
484 return sharedvfs
485
486
462 487 def makelocalrepository(baseui, path, intents=None):
463 488 """Create a local repository object.
464 489
465 490 Given arguments needed to construct a local repository, this function
466 491 performs various early repository loading functionality (such as
467 492 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
468 493 the repository can be opened, derives a type suitable for representing
469 494 that repository, and returns an instance of it.
470 495
471 496 The returned object conforms to the ``repository.completelocalrepository``
472 497 interface.
473 498
474 499 The repository type is derived by calling a series of factory functions
475 500 for each aspect/interface of the final repository. These are defined by
476 501 ``REPO_INTERFACES``.
477 502
478 503 Each factory function is called to produce a type implementing a specific
479 504 interface. The cumulative list of returned types will be combined into a
480 505 new type and that type will be instantiated to represent the local
481 506 repository.
482 507
483 508 The factory functions each receive various state that may be consulted
484 509 as part of deriving a type.
485 510
486 511 Extensions should wrap these factory functions to customize repository type
487 512 creation. Note that an extension's wrapped function may be called even if
488 513 that extension is not loaded for the repo being constructed. Extensions
489 514 should check if their ``__name__`` appears in the
490 515 ``extensionmodulenames`` set passed to the factory function and no-op if
491 516 not.
492 517 """
493 518 ui = baseui.copy()
494 519 # Prevent copying repo configuration.
495 520 ui.copy = baseui.copy
496 521
497 522 # Working directory VFS rooted at repository root.
498 523 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
499 524
500 525 # Main VFS for .hg/ directory.
501 526 hgpath = wdirvfs.join(b'.hg')
502 527 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
528 # Whether this repository is a shared one or not
529 shared = False
530 # If this repository is shared, the vfs pointing to the shared repo
531 sharedvfs = None
503 532
504 533 # The .hg/ path should exist and should be a directory. All other
505 534 # cases are errors.
506 535 if not hgvfs.isdir():
507 536 try:
508 537 hgvfs.stat()
509 538 except OSError as e:
510 539 if e.errno != errno.ENOENT:
511 540 raise
512 541 except ValueError as e:
513 542 # Can be raised on Python 3.8 when path is invalid.
514 543 raise error.Abort(
515 544 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
516 545 )
517 546
518 547 raise error.RepoError(_(b'repository %s not found') % path)
519 548
520 549 # .hg/requires file contains a newline-delimited list of
521 550 # features/capabilities the opener (us) must have in order to use
522 551 # the repository. This file was introduced in Mercurial 0.9.2,
523 552 # which means very old repositories may not have one. We assume
524 553 # a missing file translates to no requirements.
525 554 try:
526 555 requirements = set(hgvfs.read(b'requires').splitlines())
527 556 except IOError as e:
528 557 if e.errno != errno.ENOENT:
529 558 raise
530 559 requirements = set()
531 560
532 561 # The .hg/hgrc file may load extensions or contain config options
533 562 # that influence repository construction. Attempt to load it and
534 563 # process any new extensions that it may have pulled in.
535 564 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
536 565 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
537 566 extensions.loadall(ui)
538 567 extensions.populateui(ui)
539 568
540 569 # Set of module names of extensions loaded for this repository.
541 570 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
542 571
543 572 supportedrequirements = gathersupportedrequirements(ui)
544 573
545 574 # We first validate the requirements are known.
546 575 ensurerequirementsrecognized(requirements, supportedrequirements)
547 576
548 577 # Then we validate that the known set is reasonable to use together.
549 578 ensurerequirementscompatible(ui, requirements)
550 579
551 580 # TODO there are unhandled edge cases related to opening repositories with
552 581 # shared storage. If storage is shared, we should also test for requirements
553 582 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
554 583 # that repo, as that repo may load extensions needed to open it. This is a
555 584 # bit complicated because we don't want the other hgrc to overwrite settings
556 585 # in this hgrc.
557 586 #
558 587 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
559 588 # file when sharing repos. But if a requirement is added after the share is
560 589 # performed, thereby introducing a new requirement for the opener, we
561 590 # will not see that and could encounter a run-time error interacting with
562 591 # that shared store since it has an unknown-to-us requirement.
563 592
564 593 # At this point, we know we should be capable of opening the repository.
565 594 # Now get on with doing that.
566 595
567 596 features = set()
568 597
569 598 # The "store" part of the repository holds versioned data. How it is
570 # accessed is determined by various requirements. The ``shared`` or
571 # ``relshared`` requirements indicate the store lives in the path contained
572 # in the ``.hg/sharedpath`` file. This is an absolute path for
573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
574 if b'shared' in requirements or b'relshared' in requirements:
575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
576 if b'relshared' in requirements:
577 sharedpath = hgvfs.join(sharedpath)
578
579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
580
581 if not sharedvfs.exists():
582 raise error.RepoError(
583 _(b'.hg/sharedpath points to nonexistent directory %s')
584 % sharedvfs.base
585 )
586
587 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
588
599 # accessed is determined by various requirements. If `shared` or
600 # `relshared` requirements are present, the current repository is a
601 # share and the store exists in the path mentioned in `.hg/sharedpath`
602 shared = b'shared' in requirements or b'relshared' in requirements
603 if shared:
604 sharedvfs = _getsharedvfs(hgvfs, requirements)
589 605 storebasepath = sharedvfs.base
590 606 cachepath = sharedvfs.join(b'cache')
607 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
591 608 else:
592 609 storebasepath = hgvfs.base
593 610 cachepath = hgvfs.join(b'cache')
594 611 wcachepath = hgvfs.join(b'wcache')
595 612
596 613 # The store has changed over time and the exact layout is dictated by
597 614 # requirements. The store interface abstracts differences across all
598 615 # of them.
599 616 store = makestore(
600 617 requirements,
601 618 storebasepath,
602 619 lambda base: vfsmod.vfs(base, cacheaudited=True),
603 620 )
604 621 hgvfs.createmode = store.createmode
605 622
606 623 storevfs = store.vfs
607 624 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
608 625
609 626 # The cache vfs is used to manage cache files.
610 627 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
611 628 cachevfs.createmode = store.createmode
612 629 # The cache vfs is used to manage cache files related to the working copy
613 630 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
614 631 wcachevfs.createmode = store.createmode
615 632
616 633 # Now resolve the type for the repository object. We do this by repeatedly
617 634 # calling a factory function to produce types for specific aspects of the
618 635 # repo's operation. The aggregate returned types are used as base classes
619 636 # for a dynamically-derived type, which will represent our new repository.
620 637
621 638 bases = []
622 639 extrastate = {}
623 640
624 641 for iface, fn in REPO_INTERFACES:
625 642 # We pass all potentially useful state to give extensions tons of
626 643 # flexibility.
627 644 typ = fn()(
628 645 ui=ui,
629 646 intents=intents,
630 647 requirements=requirements,
631 648 features=features,
632 649 wdirvfs=wdirvfs,
633 650 hgvfs=hgvfs,
634 651 store=store,
635 652 storevfs=storevfs,
636 653 storeoptions=storevfs.options,
637 654 cachevfs=cachevfs,
638 655 wcachevfs=wcachevfs,
639 656 extensionmodulenames=extensionmodulenames,
640 657 extrastate=extrastate,
641 658 baseclasses=bases,
642 659 )
643 660
644 661 if not isinstance(typ, type):
645 662 raise error.ProgrammingError(
646 663 b'unable to construct type for %s' % iface
647 664 )
648 665
649 666 bases.append(typ)
650 667
651 668 # type() allows you to use characters in type names that wouldn't be
652 669 # recognized as Python symbols in source code. We abuse that to add
653 670 # rich information about our constructed repo.
654 671 name = pycompat.sysstr(
655 672 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
656 673 )
657 674
658 675 cls = type(name, tuple(bases), {})
659 676
660 677 return cls(
661 678 baseui=baseui,
662 679 ui=ui,
663 680 origroot=path,
664 681 wdirvfs=wdirvfs,
665 682 hgvfs=hgvfs,
666 683 requirements=requirements,
667 684 supportedrequirements=supportedrequirements,
668 685 sharedpath=storebasepath,
669 686 store=store,
670 687 cachevfs=cachevfs,
671 688 wcachevfs=wcachevfs,
672 689 features=features,
673 690 intents=intents,
674 691 )
675 692
676 693
677 694 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
678 695 """Load hgrc files/content into a ui instance.
679 696
680 697 This is called during repository opening to load any additional
681 698 config files or settings relevant to the current repository.
682 699
683 700 Returns a bool indicating whether any additional configs were loaded.
684 701
685 702 Extensions should monkeypatch this function to modify how per-repo
686 703 configs are loaded. For example, an extension may wish to pull in
687 704 configs from alternate files or sources.
688 705 """
689 706 if not rcutil.use_repo_hgrc():
690 707 return False
691 708 try:
692 709 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
693 710 return True
694 711 except IOError:
695 712 return False
696 713
697 714
698 715 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
699 716 """Perform additional actions after .hg/hgrc is loaded.
700 717
701 718 This function is called during repository loading immediately after
702 719 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
703 720
704 721 The function can be used to validate configs, automatically add
705 722 options (including extensions) based on requirements, etc.
706 723 """
707 724
708 725 # Map of requirements to list of extensions to load automatically when
709 726 # the requirement is present.
710 727 autoextensions = {
711 728 b'git': [b'git'],
712 729 b'largefiles': [b'largefiles'],
713 730 b'lfs': [b'lfs'],
714 731 }
715 732
716 733 for requirement, names in sorted(autoextensions.items()):
717 734 if requirement not in requirements:
718 735 continue
719 736
720 737 for name in names:
721 738 if not ui.hasconfig(b'extensions', name):
722 739 ui.setconfig(b'extensions', name, b'', source=b'autoload')
723 740
724 741
725 742 def gathersupportedrequirements(ui):
726 743 """Determine the complete set of recognized requirements."""
727 744 # Start with all requirements supported by this file.
728 745 supported = set(localrepository._basesupported)
729 746
730 747 # Execute ``featuresetupfuncs`` entries if they belong to an extension
731 748 # relevant to this ui instance.
732 749 modules = {m.__name__ for n, m in extensions.extensions(ui)}
733 750
734 751 for fn in featuresetupfuncs:
735 752 if fn.__module__ in modules:
736 753 fn(ui, supported)
737 754
738 755 # Add derived requirements from registered compression engines.
739 756 for name in util.compengines:
740 757 engine = util.compengines[name]
741 758 if engine.available() and engine.revlogheader():
742 759 supported.add(b'exp-compression-%s' % name)
743 760 if engine.name() == b'zstd':
744 761 supported.add(b'revlog-compression-zstd')
745 762
746 763 return supported
747 764
748 765
749 766 def ensurerequirementsrecognized(requirements, supported):
750 767 """Validate that a set of local requirements is recognized.
751 768
752 769 Receives a set of requirements. Raises an ``error.RepoError`` if there
753 770 exists any requirement in that set that currently loaded code doesn't
754 771 recognize.
755 772
756 773 Returns a set of supported requirements.
757 774 """
758 775 missing = set()
759 776
760 777 for requirement in requirements:
761 778 if requirement in supported:
762 779 continue
763 780
764 781 if not requirement or not requirement[0:1].isalnum():
765 782 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
766 783
767 784 missing.add(requirement)
768 785
769 786 if missing:
770 787 raise error.RequirementError(
771 788 _(b'repository requires features unknown to this Mercurial: %s')
772 789 % b' '.join(sorted(missing)),
773 790 hint=_(
774 791 b'see https://mercurial-scm.org/wiki/MissingRequirement '
775 792 b'for more information'
776 793 ),
777 794 )
778 795
779 796
780 797 def ensurerequirementscompatible(ui, requirements):
781 798 """Validates that a set of recognized requirements is mutually compatible.
782 799
783 800 Some requirements may not be compatible with others or require
784 801 config options that aren't enabled. This function is called during
785 802 repository opening to ensure that the set of requirements needed
786 803 to open a repository is sane and compatible with config options.
787 804
788 805 Extensions can monkeypatch this function to perform additional
789 806 checking.
790 807
791 808 ``error.RepoError`` should be raised on failure.
792 809 """
793 810 if b'exp-sparse' in requirements and not sparse.enabled:
794 811 raise error.RepoError(
795 812 _(
796 813 b'repository is using sparse feature but '
797 814 b'sparse is not enabled; enable the '
798 815 b'"sparse" extensions to access'
799 816 )
800 817 )
801 818
802 819
803 820 def makestore(requirements, path, vfstype):
804 821 """Construct a storage object for a repository."""
805 822 if b'store' in requirements:
806 823 if b'fncache' in requirements:
807 824 return storemod.fncachestore(
808 825 path, vfstype, b'dotencode' in requirements
809 826 )
810 827
811 828 return storemod.encodedstore(path, vfstype)
812 829
813 830 return storemod.basicstore(path, vfstype)
814 831
815 832
816 833 def resolvestorevfsoptions(ui, requirements, features):
817 834 """Resolve the options to pass to the store vfs opener.
818 835
819 836 The returned dict is used to influence behavior of the storage layer.
820 837 """
821 838 options = {}
822 839
823 840 if b'treemanifest' in requirements:
824 841 options[b'treemanifest'] = True
825 842
826 843 # experimental config: format.manifestcachesize
827 844 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
828 845 if manifestcachesize is not None:
829 846 options[b'manifestcachesize'] = manifestcachesize
830 847
831 848 # In the absence of another requirement superseding a revlog-related
832 849 # requirement, we have to assume the repo is using revlog version 0.
833 850 # This revlog format is super old and we don't bother trying to parse
834 851 # opener options for it because those options wouldn't do anything
835 852 # meaningful on such old repos.
836 853 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
837 854 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
838 855 else: # explicitly mark repo as using revlogv0
839 856 options[b'revlogv0'] = True
840 857
841 858 if COPIESSDC_REQUIREMENT in requirements:
842 859 options[b'copies-storage'] = b'changeset-sidedata'
843 860 else:
844 861 writecopiesto = ui.config(b'experimental', b'copies.write-to')
845 862 copiesextramode = (b'changeset-only', b'compatibility')
846 863 if writecopiesto in copiesextramode:
847 864 options[b'copies-storage'] = b'extra'
848 865
849 866 return options
850 867
851 868
852 869 def resolverevlogstorevfsoptions(ui, requirements, features):
853 870 """Resolve opener options specific to revlogs."""
854 871
855 872 options = {}
856 873 options[b'flagprocessors'] = {}
857 874
858 875 if b'revlogv1' in requirements:
859 876 options[b'revlogv1'] = True
860 877 if REVLOGV2_REQUIREMENT in requirements:
861 878 options[b'revlogv2'] = True
862 879
863 880 if b'generaldelta' in requirements:
864 881 options[b'generaldelta'] = True
865 882
866 883 # experimental config: format.chunkcachesize
867 884 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
868 885 if chunkcachesize is not None:
869 886 options[b'chunkcachesize'] = chunkcachesize
870 887
871 888 deltabothparents = ui.configbool(
872 889 b'storage', b'revlog.optimize-delta-parent-choice'
873 890 )
874 891 options[b'deltabothparents'] = deltabothparents
875 892
876 893 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
877 894 lazydeltabase = False
878 895 if lazydelta:
879 896 lazydeltabase = ui.configbool(
880 897 b'storage', b'revlog.reuse-external-delta-parent'
881 898 )
882 899 if lazydeltabase is None:
883 900 lazydeltabase = not scmutil.gddeltaconfig(ui)
884 901 options[b'lazydelta'] = lazydelta
885 902 options[b'lazydeltabase'] = lazydeltabase
886 903
887 904 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
888 905 if 0 <= chainspan:
889 906 options[b'maxdeltachainspan'] = chainspan
890 907
891 908 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
892 909 if mmapindexthreshold is not None:
893 910 options[b'mmapindexthreshold'] = mmapindexthreshold
894 911
895 912 withsparseread = ui.configbool(b'experimental', b'sparse-read')
896 913 srdensitythres = float(
897 914 ui.config(b'experimental', b'sparse-read.density-threshold')
898 915 )
899 916 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
900 917 options[b'with-sparse-read'] = withsparseread
901 918 options[b'sparse-read-density-threshold'] = srdensitythres
902 919 options[b'sparse-read-min-gap-size'] = srmingapsize
903 920
904 921 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
905 922 options[b'sparse-revlog'] = sparserevlog
906 923 if sparserevlog:
907 924 options[b'generaldelta'] = True
908 925
909 926 sidedata = SIDEDATA_REQUIREMENT in requirements
910 927 options[b'side-data'] = sidedata
911 928
912 929 maxchainlen = None
913 930 if sparserevlog:
914 931 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
915 932 # experimental config: format.maxchainlen
916 933 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
917 934 if maxchainlen is not None:
918 935 options[b'maxchainlen'] = maxchainlen
919 936
920 937 for r in requirements:
921 938 # we allow multiple compression engine requirements to co-exist because,
922 939 # strictly speaking, revlog seems to support mixed compression styles.
923 940 #
924 941 # The compression used for new entries will be "the last one"
925 942 prefix = r.startswith
926 943 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
927 944 options[b'compengine'] = r.split(b'-', 2)[2]
928 945
929 946 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
930 947 if options[b'zlib.level'] is not None:
931 948 if not (0 <= options[b'zlib.level'] <= 9):
932 949 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
933 950 raise error.Abort(msg % options[b'zlib.level'])
934 951 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
935 952 if options[b'zstd.level'] is not None:
936 953 if not (0 <= options[b'zstd.level'] <= 22):
937 954 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
938 955 raise error.Abort(msg % options[b'zstd.level'])
939 956
940 957 if repository.NARROW_REQUIREMENT in requirements:
941 958 options[b'enableellipsis'] = True
942 959
943 960 if ui.configbool(b'experimental', b'rust.index'):
944 961 options[b'rust.index'] = True
945 962 if NODEMAP_REQUIREMENT in requirements:
946 963 options[b'persistent-nodemap'] = True
947 964 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
948 965 options[b'persistent-nodemap.mmap'] = True
949 966 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
950 967 options[b'persistent-nodemap.mode'] = epnm
951 968 if ui.configbool(b'devel', b'persistent-nodemap'):
952 969 options[b'devel-force-nodemap'] = True
953 970
954 971 return options
955 972
956 973
957 974 def makemain(**kwargs):
958 975 """Produce a type conforming to ``ilocalrepositorymain``."""
959 976 return localrepository
960 977
961 978
962 979 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
963 980 class revlogfilestorage(object):
964 981 """File storage when using revlogs."""
965 982
966 983 def file(self, path):
967 984 if path[0] == b'/':
968 985 path = path[1:]
969 986
970 987 return filelog.filelog(self.svfs, path)
971 988
972 989
973 990 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
974 991 class revlognarrowfilestorage(object):
975 992 """File storage when using revlogs and narrow files."""
976 993
977 994 def file(self, path):
978 995 if path[0] == b'/':
979 996 path = path[1:]
980 997
981 998 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
982 999
983 1000
984 1001 def makefilestorage(requirements, features, **kwargs):
985 1002 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
986 1003 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
987 1004 features.add(repository.REPO_FEATURE_STREAM_CLONE)
988 1005
989 1006 if repository.NARROW_REQUIREMENT in requirements:
990 1007 return revlognarrowfilestorage
991 1008 else:
992 1009 return revlogfilestorage
993 1010
994 1011
995 1012 # List of repository interfaces and factory functions for them. Each
996 1013 # will be called in order during ``makelocalrepository()`` to iteratively
997 1014 # derive the final type for a local repository instance. We capture the
998 1015 # function as a lambda so we don't hold a reference and the module-level
999 1016 # functions can be wrapped.
1000 1017 REPO_INTERFACES = [
1001 1018 (repository.ilocalrepositorymain, lambda: makemain),
1002 1019 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1003 1020 ]
1004 1021
1005 1022
1006 1023 @interfaceutil.implementer(repository.ilocalrepositorymain)
1007 1024 class localrepository(object):
1008 1025 """Main class for representing local repositories.
1009 1026
1010 1027 All local repositories are instances of this class.
1011 1028
1012 1029 Constructed on its own, instances of this class are not usable as
1013 1030 repository objects. To obtain a usable repository object, call
1014 1031 ``hg.repository()``, ``localrepo.instance()``, or
1015 1032 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1016 1033 ``instance()`` adds support for creating new repositories.
1017 1034 ``hg.repository()`` adds more extension integration, including calling
1018 1035 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1019 1036 used.
1020 1037 """
1021 1038
1022 1039 # obsolete experimental requirements:
1023 1040 # - manifestv2: An experimental new manifest format that allowed
1024 1041 # for stem compression of long paths. Experiment ended up not
1025 1042 # being successful (repository sizes went up due to worse delta
1026 1043 # chains), and the code was deleted in 4.6.
1027 1044 supportedformats = {
1028 1045 b'revlogv1',
1029 1046 b'generaldelta',
1030 1047 b'treemanifest',
1031 1048 COPIESSDC_REQUIREMENT,
1032 1049 REVLOGV2_REQUIREMENT,
1033 1050 SIDEDATA_REQUIREMENT,
1034 1051 SPARSEREVLOG_REQUIREMENT,
1035 1052 NODEMAP_REQUIREMENT,
1036 1053 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1037 1054 }
1038 1055 _basesupported = supportedformats | {
1039 1056 b'store',
1040 1057 b'fncache',
1041 1058 b'shared',
1042 1059 b'relshared',
1043 1060 b'dotencode',
1044 1061 b'exp-sparse',
1045 1062 b'internal-phase',
1046 1063 }
1047 1064
1048 1065 # list of prefixes for files which can be written without 'wlock'
1049 1066 # Extensions should extend this list when needed
1050 1067 _wlockfreeprefix = {
1051 1068 # We might consider requiring 'wlock' for the next
1052 1069 # two, but pretty much all the existing code assumes
1053 1070 # wlock is not needed so we keep them excluded for
1054 1071 # now.
1055 1072 b'hgrc',
1056 1073 b'requires',
1057 1074 # XXX cache is a complicated business; someone
1058 1075 # should investigate this in depth at some point
1059 1076 b'cache/',
1060 1077 # XXX shouldn't dirstate be covered by the wlock?
1061 1078 b'dirstate',
1062 1079 # XXX bisect was still a bit too messy at the time
1063 1080 # this changeset was introduced. Someone should fix
1064 1081 # the remaining bit and drop this line
1065 1082 b'bisect.state',
1066 1083 }
1067 1084
1068 1085 def __init__(
1069 1086 self,
1070 1087 baseui,
1071 1088 ui,
1072 1089 origroot,
1073 1090 wdirvfs,
1074 1091 hgvfs,
1075 1092 requirements,
1076 1093 supportedrequirements,
1077 1094 sharedpath,
1078 1095 store,
1079 1096 cachevfs,
1080 1097 wcachevfs,
1081 1098 features,
1082 1099 intents=None,
1083 1100 ):
1084 1101 """Create a new local repository instance.
1085 1102
1086 1103 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1087 1104 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1088 1105 object.
1089 1106
1090 1107 Arguments:
1091 1108
1092 1109 baseui
1093 1110 ``ui.ui`` instance that ``ui`` argument was based off of.
1094 1111
1095 1112 ui
1096 1113 ``ui.ui`` instance for use by the repository.
1097 1114
1098 1115 origroot
1099 1116 ``bytes`` path to working directory root of this repository.
1100 1117
1101 1118 wdirvfs
1102 1119 ``vfs.vfs`` rooted at the working directory.
1103 1120
1104 1121 hgvfs
1105 1122 ``vfs.vfs`` rooted at .hg/
1106 1123
1107 1124 requirements
1108 1125 ``set`` of bytestrings representing repository opening requirements.
1109 1126
1110 1127 supportedrequirements
1111 1128 ``set`` of bytestrings representing repository requirements that we
1112 1129 know how to open. May be a superset of ``requirements``.
1113 1130
1114 1131 sharedpath
1115 1132 ``bytes`` Defining path to storage base directory. Points to a
1116 1133 ``.hg/`` directory somewhere.
1117 1134
1118 1135 store
1119 1136 ``store.basicstore`` (or derived) instance providing access to
1120 1137 versioned storage.
1121 1138
1122 1139 cachevfs
1123 1140 ``vfs.vfs`` used for cache files.
1124 1141
1125 1142 wcachevfs
1126 1143 ``vfs.vfs`` used for cache files related to the working copy.
1127 1144
1128 1145 features
1129 1146 ``set`` of bytestrings defining features/capabilities of this
1130 1147 instance.
1131 1148
1132 1149 intents
1133 1150 ``set`` of system strings indicating what this repo will be used
1134 1151 for.
1135 1152 """
1136 1153 self.baseui = baseui
1137 1154 self.ui = ui
1138 1155 self.origroot = origroot
1139 1156 # vfs rooted at working directory.
1140 1157 self.wvfs = wdirvfs
1141 1158 self.root = wdirvfs.base
1142 1159 # vfs rooted at .hg/. Used to access most non-store paths.
1143 1160 self.vfs = hgvfs
1144 1161 self.path = hgvfs.base
1145 1162 self.requirements = requirements
1146 1163 self.supported = supportedrequirements
1147 1164 self.sharedpath = sharedpath
1148 1165 self.store = store
1149 1166 self.cachevfs = cachevfs
1150 1167 self.wcachevfs = wcachevfs
1151 1168 self.features = features
1152 1169
1153 1170 self.filtername = None
1154 1171
1155 1172 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1156 1173 b'devel', b'check-locks'
1157 1174 ):
1158 1175 self.vfs.audit = self._getvfsward(self.vfs.audit)
1159 1176 # A list of callbacks to shape the phase if no data were found.
1160 1177 # Callbacks are in the form: func(repo, roots) --> processed root.
1161 1178 # This list is to be filled by extensions during repo setup
1162 1179 self._phasedefaults = []
1163 1180
1164 1181 color.setup(self.ui)
1165 1182
1166 1183 self.spath = self.store.path
1167 1184 self.svfs = self.store.vfs
1168 1185 self.sjoin = self.store.join
1169 1186 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1170 1187 b'devel', b'check-locks'
1171 1188 ):
1172 1189 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1173 1190 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1174 1191 else: # standard vfs
1175 1192 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1176 1193
1177 1194 self._dirstatevalidatewarned = False
1178 1195
1179 1196 self._branchcaches = branchmap.BranchMapCache()
1180 1197 self._revbranchcache = None
1181 1198 self._filterpats = {}
1182 1199 self._datafilters = {}
1183 1200 self._transref = self._lockref = self._wlockref = None
1184 1201
1185 1202 # A cache for various files under .hg/ that tracks file changes
1186 1203 # (used by the filecache decorator)
1187 1204 #
1188 1205 # Maps a property name to its util.filecacheentry
1189 1206 self._filecache = {}
1190 1207
1191 1208 # hold sets of revisions to be filtered
1192 1209 # should be cleared when something might have changed the filter value:
1193 1210 # - new changesets,
1194 1211 # - phase change,
1195 1212 # - new obsolescence marker,
1196 1213 # - working directory parent change,
1197 1214 # - bookmark changes
1198 1215 self.filteredrevcache = {}
1199 1216
1200 1217 # post-dirstate-status hooks
1201 1218 self._postdsstatus = []
1202 1219
1203 1220 # generic mapping between names and nodes
1204 1221 self.names = namespaces.namespaces()
1205 1222
1206 1223 # Key to signature value.
1207 1224 self._sparsesignaturecache = {}
1208 1225 # Signature to cached matcher instance.
1209 1226 self._sparsematchercache = {}
1210 1227
1211 1228 self._extrafilterid = repoview.extrafilter(ui)
1212 1229
1213 1230 self.filecopiesmode = None
1214 1231 if COPIESSDC_REQUIREMENT in self.requirements:
1215 1232 self.filecopiesmode = b'changeset-sidedata'
1216 1233
1217 1234 def _getvfsward(self, origfunc):
1218 1235 """build a ward for self.vfs"""
1219 1236 rref = weakref.ref(self)
1220 1237
1221 1238 def checkvfs(path, mode=None):
1222 1239 ret = origfunc(path, mode=mode)
1223 1240 repo = rref()
1224 1241 if (
1225 1242 repo is None
1226 1243 or not util.safehasattr(repo, b'_wlockref')
1227 1244 or not util.safehasattr(repo, b'_lockref')
1228 1245 ):
1229 1246 return
1230 1247 if mode in (None, b'r', b'rb'):
1231 1248 return
1232 1249 if path.startswith(repo.path):
1233 1250 # truncate name relative to the repository (.hg)
1234 1251 path = path[len(repo.path) + 1 :]
1235 1252 if path.startswith(b'cache/'):
1236 1253 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1237 1254 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1238 1255 # path prefixes covered by 'lock'
1239 1256 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1240 1257 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1241 1258 if repo._currentlock(repo._lockref) is None:
1242 1259 repo.ui.develwarn(
1243 1260 b'write with no lock: "%s"' % path,
1244 1261 stacklevel=3,
1245 1262 config=b'check-locks',
1246 1263 )
1247 1264 elif repo._currentlock(repo._wlockref) is None:
1248 1265 # rest of vfs files are covered by 'wlock'
1249 1266 #
1250 1267 # exclude special files
1251 1268 for prefix in self._wlockfreeprefix:
1252 1269 if path.startswith(prefix):
1253 1270 return
1254 1271 repo.ui.develwarn(
1255 1272 b'write with no wlock: "%s"' % path,
1256 1273 stacklevel=3,
1257 1274 config=b'check-locks',
1258 1275 )
1259 1276 return ret
1260 1277
1261 1278 return checkvfs
1262 1279
1263 1280 def _getsvfsward(self, origfunc):
1264 1281 """build a ward for self.svfs"""
1265 1282 rref = weakref.ref(self)
1266 1283
1267 1284 def checksvfs(path, mode=None):
1268 1285 ret = origfunc(path, mode=mode)
1269 1286 repo = rref()
1270 1287 if repo is None or not util.safehasattr(repo, b'_lockref'):
1271 1288 return
1272 1289 if mode in (None, b'r', b'rb'):
1273 1290 return
1274 1291 if path.startswith(repo.sharedpath):
1275 1292 # truncate name relative to the repository (.hg)
1276 1293 path = path[len(repo.sharedpath) + 1 :]
1277 1294 if repo._currentlock(repo._lockref) is None:
1278 1295 repo.ui.develwarn(
1279 1296 b'write with no lock: "%s"' % path, stacklevel=4
1280 1297 )
1281 1298 return ret
1282 1299
1283 1300 return checksvfs
1284 1301
1285 1302 def close(self):
1286 1303 self._writecaches()
1287 1304
1288 1305 def _writecaches(self):
1289 1306 if self._revbranchcache:
1290 1307 self._revbranchcache.write()
1291 1308
1292 1309 def _restrictcapabilities(self, caps):
1293 1310 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1294 1311 caps = set(caps)
1295 1312 capsblob = bundle2.encodecaps(
1296 1313 bundle2.getrepocaps(self, role=b'client')
1297 1314 )
1298 1315 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1299 1316 return caps
1300 1317
1301 1318 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1302 1319 # self -> auditor -> self._checknested -> self
1303 1320
1304 1321 @property
1305 1322 def auditor(self):
1306 1323 # This is only used by context.workingctx.match in order to
1307 1324 # detect files in subrepos.
1308 1325 return pathutil.pathauditor(self.root, callback=self._checknested)
1309 1326
1310 1327 @property
1311 1328 def nofsauditor(self):
1312 1329 # This is only used by context.basectx.match in order to detect
1313 1330 # files in subrepos.
1314 1331 return pathutil.pathauditor(
1315 1332 self.root, callback=self._checknested, realfs=False, cached=True
1316 1333 )
1317 1334
1318 1335 def _checknested(self, path):
1319 1336 """Determine if path is a legal nested repository."""
1320 1337 if not path.startswith(self.root):
1321 1338 return False
1322 1339 subpath = path[len(self.root) + 1 :]
1323 1340 normsubpath = util.pconvert(subpath)
1324 1341
1325 1342 # XXX: Checking against the current working copy is wrong in
1326 1343 # the sense that it can reject things like
1327 1344 #
1328 1345 # $ hg cat -r 10 sub/x.txt
1329 1346 #
1330 1347 # if sub/ is no longer a subrepository in the working copy
1331 1348 # parent revision.
1332 1349 #
1333 1350 # However, it can of course also allow things that would have
1334 1351 # been rejected before, such as the above cat command if sub/
1335 1352 # is a subrepository now, but was a normal directory before.
1336 1353 # The old path auditor would have rejected by mistake since it
1337 1354 # panics when it sees sub/.hg/.
1338 1355 #
1339 1356 # All in all, checking against the working copy seems sensible
1340 1357 # since we want to prevent access to nested repositories on
1341 1358 # the filesystem *now*.
1342 1359 ctx = self[None]
1343 1360 parts = util.splitpath(subpath)
1344 1361 while parts:
1345 1362 prefix = b'/'.join(parts)
1346 1363 if prefix in ctx.substate:
1347 1364 if prefix == normsubpath:
1348 1365 return True
1349 1366 else:
1350 1367 sub = ctx.sub(prefix)
1351 1368 return sub.checknested(subpath[len(prefix) + 1 :])
1352 1369 else:
1353 1370 parts.pop()
1354 1371 return False
1355 1372
1356 1373 def peer(self):
1357 1374 return localpeer(self) # not cached to avoid reference cycle
1358 1375
1359 1376 def unfiltered(self):
1360 1377 """Return unfiltered version of the repository
1361 1378
1362 1379 Intended to be overwritten by filtered repo."""
1363 1380 return self
1364 1381
1365 1382 def filtered(self, name, visibilityexceptions=None):
1366 1383 """Return a filtered version of a repository
1367 1384
1368 1385 The `name` parameter is the identifier of the requested view. This
1369 1386 will return a repoview object set "exactly" to the specified view.
1370 1387
1371 1388 This function does not apply recursive filtering to a repository. For
1372 1389 example calling `repo.filtered("served")` will return a repoview using
1373 1390 the "served" view, regardless of the initial view used by `repo`.
1374 1391
1375 1392 In other words, there is always only one level of `repoview` "filtering".
1376 1393 """
1377 1394 if self._extrafilterid is not None and b'%' not in name:
1378 1395 name = name + b'%' + self._extrafilterid
1379 1396
1380 1397 cls = repoview.newtype(self.unfiltered().__class__)
1381 1398 return cls(self, name, visibilityexceptions)
1382 1399
1383 1400 @mixedrepostorecache(
1384 1401 (b'bookmarks', b'plain'),
1385 1402 (b'bookmarks.current', b'plain'),
1386 1403 (b'bookmarks', b''),
1387 1404 (b'00changelog.i', b''),
1388 1405 )
1389 1406 def _bookmarks(self):
1390 1407 # Since the multiple files involved in the transaction cannot be
1391 1408 # written atomically (with current repository format), there is a race
1392 1409 # condition here.
1393 1410 #
1394 1411 # 1) changelog content A is read
1395 1412 # 2) outside transaction update changelog to content B
1396 1413 # 3) outside transaction update bookmark file referring to content B
1397 1414 # 4) bookmarks file content is read and filtered against changelog-A
1398 1415 #
1399 1416 # When this happens, bookmarks against nodes missing from A are dropped.
1400 1417 #
1401 1418 # Having this happen during read is not great, but it becomes worse
1402 1419 # when this happens during write because the bookmarks to the "unknown"
1403 1420 # nodes will be dropped for good. However, writes happen within locks.
1404 1421 # This locking makes it possible to have a race-free consistent read.
1405 1422 # For this purpose, data read from disk before locking is
1406 1423 # "invalidated" right after the locks are taken. These invalidations are
1407 1424 # "light": the `filecache` mechanism keeps the data in memory and will
1408 1425 # reuse it if the underlying files did not change. Not parsing the
1409 1426 # same data multiple times helps performance.
1410 1427 #
1411 1428 # Unfortunately, in the case described above, the files tracked by the
1412 1429 # bookmarks file cache might not have changed, but the in-memory
1413 1430 # content is still "wrong" because we used an older changelog content
1414 1431 # to process the on-disk data. So after locking, the changelog would be
1415 1432 # refreshed but `_bookmarks` would be preserved.
1416 1433 # Adding `00changelog.i` to the list of tracked files is not
1417 1434 # enough, because at the time we build the content for `_bookmarks` in
1418 1435 # (4), the changelog file has already diverged from the content used
1419 1436 # for loading `changelog` in (1)
1420 1437 #
1421 1438 # To prevent the issue, we force the changelog to be explicitly
1422 1439 # reloaded while computing `_bookmarks`. The data race can still happen
1423 1440 # without the lock (with a narrower window), but it would no longer go
1424 1441 # undetected during the lock time refresh.
1425 1442 #
1426 1443 # The new schedule is as follows
1427 1444 #
1428 1445 # 1) filecache logic detects that `_bookmarks` needs to be computed
1429 1446 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1430 1447 # 3) We force `changelog` filecache to be tested
1431 1448 # 4) cachestat for `changelog` are captured (for changelog)
1432 1449 # 5) `_bookmarks` is computed and cached
1433 1450 #
1434 1451 # The step in (3) ensures we have a changelog at least as recent as the
1435 1452 # cache stat computed in (1). As a result at locking time:
1436 1453 # * if the changelog did not change since (1) -> we can reuse the data
1437 1454 # * otherwise -> the bookmarks get refreshed.
1438 1455 self._refreshchangelog()
1439 1456 return bookmarks.bmstore(self)
1440 1457
1441 1458 def _refreshchangelog(self):
1442 1459 """make sure the in memory changelog match the on-disk one"""
1443 1460 if 'changelog' in vars(self) and self.currenttransaction() is None:
1444 1461 del self.changelog
1445 1462
1446 1463 @property
1447 1464 def _activebookmark(self):
1448 1465 return self._bookmarks.active
1449 1466
1450 1467 # _phasesets depend on changelog. what we need is to call
1451 1468 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1452 1469 # can't be easily expressed in filecache mechanism.
1453 1470 @storecache(b'phaseroots', b'00changelog.i')
1454 1471 def _phasecache(self):
1455 1472 return phases.phasecache(self, self._phasedefaults)
1456 1473
1457 1474 @storecache(b'obsstore')
1458 1475 def obsstore(self):
1459 1476 return obsolete.makestore(self.ui, self)
1460 1477
1461 1478 @storecache(b'00changelog.i')
1462 1479 def changelog(self):
1463 1480 # load dirstate before changelog to avoid a race; see issue6303
1464 1481 self.dirstate.prefetch_parents()
1465 1482 return self.store.changelog(txnutil.mayhavepending(self.root))
1466 1483
1467 1484 @storecache(b'00manifest.i')
1468 1485 def manifestlog(self):
1469 1486 return self.store.manifestlog(self, self._storenarrowmatch)
1470 1487
1471 1488 @repofilecache(b'dirstate')
1472 1489 def dirstate(self):
1473 1490 return self._makedirstate()
1474 1491
1475 1492 def _makedirstate(self):
1476 1493 """Extension point for wrapping the dirstate per-repo."""
1477 1494 sparsematchfn = lambda: sparse.matcher(self)
1478 1495
1479 1496 return dirstate.dirstate(
1480 1497 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1481 1498 )
1482 1499
1483 1500 def _dirstatevalidate(self, node):
1484 1501 try:
1485 1502 self.changelog.rev(node)
1486 1503 return node
1487 1504 except error.LookupError:
1488 1505 if not self._dirstatevalidatewarned:
1489 1506 self._dirstatevalidatewarned = True
1490 1507 self.ui.warn(
1491 1508 _(b"warning: ignoring unknown working parent %s!\n")
1492 1509 % short(node)
1493 1510 )
1494 1511 return nullid
1495 1512
1496 1513 @storecache(narrowspec.FILENAME)
1497 1514 def narrowpats(self):
1498 1515 """matcher patterns for this repository's narrowspec
1499 1516
1500 1517 A tuple of (includes, excludes).
1501 1518 """
1502 1519 return narrowspec.load(self)
1503 1520
1504 1521 @storecache(narrowspec.FILENAME)
1505 1522 def _storenarrowmatch(self):
1506 1523 if repository.NARROW_REQUIREMENT not in self.requirements:
1507 1524 return matchmod.always()
1508 1525 include, exclude = self.narrowpats
1509 1526 return narrowspec.match(self.root, include=include, exclude=exclude)
1510 1527
1511 1528 @storecache(narrowspec.FILENAME)
1512 1529 def _narrowmatch(self):
1513 1530 if repository.NARROW_REQUIREMENT not in self.requirements:
1514 1531 return matchmod.always()
1515 1532 narrowspec.checkworkingcopynarrowspec(self)
1516 1533 include, exclude = self.narrowpats
1517 1534 return narrowspec.match(self.root, include=include, exclude=exclude)
1518 1535
1519 1536 def narrowmatch(self, match=None, includeexact=False):
1520 1537 """matcher corresponding the the repo's narrowspec
1521 1538
1522 1539 If `match` is given, then that will be intersected with the narrow
1523 1540 matcher.
1524 1541
1525 1542 If `includeexact` is True, then any exact matches from `match` will
1526 1543 be included even if they're outside the narrowspec.
1527 1544 """
1528 1545 if match:
1529 1546 if includeexact and not self._narrowmatch.always():
1530 1547 # do not exclude explicitly-specified paths so that they can
1531 1548 # be warned about later on
1532 1549 em = matchmod.exact(match.files())
1533 1550 nm = matchmod.unionmatcher([self._narrowmatch, em])
1534 1551 return matchmod.intersectmatchers(match, nm)
1535 1552 return matchmod.intersectmatchers(match, self._narrowmatch)
1536 1553 return self._narrowmatch
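# A usage sketch for ``narrowmatch`` (illustrative only, not part of the
# original module; the path is hypothetical):
#
#   em = matchmod.exact([b'outside/narrow.txt'])
#   nm = repo.narrowmatch(em, includeexact=True)
#   nm(b'outside/narrow.txt')  # True: exact matches survive, so callers
#                              # can warn about them later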
1537 1554
1538 1555 def setnarrowpats(self, newincludes, newexcludes):
1539 1556 narrowspec.save(self, newincludes, newexcludes)
1540 1557 self.invalidate(clearfilecache=True)
1541 1558
1542 1559 @unfilteredpropertycache
1543 1560 def _quick_access_changeid_null(self):
1544 1561 return {
1545 1562 b'null': (nullrev, nullid),
1546 1563 nullrev: (nullrev, nullid),
1547 1564 nullid: (nullrev, nullid),
1548 1565 }
1549 1566
1550 1567 @unfilteredpropertycache
1551 1568 def _quick_access_changeid_wc(self):
1552 1569 # also fast path access to the working copy parents
1553 1570 # however, only do it for filters that ensure the wc is visible.
1554 1571 quick = {}
1555 1572 cl = self.unfiltered().changelog
1556 1573 for node in self.dirstate.parents():
1557 1574 if node == nullid:
1558 1575 continue
1559 1576 rev = cl.index.get_rev(node)
1560 1577 if rev is None:
1561 1578 # unknown working copy parent case:
1562 1579 #
1563 1580 # skip the fast path and let higher code deal with it
1564 1581 continue
1565 1582 pair = (rev, node)
1566 1583 quick[rev] = pair
1567 1584 quick[node] = pair
1568 1585 # also add the parents of the parents
1569 1586 for r in cl.parentrevs(rev):
1570 1587 if r == nullrev:
1571 1588 continue
1572 1589 n = cl.node(r)
1573 1590 pair = (r, n)
1574 1591 quick[r] = pair
1575 1592 quick[n] = pair
1576 1593 p1node = self.dirstate.p1()
1577 1594 if p1node != nullid:
1578 1595 quick[b'.'] = quick[p1node]
1579 1596 return quick
1580 1597
1581 1598 @unfilteredmethod
1582 1599 def _quick_access_changeid_invalidate(self):
1583 1600 if '_quick_access_changeid_wc' in vars(self):
1584 1601 del self.__dict__['_quick_access_changeid_wc']
1585 1602
1586 1603 @property
1587 1604 def _quick_access_changeid(self):
1588 1605 """an helper dictionnary for __getitem__ calls
1589 1606
1590 1607 This contains a list of symbol we can recognise right away without
1591 1608 further processing.
1592 1609 """
1593 1610 mapping = self._quick_access_changeid_null
1594 1611 if self.filtername in repoview.filter_has_wc:
1595 1612 mapping = mapping.copy()
1596 1613 mapping.update(self._quick_access_changeid_wc)
1597 1614 return mapping
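# Illustrative sketch (not part of the original module): for a repository
# whose first working copy parent is revision 5 with binary node ``n5``,
# the mapping above lets ``__getitem__`` resolve common lookups without
# touching the changelog, e.g.:
#
#   repo._quick_access_changeid[b'null']  # -> (nullrev, nullid)
#   repo._quick_access_changeid[b'.']     # -> (5, n5)
#   repo._quick_access_changeid[n5]       # -> (5, n5)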
1598 1615
1599 1616 def __getitem__(self, changeid):
1600 1617 # dealing with special cases
1601 1618 if changeid is None:
1602 1619 return context.workingctx(self)
1603 1620 if isinstance(changeid, context.basectx):
1604 1621 return changeid
1605 1622
1606 1623 # dealing with multiple revisions
1607 1624 if isinstance(changeid, slice):
1608 1625 # wdirrev isn't contiguous so the slice shouldn't include it
1609 1626 return [
1610 1627 self[i]
1611 1628 for i in pycompat.xrange(*changeid.indices(len(self)))
1612 1629 if i not in self.changelog.filteredrevs
1613 1630 ]
1614 1631
1615 1632 # dealing with some special values
1616 1633 quick_access = self._quick_access_changeid.get(changeid)
1617 1634 if quick_access is not None:
1618 1635 rev, node = quick_access
1619 1636 return context.changectx(self, rev, node, maybe_filtered=False)
1620 1637 if changeid == b'tip':
1621 1638 node = self.changelog.tip()
1622 1639 rev = self.changelog.rev(node)
1623 1640 return context.changectx(self, rev, node)
1624 1641
1625 1642 # dealing with arbitrary values
1626 1643 try:
1627 1644 if isinstance(changeid, int):
1628 1645 node = self.changelog.node(changeid)
1629 1646 rev = changeid
1630 1647 elif changeid == b'.':
1631 1648 # this is a hack to delay/avoid loading obsmarkers
1632 1649 # when we know that '.' won't be hidden
1633 1650 node = self.dirstate.p1()
1634 1651 rev = self.unfiltered().changelog.rev(node)
1635 1652 elif len(changeid) == 20:
1636 1653 try:
1637 1654 node = changeid
1638 1655 rev = self.changelog.rev(changeid)
1639 1656 except error.FilteredLookupError:
1640 1657 changeid = hex(changeid) # for the error message
1641 1658 raise
1642 1659 except LookupError:
1643 1660 # check if it might have come from a damaged dirstate
1644 1661 #
1645 1662 # XXX we could avoid the unfiltered if we had a recognizable
1646 1663 # exception for filtered changeset access
1647 1664 if (
1648 1665 self.local()
1649 1666 and changeid in self.unfiltered().dirstate.parents()
1650 1667 ):
1651 1668 msg = _(b"working directory has unknown parent '%s'!")
1652 1669 raise error.Abort(msg % short(changeid))
1653 1670 changeid = hex(changeid) # for the error message
1654 1671 raise
1655 1672
1656 1673 elif len(changeid) == 40:
1657 1674 node = bin(changeid)
1658 1675 rev = self.changelog.rev(node)
1659 1676 else:
1660 1677 raise error.ProgrammingError(
1661 1678 b"unsupported changeid '%s' of type %s"
1662 1679 % (changeid, pycompat.bytestr(type(changeid)))
1663 1680 )
1664 1681
1665 1682 return context.changectx(self, rev, node)
1666 1683
1667 1684 except (error.FilteredIndexError, error.FilteredLookupError):
1668 1685 raise error.FilteredRepoLookupError(
1669 1686 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1670 1687 )
1671 1688 except (IndexError, LookupError):
1672 1689 raise error.RepoLookupError(
1673 1690 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1674 1691 )
1675 1692 except error.WdirUnsupported:
1676 1693 return context.workingctx(self)
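# Example lookups accepted by ``__getitem__`` above (a sketch; ``node``
# stands for any 20-byte binary node known to the changelog):
#
#   repo[None]       # workingctx for the working directory
#   repo[0]          # changectx for revision 0
#   repo[b'.']       # changectx for the working directory parent
#   repo[b'tip']     # changectx for the tip-most revision
#   repo[node]       # changectx for a binary node
#   repo[hex(node)]  # same, from a 40-byte hex string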
1677 1694
1678 1695 def __contains__(self, changeid):
1679 1696 """True if the given changeid exists
1680 1697
1681 1698 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1682 1699 is specified.
1683 1700 """
1684 1701 try:
1685 1702 self[changeid]
1686 1703 return True
1687 1704 except error.RepoLookupError:
1688 1705 return False
1689 1706
1690 1707 def __nonzero__(self):
1691 1708 return True
1692 1709
1693 1710 __bool__ = __nonzero__
1694 1711
1695 1712 def __len__(self):
1696 1713 # no need to pay the cost of repoview.changelog
1697 1714 unfi = self.unfiltered()
1698 1715 return len(unfi.changelog)
1699 1716
1700 1717 def __iter__(self):
1701 1718 return iter(self.changelog)
1702 1719
1703 1720 def revs(self, expr, *args):
1704 1721 '''Find revisions matching a revset.
1705 1722
1706 1723 The revset is specified as a string ``expr`` that may contain
1707 1724 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1708 1725
1709 1726 Revset aliases from the configuration are not expanded. To expand
1710 1727 user aliases, consider calling ``scmutil.revrange()`` or
1711 1728 ``repo.anyrevs([expr], user=True)``.
1712 1729
1713 1730 Returns a smartset.abstractsmartset, which is a list-like interface
1714 1731 that contains integer revisions.
1715 1732 '''
1716 1733 tree = revsetlang.spectree(expr, *args)
1717 1734 return revset.makematcher(tree)(self)
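# A few %-formatting examples (a sketch; see ``revsetlang.formatspec``
# for the full list of escapes):
#
#   repo.revs(b'%d::heads()', rev)       # %d escapes an integer revision
#   repo.revs(b'ancestors(%n)', node)    # %n escapes a binary node
#   repo.revs(b'%ld and merge()', revs)  # %ld escapes a list of revisions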
1718 1735
1719 1736 def set(self, expr, *args):
1720 1737 '''Find revisions matching a revset and emit changectx instances.
1721 1738
1722 1739 This is a convenience wrapper around ``revs()`` that iterates the
1723 1740 result and is a generator of changectx instances.
1724 1741
1725 1742 Revset aliases from the configuration are not expanded. To expand
1726 1743 user aliases, consider calling ``scmutil.revrange()``.
1727 1744 '''
1728 1745 for r in self.revs(expr, *args):
1729 1746 yield self[r]
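# e.g. (sketch): iterate draft changesets as contexts:
#
#   for ctx in repo.set(b'draft()'):
#       repo.ui.write(b'%s\n' % ctx.hex())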
1730 1747
1731 1748 def anyrevs(self, specs, user=False, localalias=None):
1732 1749 '''Find revisions matching one of the given revsets.
1733 1750
1734 1751 Revset aliases from the configuration are not expanded by default. To
1735 1752 expand user aliases, specify ``user=True``. To provide some local
1736 1753 definitions overriding user aliases, set ``localalias`` to
1737 1754 ``{name: definitionstring}``.
1738 1755 '''
1739 1756 if specs == [b'null']:
1740 1757 return revset.baseset([nullrev])
1741 1758 if specs == [b'.']:
1742 1759 quick_data = self._quick_access_changeid.get(b'.')
1743 1760 if quick_data is not None:
1744 1761 return revset.baseset([quick_data[0]])
1745 1762 if user:
1746 1763 m = revset.matchany(
1747 1764 self.ui,
1748 1765 specs,
1749 1766 lookup=revset.lookupfn(self),
1750 1767 localalias=localalias,
1751 1768 )
1752 1769 else:
1753 1770 m = revset.matchany(None, specs, localalias=localalias)
1754 1771 return m(self)
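# A ``localalias`` sketch (the alias name and definition are hypothetical):
#
#   repo.anyrevs(
#       [b'mine()'], user=True, localalias={b'mine': b'author(alice)'}
#   )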
1755 1772
1756 1773 def url(self):
1757 1774 return b'file:' + self.root
1758 1775
1759 1776 def hook(self, name, throw=False, **args):
1760 1777 """Call a hook, passing this repo instance.
1761 1778
1762 1779 This is a convenience method to aid invoking hooks. Extensions likely
1763 1780 won't call this unless they have registered a custom hook or are
1764 1781 replacing code that is expected to call a hook.
1765 1782 """
1766 1783 return hook.hook(self.ui, self, name, throw, **args)
1767 1784
1768 1785 @filteredpropertycache
1769 1786 def _tagscache(self):
1770 1787 '''Returns a tagscache object that contains various tags related
1771 1788 caches.'''
1772 1789
1773 1790 # This simplifies its cache management by having one decorated
1774 1791 # function (this one) and the rest simply fetch things from it.
1775 1792 class tagscache(object):
1776 1793 def __init__(self):
1777 1794 # These two define the set of tags for this repository. tags
1778 1795 # maps tag name to node; tagtypes maps tag name to 'global' or
1779 1796 # 'local'. (Global tags are defined by .hgtags across all
1780 1797 # heads, and local tags are defined in .hg/localtags.)
1781 1798 # They constitute the in-memory cache of tags.
1782 1799 self.tags = self.tagtypes = None
1783 1800
1784 1801 self.nodetagscache = self.tagslist = None
1785 1802
1786 1803 cache = tagscache()
1787 1804 cache.tags, cache.tagtypes = self._findtags()
1788 1805
1789 1806 return cache
1790 1807
1791 1808 def tags(self):
1792 1809 '''return a mapping of tag to node'''
1793 1810 t = {}
1794 1811 if self.changelog.filteredrevs:
1795 1812 tags, tt = self._findtags()
1796 1813 else:
1797 1814 tags = self._tagscache.tags
1798 1815 rev = self.changelog.rev
1799 1816 for k, v in pycompat.iteritems(tags):
1800 1817 try:
1801 1818 # ignore tags to unknown nodes
1802 1819 rev(v)
1803 1820 t[k] = v
1804 1821 except (error.LookupError, ValueError):
1805 1822 pass
1806 1823 return t
1807 1824
1808 1825 def _findtags(self):
1809 1826 '''Do the hard work of finding tags. Return a pair of dicts
1810 1827 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1811 1828 maps tag name to a string like \'global\' or \'local\'.
1812 1829 Subclasses or extensions are free to add their own tags, but
1813 1830 should be aware that the returned dicts will be retained for the
1814 1831 duration of the localrepo object.'''
1815 1832
1816 1833 # XXX what tagtype should subclasses/extensions use? Currently
1817 1834 # mq and bookmarks add tags, but do not set the tagtype at all.
1818 1835 # Should each extension invent its own tag type? Should there
1819 1836 # be one tagtype for all such "virtual" tags? Or is the status
1820 1837 # quo fine?
1821 1838
1822 1839 # map tag name to (node, hist)
1823 1840 alltags = tagsmod.findglobaltags(self.ui, self)
1824 1841 # map tag name to tag type
1825 1842 tagtypes = {tag: b'global' for tag in alltags}
1826 1843
1827 1844 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1828 1845
1829 1846 # Build the return dicts. Have to re-encode tag names because
1830 1847 # the tags module always uses UTF-8 (in order not to lose info
1831 1848 # writing to the cache), but the rest of Mercurial wants them in
1832 1849 # local encoding.
1833 1850 tags = {}
1834 1851 for (name, (node, hist)) in pycompat.iteritems(alltags):
1835 1852 if node != nullid:
1836 1853 tags[encoding.tolocal(name)] = node
1837 1854 tags[b'tip'] = self.changelog.tip()
1838 1855 tagtypes = {
1839 1856 encoding.tolocal(name): value
1840 1857 for (name, value) in pycompat.iteritems(tagtypes)
1841 1858 }
1842 1859 return (tags, tagtypes)
1843 1860
1844 1861 def tagtype(self, tagname):
1845 1862 '''
1846 1863 return the type of the given tag. result can be:
1847 1864
1848 1865 'local' : a local tag
1849 1866 'global' : a global tag
1850 1867 None : tag does not exist
1851 1868 '''
1852 1869
1853 1870 return self._tagscache.tagtypes.get(tagname)
1854 1871
1855 1872 def tagslist(self):
1856 1873 '''return a list of tags ordered by revision'''
1857 1874 if not self._tagscache.tagslist:
1858 1875 l = []
1859 1876 for t, n in pycompat.iteritems(self.tags()):
1860 1877 l.append((self.changelog.rev(n), t, n))
1861 1878 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1862 1879
1863 1880 return self._tagscache.tagslist
1864 1881
1865 1882 def nodetags(self, node):
1866 1883 '''return the tags associated with a node'''
1867 1884 if not self._tagscache.nodetagscache:
1868 1885 nodetagscache = {}
1869 1886 for t, n in pycompat.iteritems(self._tagscache.tags):
1870 1887 nodetagscache.setdefault(n, []).append(t)
1871 1888 for tags in pycompat.itervalues(nodetagscache):
1872 1889 tags.sort()
1873 1890 self._tagscache.nodetagscache = nodetagscache
1874 1891 return self._tagscache.nodetagscache.get(node, [])
1875 1892
1876 1893 def nodebookmarks(self, node):
1877 1894 """return the list of bookmarks pointing to the specified node"""
1878 1895 return self._bookmarks.names(node)
1879 1896
1880 1897 def branchmap(self):
1881 1898 '''returns a dictionary {branch: [branchheads]} with branchheads
1882 1899 ordered by increasing revision number'''
1883 1900 return self._branchcaches[self]
1884 1901
1885 1902 @unfilteredmethod
1886 1903 def revbranchcache(self):
1887 1904 if not self._revbranchcache:
1888 1905 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1889 1906 return self._revbranchcache
1890 1907
1891 1908 def branchtip(self, branch, ignoremissing=False):
1892 1909 '''return the tip node for a given branch
1893 1910
1894 1911 If ignoremissing is True, then this method will not raise an error.
1895 1912 This is helpful for callers that only expect None for a missing branch
1896 1913 (e.g. namespace).
1897 1914
1898 1915 '''
1899 1916 try:
1900 1917 return self.branchmap().branchtip(branch)
1901 1918 except KeyError:
1902 1919 if not ignoremissing:
1903 1920 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1904 1921 else:
1905 1922 pass
1906 1923
1907 1924 def lookup(self, key):
1908 1925 node = scmutil.revsymbol(self, key).node()
1909 1926 if node is None:
1910 1927 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1911 1928 return node
1912 1929
1913 1930 def lookupbranch(self, key):
1914 1931 if self.branchmap().hasbranch(key):
1915 1932 return key
1916 1933
1917 1934 return scmutil.revsymbol(self, key).branch()
1918 1935
1919 1936 def known(self, nodes):
1920 1937 cl = self.changelog
1921 1938 get_rev = cl.index.get_rev
1922 1939 filtered = cl.filteredrevs
1923 1940 result = []
1924 1941 for n in nodes:
1925 1942 r = get_rev(n)
1926 1943 resp = not (r is None or r in filtered)
1927 1944 result.append(resp)
1928 1945 return result
1929 1946
1930 1947 def local(self):
1931 1948 return self
1932 1949
1933 1950 def publishing(self):
1934 1951 # it's safe (and desirable) to trust the publish flag unconditionally
1935 1952 # so that we don't finalize changes shared between users via ssh or nfs
1936 1953 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1937 1954
1938 1955 def cancopy(self):
1939 1956 # so statichttprepo's override of local() works
1940 1957 if not self.local():
1941 1958 return False
1942 1959 if not self.publishing():
1943 1960 return True
1944 1961 # if publishing we can't copy if there is filtered content
1945 1962 return not self.filtered(b'visible').changelog.filteredrevs
1946 1963
1947 1964 def shared(self):
1948 1965 '''the type of shared repository (None if not shared)'''
1949 1966 if self.sharedpath != self.path:
1950 1967 return b'store'
1951 1968 return None
1952 1969
1953 1970 def wjoin(self, f, *insidef):
1954 1971 return self.vfs.reljoin(self.root, f, *insidef)
1955 1972
1956 1973 def setparents(self, p1, p2=nullid):
1957 1974 self[None].setparents(p1, p2)
1958 1975 self._quick_access_changeid_invalidate()
1959 1976
1960 1977 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1961 1978 """changeid must be a changeset revision, if specified.
1962 1979 fileid can be a file revision or node."""
1963 1980 return context.filectx(
1964 1981 self, path, changeid, fileid, changectx=changectx
1965 1982 )
1966 1983
1967 1984 def getcwd(self):
1968 1985 return self.dirstate.getcwd()
1969 1986
1970 1987 def pathto(self, f, cwd=None):
1971 1988 return self.dirstate.pathto(f, cwd)
1972 1989
1973 1990 def _loadfilter(self, filter):
1974 1991 if filter not in self._filterpats:
1975 1992 l = []
1976 1993 for pat, cmd in self.ui.configitems(filter):
1977 1994 if cmd == b'!':
1978 1995 continue
1979 1996 mf = matchmod.match(self.root, b'', [pat])
1980 1997 fn = None
1981 1998 params = cmd
1982 1999 for name, filterfn in pycompat.iteritems(self._datafilters):
1983 2000 if cmd.startswith(name):
1984 2001 fn = filterfn
1985 2002 params = cmd[len(name) :].lstrip()
1986 2003 break
1987 2004 if not fn:
1988 2005 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1989 2006 fn.__name__ = 'commandfilter'
1990 2007 # Wrap old filters not supporting keyword arguments
1991 2008 if not pycompat.getargspec(fn)[2]:
1992 2009 oldfn = fn
1993 2010 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1994 2011 fn.__name__ = 'compat-' + oldfn.__name__
1995 2012 l.append((mf, fn, params))
1996 2013 self._filterpats[filter] = l
1997 2014 return self._filterpats[filter]
1998 2015
1999 2016 def _filter(self, filterpats, filename, data):
2000 2017 for mf, fn, cmd in filterpats:
2001 2018 if mf(filename):
2002 2019 self.ui.debug(
2003 2020 b"filtering %s through %s\n"
2004 2021 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2005 2022 )
2006 2023 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2007 2024 break
2008 2025
2009 2026 return data
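# The filter patterns above come from hgrc sections named after
# ``filter``, i.e. ``[encode]`` and ``[decode]`` (see the callers of
# ``_loadfilter`` below). A hypothetical hgrc sketch:
#
#   [encode]
#   *.txt = tempfile: unix2dos INFILE OUTFILE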
2010 2027
2011 2028 @unfilteredpropertycache
2012 2029 def _encodefilterpats(self):
2013 2030 return self._loadfilter(b'encode')
2014 2031
2015 2032 @unfilteredpropertycache
2016 2033 def _decodefilterpats(self):
2017 2034 return self._loadfilter(b'decode')
2018 2035
2019 2036 def adddatafilter(self, name, filter):
2020 2037 self._datafilters[name] = filter
2021 2038
2022 2039 def wread(self, filename):
2023 2040 if self.wvfs.islink(filename):
2024 2041 data = self.wvfs.readlink(filename)
2025 2042 else:
2026 2043 data = self.wvfs.read(filename)
2027 2044 return self._filter(self._encodefilterpats, filename, data)
2028 2045
2029 2046 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2030 2047 """write ``data`` into ``filename`` in the working directory
2031 2048
2032 2049 This returns the length of the written (maybe decoded) data.
2033 2050 """
2034 2051 data = self._filter(self._decodefilterpats, filename, data)
2035 2052 if b'l' in flags:
2036 2053 self.wvfs.symlink(data, filename)
2037 2054 else:
2038 2055 self.wvfs.write(
2039 2056 filename, data, backgroundclose=backgroundclose, **kwargs
2040 2057 )
2041 2058 if b'x' in flags:
2042 2059 self.wvfs.setflags(filename, False, True)
2043 2060 else:
2044 2061 self.wvfs.setflags(filename, False, False)
2045 2062 return len(data)
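# ``flags`` follows manifest flag conventions (sketch): b'' for a regular
# file, b'l' for a symlink, b'x' for an executable, e.g.:
#
#   repo.wwrite(b'bin/run.sh', data, b'x')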
2046 2063
2047 2064 def wwritedata(self, filename, data):
2048 2065 return self._filter(self._decodefilterpats, filename, data)
2049 2066
2050 2067 def currenttransaction(self):
2051 2068 """return the current transaction or None if non exists"""
2052 2069 if self._transref:
2053 2070 tr = self._transref()
2054 2071 else:
2055 2072 tr = None
2056 2073
2057 2074 if tr and tr.running():
2058 2075 return tr
2059 2076 return None
2060 2077
2061 2078 def transaction(self, desc, report=None):
2062 2079 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2063 2080 b'devel', b'check-locks'
2064 2081 ):
2065 2082 if self._currentlock(self._lockref) is None:
2066 2083 raise error.ProgrammingError(b'transaction requires locking')
2067 2084 tr = self.currenttransaction()
2068 2085 if tr is not None:
2069 2086 return tr.nest(name=desc)
2070 2087
2071 2088 # abort here if the journal already exists
2072 2089 if self.svfs.exists(b"journal"):
2073 2090 raise error.RepoError(
2074 2091 _(b"abandoned transaction found"),
2075 2092 hint=_(b"run 'hg recover' to clean up transaction"),
2076 2093 )
2077 2094
2078 2095 idbase = b"%.40f#%f" % (random.random(), time.time())
2079 2096 ha = hex(hashutil.sha1(idbase).digest())
2080 2097 txnid = b'TXN:' + ha
2081 2098 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2082 2099
2083 2100 self._writejournal(desc)
2084 2101 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2085 2102 if report:
2086 2103 rp = report
2087 2104 else:
2088 2105 rp = self.ui.warn
2089 2106 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2090 2107 # we must avoid cyclic reference between repo and transaction.
2091 2108 reporef = weakref.ref(self)
2092 2109 # Code to track tag movement
2093 2110 #
2094 2111 # Since tags are all handled as file content, it is actually quite hard
2095 2112 # to track these movements from a code perspective. So we fall back to
2096 2113 # tracking at the repository level. One could envision tracking changes
2097 2114 # to the '.hgtags' file through changegroup apply but that fails to
2098 2115 # cope with cases where a transaction exposes new heads without a
2099 2116 # changegroup being involved (eg: phase movement).
2100 2117 #
2101 2118 # For now, we gate the feature behind a flag since this likely comes
2102 2119 # with performance impacts. The current code runs more often than needed
2103 2120 # and does not use caches as much as it could. The current focus is on
2104 2121 # the behavior of the feature so we disable it by default. The flag
2105 2122 # will be removed when we are happy with the performance impact.
2106 2123 #
2107 2124 # Once this feature is no longer experimental move the following
2108 2125 # documentation to the appropriate help section:
2109 2126 #
2110 2127 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2111 2128 # tags (new or changed or deleted tags). In addition the details of
2112 2129 # these changes are made available in a file at:
2113 2130 # ``REPOROOT/.hg/changes/tags.changes``.
2114 2131 # Make sure you check for HG_TAG_MOVED before reading that file as it
2115 2132 # might exist from a previous transaction even if no tags were touched
2116 2133 # in this one. Changes are recorded in a line-based format::
2117 2134 #
2118 2135 # <action> <hex-node> <tag-name>\n
2119 2136 #
2120 2137 # Actions are defined as follows:
2121 2138 # "-R": tag is removed,
2122 2139 # "+A": tag is added,
2123 2140 # "-M": tag is moved (old value),
2124 2141 # "+M": tag is moved (new value),
2125 2142 tracktags = lambda x: None
2126 2143 # experimental config: experimental.hook-track-tags
2127 2144 shouldtracktags = self.ui.configbool(
2128 2145 b'experimental', b'hook-track-tags'
2129 2146 )
2130 2147 if desc != b'strip' and shouldtracktags:
2131 2148 oldheads = self.changelog.headrevs()
2132 2149
2133 2150 def tracktags(tr2):
2134 2151 repo = reporef()
2135 2152 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2136 2153 newheads = repo.changelog.headrevs()
2137 2154 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2138 2155 # note: we compare lists here.
2139 2156 # As we do it only once, building a set would not be cheaper
2140 2157 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2141 2158 if changes:
2142 2159 tr2.hookargs[b'tag_moved'] = b'1'
2143 2160 with repo.vfs(
2144 2161 b'changes/tags.changes', b'w', atomictemp=True
2145 2162 ) as changesfile:
2146 2163 # note: we do not register the file to the transaction
2147 2164 # because we need it to still exist when the transaction
2148 2165 # is closed (for txnclose hooks)
2149 2166 tagsmod.writediff(changesfile, changes)
2150 2167
2151 2168 def validate(tr2):
2152 2169 """will run pre-closing hooks"""
2153 2170 # XXX the transaction API is a bit lacking here so we take a hacky
2154 2171 # path for now
2155 2172 #
2156 2173 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2157 2174 # dict is copied before these run. In addition we need the data
2158 2175 # available to in-memory hooks too.
2159 2176 #
2160 2177 # Moreover, we also need to make sure this runs before txnclose
2161 2178 # hooks and there is no "pending" mechanism that would execute
2162 2179 # logic only if hooks are about to run.
2163 2180 #
2164 2181 # Fixing this limitation of the transaction is also needed to track
2165 2182 # other families of changes (bookmarks, phases, obsolescence).
2166 2183 #
2167 2184 # This will have to be fixed before we remove the experimental
2168 2185 # gating.
2169 2186 tracktags(tr2)
2170 2187 repo = reporef()
2171 2188
2172 2189 singleheadopt = (b'experimental', b'single-head-per-branch')
2173 2190 singlehead = repo.ui.configbool(*singleheadopt)
2174 2191 if singlehead:
2175 2192 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2176 2193 accountclosed = singleheadsub.get(
2177 2194 b"account-closed-heads", False
2178 2195 )
2179 2196 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2180 2197 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2181 2198 for name, (old, new) in sorted(
2182 2199 tr.changes[b'bookmarks'].items()
2183 2200 ):
2184 2201 args = tr.hookargs.copy()
2185 2202 args.update(bookmarks.preparehookargs(name, old, new))
2186 2203 repo.hook(
2187 2204 b'pretxnclose-bookmark',
2188 2205 throw=True,
2189 2206 **pycompat.strkwargs(args)
2190 2207 )
2191 2208 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2192 2209 cl = repo.unfiltered().changelog
2193 2210 for revs, (old, new) in tr.changes[b'phases']:
2194 2211 for rev in revs:
2195 2212 args = tr.hookargs.copy()
2196 2213 node = hex(cl.node(rev))
2197 2214 args.update(phases.preparehookargs(node, old, new))
2198 2215 repo.hook(
2199 2216 b'pretxnclose-phase',
2200 2217 throw=True,
2201 2218 **pycompat.strkwargs(args)
2202 2219 )
2203 2220
2204 2221 repo.hook(
2205 2222 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2206 2223 )
2207 2224
2208 2225 def releasefn(tr, success):
2209 2226 repo = reporef()
2210 2227 if repo is None:
2211 2228 # If the repo has been GC'd (and this release function is being
2212 2229 # called from transaction.__del__), there's not much we can do,
2213 2230 # so just leave the unfinished transaction there and let the
2214 2231 # user run `hg recover`.
2215 2232 return
2216 2233 if success:
2217 2234 # this should be explicitly invoked here, because
2218 2235 # in-memory changes aren't written out at closing
2219 2236 # transaction, if tr.addfilegenerator (via
2220 2237 # dirstate.write or so) isn't invoked while
2221 2238 # transaction running
2222 2239 repo.dirstate.write(None)
2223 2240 else:
2224 2241 # discard all changes (including ones already written
2225 2242 # out) in this transaction
2226 2243 narrowspec.restorebackup(self, b'journal.narrowspec')
2227 2244 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2228 2245 repo.dirstate.restorebackup(None, b'journal.dirstate')
2229 2246
2230 2247 repo.invalidate(clearfilecache=True)
2231 2248
2232 2249 tr = transaction.transaction(
2233 2250 rp,
2234 2251 self.svfs,
2235 2252 vfsmap,
2236 2253 b"journal",
2237 2254 b"undo",
2238 2255 aftertrans(renames),
2239 2256 self.store.createmode,
2240 2257 validator=validate,
2241 2258 releasefn=releasefn,
2242 2259 checkambigfiles=_cachedfiles,
2243 2260 name=desc,
2244 2261 )
2245 2262 tr.changes[b'origrepolen'] = len(self)
2246 2263 tr.changes[b'obsmarkers'] = set()
2247 2264 tr.changes[b'phases'] = []
2248 2265 tr.changes[b'bookmarks'] = {}
2249 2266
2250 2267 tr.hookargs[b'txnid'] = txnid
2251 2268 tr.hookargs[b'txnname'] = desc
2252 2269 tr.hookargs[b'changes'] = tr.changes
2253 2270 # note: writing the fncache only during finalize means that the file is
2254 2271 # outdated when running hooks. As fncache is used for streaming clone,
2255 2272 # this is not expected to break anything that happens during the hooks.
2256 2273 tr.addfinalize(b'flush-fncache', self.store.write)
2257 2274
2258 2275 def txnclosehook(tr2):
2259 2276 """To be run if transaction is successful, will schedule a hook run
2260 2277 """
2261 2278 # Don't reference tr2 in hook() so we don't hold a reference.
2262 2279 # This reduces memory consumption when there are multiple
2263 2280 # transactions per lock. This can likely go away if issue5045
2264 2281 # fixes the function accumulation.
2265 2282 hookargs = tr2.hookargs
2266 2283
2267 2284 def hookfunc(unused_success):
2268 2285 repo = reporef()
2269 2286 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2270 2287 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2271 2288 for name, (old, new) in bmchanges:
2272 2289 args = tr.hookargs.copy()
2273 2290 args.update(bookmarks.preparehookargs(name, old, new))
2274 2291 repo.hook(
2275 2292 b'txnclose-bookmark',
2276 2293 throw=False,
2277 2294 **pycompat.strkwargs(args)
2278 2295 )
2279 2296
2280 2297 if hook.hashook(repo.ui, b'txnclose-phase'):
2281 2298 cl = repo.unfiltered().changelog
2282 2299 phasemv = sorted(
2283 2300 tr.changes[b'phases'], key=lambda r: r[0][0]
2284 2301 )
2285 2302 for revs, (old, new) in phasemv:
2286 2303 for rev in revs:
2287 2304 args = tr.hookargs.copy()
2288 2305 node = hex(cl.node(rev))
2289 2306 args.update(phases.preparehookargs(node, old, new))
2290 2307 repo.hook(
2291 2308 b'txnclose-phase',
2292 2309 throw=False,
2293 2310 **pycompat.strkwargs(args)
2294 2311 )
2295 2312
2296 2313 repo.hook(
2297 2314 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2298 2315 )
2299 2316
2300 2317 reporef()._afterlock(hookfunc)
2301 2318
2302 2319 tr.addfinalize(b'txnclose-hook', txnclosehook)
2303 2320 # Include a leading "-" to make it happen before the transaction summary
2304 2321 # reports registered via scmutil.registersummarycallback() whose names
2305 2322 # are 00-txnreport etc. That way, the caches will be warm when the
2306 2323 # callbacks run.
2307 2324 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2308 2325
2309 2326 def txnaborthook(tr2):
2310 2327 """To be run if transaction is aborted
2311 2328 """
2312 2329 reporef().hook(
2313 2330 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2314 2331 )
2315 2332
2316 2333 tr.addabort(b'txnabort-hook', txnaborthook)
2317 2334 # avoid eager cache invalidation. in-memory data should be identical
2318 2335 # to stored data if the transaction has no error.
2319 2336 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2320 2337 self._transref = weakref.ref(tr)
2321 2338 scmutil.registersummarycallback(self, tr, desc)
2322 2339 return tr
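# Typical call pattern (a sketch; ``b'my-operation'`` is hypothetical):
# the store lock must already be held, and the returned transaction works
# as a context manager, as in ``commit()`` below:
#
#   with repo.lock():
#       with repo.transaction(b'my-operation') as tr:
#           ...  # write to the store; rolled back on error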
2323 2340
2324 2341 def _journalfiles(self):
2325 2342 return (
2326 2343 (self.svfs, b'journal'),
2327 2344 (self.svfs, b'journal.narrowspec'),
2328 2345 (self.vfs, b'journal.narrowspec.dirstate'),
2329 2346 (self.vfs, b'journal.dirstate'),
2330 2347 (self.vfs, b'journal.branch'),
2331 2348 (self.vfs, b'journal.desc'),
2332 2349 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2333 2350 (self.svfs, b'journal.phaseroots'),
2334 2351 )
2335 2352
2336 2353 def undofiles(self):
2337 2354 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2338 2355
2339 2356 @unfilteredmethod
2340 2357 def _writejournal(self, desc):
2341 2358 self.dirstate.savebackup(None, b'journal.dirstate')
2342 2359 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2343 2360 narrowspec.savebackup(self, b'journal.narrowspec')
2344 2361 self.vfs.write(
2345 2362 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2346 2363 )
2347 2364 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2348 2365 bookmarksvfs = bookmarks.bookmarksvfs(self)
2349 2366 bookmarksvfs.write(
2350 2367 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2351 2368 )
2352 2369 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2353 2370
2354 2371 def recover(self):
2355 2372 with self.lock():
2356 2373 if self.svfs.exists(b"journal"):
2357 2374 self.ui.status(_(b"rolling back interrupted transaction\n"))
2358 2375 vfsmap = {
2359 2376 b'': self.svfs,
2360 2377 b'plain': self.vfs,
2361 2378 }
2362 2379 transaction.rollback(
2363 2380 self.svfs,
2364 2381 vfsmap,
2365 2382 b"journal",
2366 2383 self.ui.warn,
2367 2384 checkambigfiles=_cachedfiles,
2368 2385 )
2369 2386 self.invalidate()
2370 2387 return True
2371 2388 else:
2372 2389 self.ui.warn(_(b"no interrupted transaction available\n"))
2373 2390 return False
2374 2391
2375 2392 def rollback(self, dryrun=False, force=False):
2376 2393 wlock = lock = dsguard = None
2377 2394 try:
2378 2395 wlock = self.wlock()
2379 2396 lock = self.lock()
2380 2397 if self.svfs.exists(b"undo"):
2381 2398 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2382 2399
2383 2400 return self._rollback(dryrun, force, dsguard)
2384 2401 else:
2385 2402 self.ui.warn(_(b"no rollback information available\n"))
2386 2403 return 1
2387 2404 finally:
2388 2405 release(dsguard, lock, wlock)
2389 2406
2390 2407 @unfilteredmethod # Until we get smarter cache management
2391 2408 def _rollback(self, dryrun, force, dsguard):
2392 2409 ui = self.ui
2393 2410 try:
2394 2411 args = self.vfs.read(b'undo.desc').splitlines()
2395 2412 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2396 2413 if len(args) >= 3:
2397 2414 detail = args[2]
2398 2415 oldtip = oldlen - 1
2399 2416
2400 2417 if detail and ui.verbose:
2401 2418 msg = _(
2402 2419 b'repository tip rolled back to revision %d'
2403 2420 b' (undo %s: %s)\n'
2404 2421 ) % (oldtip, desc, detail)
2405 2422 else:
2406 2423 msg = _(
2407 2424 b'repository tip rolled back to revision %d (undo %s)\n'
2408 2425 ) % (oldtip, desc)
2409 2426 except IOError:
2410 2427 msg = _(b'rolling back unknown transaction\n')
2411 2428 desc = None
2412 2429
2413 2430 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2414 2431 raise error.Abort(
2415 2432 _(
2416 2433 b'rollback of last commit while not checked out '
2417 2434 b'may lose data'
2418 2435 ),
2419 2436 hint=_(b'use -f to force'),
2420 2437 )
2421 2438
2422 2439 ui.status(msg)
2423 2440 if dryrun:
2424 2441 return 0
2425 2442
2426 2443 parents = self.dirstate.parents()
2427 2444 self.destroying()
2428 2445 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2429 2446 transaction.rollback(
2430 2447 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2431 2448 )
2432 2449 bookmarksvfs = bookmarks.bookmarksvfs(self)
2433 2450 if bookmarksvfs.exists(b'undo.bookmarks'):
2434 2451 bookmarksvfs.rename(
2435 2452 b'undo.bookmarks', b'bookmarks', checkambig=True
2436 2453 )
2437 2454 if self.svfs.exists(b'undo.phaseroots'):
2438 2455 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2439 2456 self.invalidate()
2440 2457
2441 2458 has_node = self.changelog.index.has_node
2442 2459 parentgone = any(not has_node(p) for p in parents)
2443 2460 if parentgone:
2444 2461 # prevent dirstateguard from overwriting already restored one
2445 2462 dsguard.close()
2446 2463
2447 2464 narrowspec.restorebackup(self, b'undo.narrowspec')
2448 2465 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2449 2466 self.dirstate.restorebackup(None, b'undo.dirstate')
2450 2467 try:
2451 2468 branch = self.vfs.read(b'undo.branch')
2452 2469 self.dirstate.setbranch(encoding.tolocal(branch))
2453 2470 except IOError:
2454 2471 ui.warn(
2455 2472 _(
2456 2473 b'named branch could not be reset: '
2457 2474 b'current branch is still \'%s\'\n'
2458 2475 )
2459 2476 % self.dirstate.branch()
2460 2477 )
2461 2478
2462 2479 parents = tuple([p.rev() for p in self[None].parents()])
2463 2480 if len(parents) > 1:
2464 2481 ui.status(
2465 2482 _(
2466 2483 b'working directory now based on '
2467 2484 b'revisions %d and %d\n'
2468 2485 )
2469 2486 % parents
2470 2487 )
2471 2488 else:
2472 2489 ui.status(
2473 2490 _(b'working directory now based on revision %d\n') % parents
2474 2491 )
2475 2492 mergestatemod.mergestate.clean(self, self[b'.'].node())
2476 2493
2477 2494 # TODO: if we know which new heads may result from this rollback, pass
2478 2495 # them to destroy(), which will prevent the branchhead cache from being
2479 2496 # invalidated.
2480 2497 self.destroyed()
2481 2498 return 0
2482 2499
2483 2500 def _buildcacheupdater(self, newtransaction):
2484 2501 """called during transaction to build the callback updating cache
2485 2502
2486 2503 Lives on the repository to help extensions that might want to augment
2487 2504 this logic. For this purpose, the created transaction is passed to the
2488 2505 method.
2489 2506 """
2490 2507 # we must avoid cyclic reference between repo and transaction.
2491 2508 reporef = weakref.ref(self)
2492 2509
2493 2510 def updater(tr):
2494 2511 repo = reporef()
2495 2512 repo.updatecaches(tr)
2496 2513
2497 2514 return updater
2498 2515
2499 2516 @unfilteredmethod
2500 2517 def updatecaches(self, tr=None, full=False):
2501 2518 """warm appropriate caches
2502 2519
2503 2520 If this function is called after a transaction closed, the transaction
2504 2521 will be available in the 'tr' argument. This can be used to selectively
2505 2522 update caches relevant to the changes in that transaction.
2506 2523
2507 2524 If 'full' is set, make sure all caches the function knows about have
2508 2525 up-to-date data. Even the ones usually loaded more lazily.
2509 2526 """
2510 2527 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2511 2528 # During strip, many caches are invalid but
2512 2529 # a later call to `destroyed` will refresh them.
2513 2530 return
2514 2531
2515 2532 if tr is None or tr.changes[b'origrepolen'] < len(self):
2516 2533 # accessing the 'served' branchmap should refresh all the others,
2517 2534 self.ui.debug(b'updating the branch cache\n')
2518 2535 self.filtered(b'served').branchmap()
2519 2536 self.filtered(b'served.hidden').branchmap()
2520 2537
2521 2538 if full:
2522 2539 unfi = self.unfiltered()
2523 2540
2524 2541 self.changelog.update_caches(transaction=tr)
2525 2542 self.manifestlog.update_caches(transaction=tr)
2526 2543
2527 2544 rbc = unfi.revbranchcache()
2528 2545 for r in unfi.changelog:
2529 2546 rbc.branchinfo(r)
2530 2547 rbc.write()
2531 2548
2532 2549 # ensure the working copy parents are in the manifestfulltextcache
2533 2550 for ctx in self[b'.'].parents():
2534 2551 ctx.manifest() # accessing the manifest is enough
2535 2552
2536 2553 # accessing fnode cache warms the cache
2537 2554 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2538 2555 # accessing tags warm the cache
2539 2556 self.tags()
2540 2557 self.filtered(b'served').tags()
2541 2558
2542 2559 # The `full` arg is documented as updating even the lazily-loaded
2543 2560 # caches immediately, so we're forcing a write to cause these caches
2544 2561 # to be warmed up even if they haven't explicitly been requested
2545 2562 # yet (if they've never been used by hg, they won't ever have been
2546 2563 # written, even if they're a subset of another kind of cache that
2547 2564 # *has* been used).
2548 2565 for filt in repoview.filtertable.keys():
2549 2566 filtered = self.filtered(filt)
2550 2567 filtered.branchmap().write(filtered)
2551 2568
2552 2569 def invalidatecaches(self):
2553 2570
2554 2571 if '_tagscache' in vars(self):
2555 2572 # can't use delattr on proxy
2556 2573 del self.__dict__['_tagscache']
2557 2574
2558 2575 self._branchcaches.clear()
2559 2576 self.invalidatevolatilesets()
2560 2577 self._sparsesignaturecache.clear()
2561 2578
2562 2579 def invalidatevolatilesets(self):
2563 2580 self.filteredrevcache.clear()
2564 2581 obsolete.clearobscaches(self)
2565 2582 self._quick_access_changeid_invalidate()
2566 2583
2567 2584 def invalidatedirstate(self):
2568 2585 '''Invalidates the dirstate, causing the next call to dirstate
2569 2586 to check if it was modified since the last time it was read,
2570 2587 rereading it if it has.
2571 2588
2572 2589 This is different from dirstate.invalidate() in that it doesn't always
2573 2590 reread the dirstate. Use dirstate.invalidate() if you want to
2574 2591 explicitly read the dirstate again (i.e. restoring it to a previous
2575 2592 known good state).'''
2576 2593 if hasunfilteredcache(self, 'dirstate'):
2577 2594 for k in self.dirstate._filecache:
2578 2595 try:
2579 2596 delattr(self.dirstate, k)
2580 2597 except AttributeError:
2581 2598 pass
2582 2599 delattr(self.unfiltered(), 'dirstate')
2583 2600
2584 2601 def invalidate(self, clearfilecache=False):
2585 2602 '''Invalidates both store and non-store parts other than dirstate
2586 2603
2587 2604 If a transaction is running, invalidation of store is omitted,
2588 2605 because discarding in-memory changes might cause inconsistency
2589 2606 (e.g. incomplete fncache causes unintentional failure, but
2590 2607 redundant one doesn't).
2591 2608 '''
2592 2609 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2593 2610 for k in list(self._filecache.keys()):
2594 2611 # dirstate is invalidated separately in invalidatedirstate()
2595 2612 if k == b'dirstate':
2596 2613 continue
2597 2614 if (
2598 2615 k == b'changelog'
2599 2616 and self.currenttransaction()
2600 2617 and self.changelog._delayed
2601 2618 ):
2602 2619 # The changelog object may store unwritten revisions. We don't
2603 2620 # want to lose them.
2604 2621 # TODO: Solve the problem instead of working around it.
2605 2622 continue
2606 2623
2607 2624 if clearfilecache:
2608 2625 del self._filecache[k]
2609 2626 try:
2610 2627 delattr(unfiltered, k)
2611 2628 except AttributeError:
2612 2629 pass
2613 2630 self.invalidatecaches()
2614 2631 if not self.currenttransaction():
2615 2632 # TODO: Changing contents of store outside transaction
2616 2633 # causes inconsistency. We should make in-memory store
2617 2634 # changes detectable, and abort if changed.
2618 2635 self.store.invalidatecaches()
2619 2636
2620 2637 def invalidateall(self):
2621 2638 '''Fully invalidates both store and non-store parts, causing the
2622 2639 subsequent operation to reread any outside changes.'''
2623 2640 # extension should hook this to invalidate its caches
2624 2641 self.invalidate()
2625 2642 self.invalidatedirstate()
2626 2643
2627 2644 @unfilteredmethod
2628 2645 def _refreshfilecachestats(self, tr):
2629 2646 """Reload stats of cached files so that they are flagged as valid"""
2630 2647 for k, ce in self._filecache.items():
2631 2648 k = pycompat.sysstr(k)
2632 2649 if k == 'dirstate' or k not in self.__dict__:
2633 2650 continue
2634 2651 ce.refresh()
2635 2652
2636 2653 def _lock(
2637 2654 self,
2638 2655 vfs,
2639 2656 lockname,
2640 2657 wait,
2641 2658 releasefn,
2642 2659 acquirefn,
2643 2660 desc,
2644 2661 inheritchecker=None,
2645 2662 parentenvvar=None,
2646 2663 ):
2647 2664 parentlock = None
2648 2665 # the contents of parentenvvar are used by the underlying lock to
2649 2666 # determine whether it can be inherited
2650 2667 if parentenvvar is not None:
2651 2668 parentlock = encoding.environ.get(parentenvvar)
2652 2669
2653 2670 timeout = 0
2654 2671 warntimeout = 0
2655 2672 if wait:
2656 2673 timeout = self.ui.configint(b"ui", b"timeout")
2657 2674 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2658 2675 # internal config: ui.signal-safe-lock
2659 2676 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2660 2677
2661 2678 l = lockmod.trylock(
2662 2679 self.ui,
2663 2680 vfs,
2664 2681 lockname,
2665 2682 timeout,
2666 2683 warntimeout,
2667 2684 releasefn=releasefn,
2668 2685 acquirefn=acquirefn,
2669 2686 desc=desc,
2670 2687 inheritchecker=inheritchecker,
2671 2688 parentlock=parentlock,
2672 2689 signalsafe=signalsafe,
2673 2690 )
2674 2691 return l
2675 2692
2676 2693 def _afterlock(self, callback):
2677 2694 """add a callback to be run when the repository is fully unlocked
2678 2695
2679 2696 The callback will be executed when the outermost lock is released
2680 2697 (with wlock being higher level than 'lock')."""
2681 2698 for ref in (self._wlockref, self._lockref):
2682 2699 l = ref and ref()
2683 2700 if l and l.held:
2684 2701 l.postrelease.append(callback)
2685 2702 break
2686 2703 else: # no lock has been found.
2687 2704 callback(True)
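# Callback contract sketch: the callable receives a single boolean
# "success" flag (True when invoked with no lock held, as above):
#
#   def callback(success):
#       ...  # runs once the outermost lock is released
#   repo._afterlock(callback)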
2688 2705
2689 2706 def lock(self, wait=True):
2690 2707 '''Lock the repository store (.hg/store) and return a weak reference
2691 2708 to the lock. Use this before modifying the store (e.g. committing or
2692 2709 stripping). If you are opening a transaction, get a lock as well.
2693 2710
2694 2711 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2695 2712 'wlock' first to avoid a dead-lock hazard.'''
2696 2713 l = self._currentlock(self._lockref)
2697 2714 if l is not None:
2698 2715 l.lock()
2699 2716 return l
2700 2717
2701 2718 l = self._lock(
2702 2719 vfs=self.svfs,
2703 2720 lockname=b"lock",
2704 2721 wait=wait,
2705 2722 releasefn=None,
2706 2723 acquirefn=self.invalidate,
2707 2724 desc=_(b'repository %s') % self.origroot,
2708 2725 )
2709 2726 self._lockref = weakref.ref(l)
2710 2727 return l
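# When both locks are needed, acquire them in the documented order
# (sketch; compare ``commit()`` below):
#
#   with repo.wlock(), repo.lock():
#       ...  # safe to touch both .hg and .hg/store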
2711 2728
2712 2729 def _wlockchecktransaction(self):
2713 2730 if self.currenttransaction() is not None:
2714 2731 raise error.LockInheritanceContractViolation(
2715 2732 b'wlock cannot be inherited in the middle of a transaction'
2716 2733 )
2717 2734
2718 2735 def wlock(self, wait=True):
2719 2736 '''Lock the non-store parts of the repository (everything under
2720 2737 .hg except .hg/store) and return a weak reference to the lock.
2721 2738
2722 2739 Use this before modifying files in .hg.
2723 2740
2724 2741 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2725 2742 'wlock' first to avoid a dead-lock hazard.'''
2726 2743 l = self._wlockref and self._wlockref()
2727 2744 if l is not None and l.held:
2728 2745 l.lock()
2729 2746 return l
2730 2747
2731 2748 # We do not need to check for non-waiting lock acquisition. Such
2732 2749 # acquisitions would not cause a dead-lock as they would just fail.
2733 2750 if wait and (
2734 2751 self.ui.configbool(b'devel', b'all-warnings')
2735 2752 or self.ui.configbool(b'devel', b'check-locks')
2736 2753 ):
2737 2754 if self._currentlock(self._lockref) is not None:
2738 2755 self.ui.develwarn(b'"wlock" acquired after "lock"')
2739 2756
2740 2757 def unlock():
2741 2758 if self.dirstate.pendingparentchange():
2742 2759 self.dirstate.invalidate()
2743 2760 else:
2744 2761 self.dirstate.write(None)
2745 2762
2746 2763 self._filecache[b'dirstate'].refresh()
2747 2764
2748 2765 l = self._lock(
2749 2766 self.vfs,
2750 2767 b"wlock",
2751 2768 wait,
2752 2769 unlock,
2753 2770 self.invalidatedirstate,
2754 2771 _(b'working directory of %s') % self.origroot,
2755 2772 inheritchecker=self._wlockchecktransaction,
2756 2773 parentenvvar=b'HG_WLOCK_LOCKER',
2757 2774 )
2758 2775 self._wlockref = weakref.ref(l)
2759 2776 return l
2760 2777
2761 2778 def _currentlock(self, lockref):
2762 2779 """Returns the lock if it's held, or None if it's not."""
2763 2780 if lockref is None:
2764 2781 return None
2765 2782 l = lockref()
2766 2783 if l is None or not l.held:
2767 2784 return None
2768 2785 return l
2769 2786
2770 2787 def currentwlock(self):
2771 2788 """Returns the wlock if it's held, or None if it's not."""
2772 2789 return self._currentlock(self._wlockref)
2773 2790
2774 2791 def checkcommitpatterns(self, wctx, match, status, fail):
2775 2792 """check for commit arguments that aren't committable"""
2776 2793 if match.isexact() or match.prefix():
2777 2794 matched = set(status.modified + status.added + status.removed)
2778 2795
2779 2796 for f in match.files():
2780 2797 f = self.dirstate.normalize(f)
2781 2798 if f == b'.' or f in matched or f in wctx.substate:
2782 2799 continue
2783 2800 if f in status.deleted:
2784 2801 fail(f, _(b'file not found!'))
2785 2802 # Is it a directory that exists or used to exist?
2786 2803 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2787 2804 d = f + b'/'
2788 2805 for mf in matched:
2789 2806 if mf.startswith(d):
2790 2807 break
2791 2808 else:
2792 2809 fail(f, _(b"no match under directory!"))
2793 2810 elif f not in self.dirstate:
2794 2811 fail(f, _(b"file not tracked!"))
2795 2812
2796 2813 @unfilteredmethod
2797 2814 def commit(
2798 2815 self,
2799 2816 text=b"",
2800 2817 user=None,
2801 2818 date=None,
2802 2819 match=None,
2803 2820 force=False,
2804 2821 editor=None,
2805 2822 extra=None,
2806 2823 ):
2807 2824 """Add a new revision to current repository.
2808 2825
2809 2826 Revision information is gathered from the working directory,
2810 2827 match can be used to filter the committed files. If editor is
2811 2828 supplied, it is called to get a commit message.
2812 2829 """
2813 2830 if extra is None:
2814 2831 extra = {}
2815 2832
2816 2833 def fail(f, msg):
2817 2834 raise error.Abort(b'%s: %s' % (f, msg))
2818 2835
2819 2836 if not match:
2820 2837 match = matchmod.always()
2821 2838
2822 2839 if not force:
2823 2840 match.bad = fail
2824 2841
2825 2842 # lock() for recent changelog (see issue4368)
2826 2843 with self.wlock(), self.lock():
2827 2844 wctx = self[None]
2828 2845 merge = len(wctx.parents()) > 1
2829 2846
2830 2847 if not force and merge and not match.always():
2831 2848 raise error.Abort(
2832 2849 _(
2833 2850 b'cannot partially commit a merge '
2834 2851 b'(do not specify files or patterns)'
2835 2852 )
2836 2853 )
2837 2854
2838 2855 status = self.status(match=match, clean=force)
2839 2856 if force:
2840 2857 status.modified.extend(
2841 2858 status.clean
2842 2859 ) # mq may commit clean files
2843 2860
2844 2861 # check subrepos
2845 2862 subs, commitsubs, newstate = subrepoutil.precommit(
2846 2863 self.ui, wctx, status, match, force=force
2847 2864 )
2848 2865
2849 2866 # make sure all explicit patterns are matched
2850 2867 if not force:
2851 2868 self.checkcommitpatterns(wctx, match, status, fail)
2852 2869
2853 2870 cctx = context.workingcommitctx(
2854 2871 self, status, text, user, date, extra
2855 2872 )
2856 2873
2857 2874 ms = mergestatemod.mergestate.read(self)
2858 2875 mergeutil.checkunresolved(ms)
2859 2876
2860 2877 # internal config: ui.allowemptycommit
2861 2878 if cctx.isempty() and not self.ui.configbool(
2862 2879 b'ui', b'allowemptycommit'
2863 2880 ):
2864 2881 self.ui.debug(b'nothing to commit, clearing merge state\n')
2865 2882 ms.reset()
2866 2883 return None
2867 2884
2868 2885 if merge and cctx.deleted():
2869 2886 raise error.Abort(_(b"cannot commit merge with missing files"))
2870 2887
2871 2888 if editor:
2872 2889 cctx._text = editor(self, cctx, subs)
2873 2890 edited = text != cctx._text
2874 2891
2875 2892 # Save commit message in case this transaction gets rolled back
2876 2893 # (e.g. by a pretxncommit hook). Leave the content alone on
2877 2894 # the assumption that the user will use the same editor again.
2878 2895 msgfn = self.savecommitmessage(cctx._text)
2879 2896
2880 2897 # commit subs and write new state
2881 2898 if subs:
2882 2899 uipathfn = scmutil.getuipathfn(self)
2883 2900 for s in sorted(commitsubs):
2884 2901 sub = wctx.sub(s)
2885 2902 self.ui.status(
2886 2903 _(b'committing subrepository %s\n')
2887 2904 % uipathfn(subrepoutil.subrelpath(sub))
2888 2905 )
2889 2906 sr = sub.commit(cctx._text, user, date)
2890 2907 newstate[s] = (newstate[s][0], sr)
2891 2908 subrepoutil.writestate(self, newstate)
2892 2909
2893 2910 p1, p2 = self.dirstate.parents()
2894 2911 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2895 2912 try:
2896 2913 self.hook(
2897 2914 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2898 2915 )
2899 2916 with self.transaction(b'commit'):
2900 2917 ret = self.commitctx(cctx, True)
2901 2918 # update bookmarks, dirstate and mergestate
2902 2919 bookmarks.update(self, [p1, p2], ret)
2903 2920 cctx.markcommitted(ret)
2904 2921 ms.reset()
2905 2922 except: # re-raises
2906 2923 if edited:
2907 2924 self.ui.write(
2908 2925 _(b'note: commit message saved in %s\n') % msgfn
2909 2926 )
2910 2927 self.ui.write(
2911 2928 _(
2912 2929 b"note: use 'hg commit --logfile "
2913 2930 b".hg/last-message.txt --edit' to reuse it\n"
2914 2931 )
2915 2932 )
2916 2933 raise
2917 2934
2918 2935 def commithook(unused_success):
2919 2936 # hack for commands that use a temporary commit (eg: histedit):
2920 2937 # the temporary commit may have been stripped before the hook runs
2921 2938 if self.changelog.hasnode(ret):
2922 2939 self.hook(
2923 2940 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2924 2941 )
2925 2942
2926 2943 self._afterlock(commithook)
2927 2944 return ret
2928 2945
2929 2946 @unfilteredmethod
2930 2947 def commitctx(self, ctx, error=False, origctx=None):
2931 2948 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2932 2949
2933 2950 @unfilteredmethod
2934 2951 def destroying(self):
2935 2952 '''Inform the repository that nodes are about to be destroyed.
2936 2953 Intended for use by strip and rollback, so there's a common
2937 2954 place for anything that has to be done before destroying history.
2938 2955
2939 2956 This is mostly useful for saving state that is in memory and waiting
2940 2957 to be flushed when the current lock is released. Because a call to
2941 2958 destroyed is imminent, the repo will be invalidated causing those
2942 2959 changes to stay in memory (waiting for the next unlock), or vanish
2943 2960 completely.
2944 2961 '''
2945 2962 # When using the same lock to commit and strip, the phasecache is left
2946 2963 # dirty after committing. Then when we strip, the repo is invalidated,
2947 2964 # causing those changes to disappear.
2948 2965 if '_phasecache' in vars(self):
2949 2966 self._phasecache.write()
2950 2967
2951 2968 @unfilteredmethod
2952 2969 def destroyed(self):
2953 2970 '''Inform the repository that nodes have been destroyed.
2954 2971 Intended for use by strip and rollback, so there's a common
2955 2972 place for anything that has to be done after destroying history.
2956 2973 '''
2957 2974 # When one tries to:
2958 2975 # 1) destroy nodes thus calling this method (e.g. strip)
2959 2976 # 2) use phasecache somewhere (e.g. commit)
2960 2977 #
2961 2978 # then 2) will fail because the phasecache contains nodes that were
2962 2979 # removed. We can either remove phasecache from the filecache,
2963 2980 # causing it to reload next time it is accessed, or simply filter
2964 2981 # the removed nodes now and write the updated cache.
2965 2982 self._phasecache.filterunknown(self)
2966 2983 self._phasecache.write()
2967 2984
2968 2985 # refresh all repository caches
2969 2986 self.updatecaches()
2970 2987
2971 2988 # Ensure the persistent tag cache is updated. Doing it now
2972 2989 # means that the tag cache only has to worry about destroyed
2973 2990 # heads immediately after a strip/rollback. That in turn
2974 2991 # guarantees that "cachetip == currenttip" (comparing both rev
2975 2992 # and node) always means no nodes have been added or destroyed.
2976 2993
2977 2994 # XXX this is suboptimal when qrefresh'ing: we strip the current
2978 2995 # head, refresh the tag cache, then immediately add a new head.
2979 2996 # But I think doing it this way is necessary for the "instant
2980 2997 # tag cache retrieval" case to work.
2981 2998 self.invalidate()
2982 2999
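A minimal sketch of the calling sequence these two hooks expect, as used by strip; `removerevisions` is a hypothetical stand-in for the actual history rewriting:

    def _destroy_example(repo, removerevisions):
        with repo.lock():
            repo.destroying()        # flush in-memory state (e.g. phasecache)
            removerevisions(repo)    # hypothetical: drop the stripped changesets
            repo.destroyed()         # filter stale nodes and refresh caches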
2983 3000 def status(
2984 3001 self,
2985 3002 node1=b'.',
2986 3003 node2=None,
2987 3004 match=None,
2988 3005 ignored=False,
2989 3006 clean=False,
2990 3007 unknown=False,
2991 3008 listsubrepos=False,
2992 3009 ):
2993 3010 '''a convenience method that calls node1.status(node2)'''
2994 3011 return self[node1].status(
2995 3012 node2, match, ignored, clean, unknown, listsubrepos
2996 3013 )
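A short usage sketch, assuming `repo` is a local repository object; the result is a scmutil.status tuple with modified/added/removed (and friends) as attributes:

    def _status_example(repo):
        st = repo.status()            # working directory against '.'
        return len(st.modified), len(st.added), len(st.removed)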
2997 3014
2998 3015 def addpostdsstatus(self, ps):
2999 3016 """Add a callback to run within the wlock, at the point at which status
3000 3017 fixups happen.
3001 3018
3002 3019 On status completion, callback(wctx, status) will be called with the
3003 3020 wlock held, unless the dirstate has changed from underneath or the wlock
3004 3021 couldn't be grabbed.
3005 3022
3006 3023 Callbacks should not capture and use a cached copy of the dirstate --
3007 3024         it might change in the meantime. Instead, they should access the
3008 3025 dirstate via wctx.repo().dirstate.
3009 3026
3010 3027 This list is emptied out after each status run -- extensions should
3011 3028         make sure they add to this list each time dirstate.status is called.
3012 3029 Extensions should also make sure they don't call this for statuses
3013 3030 that don't involve the dirstate.
3014 3031 """
3015 3032
3016 3033 # The list is located here for uniqueness reasons -- it is actually
3017 3034 # managed by the workingctx, but that isn't unique per-repo.
3018 3035 self._postdsstatus.append(ps)
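A minimal sketch of registering such a callback; the function names here are illustrative only:

    def _register_fixup(repo):
        def fixup(wctx, status):
            # runs with the wlock held; access the dirstate via wctx.repo()
            wctx.repo().ui.debug(b'%d modified files\n' % len(status.modified))

        repo.addpostdsstatus(fixup)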
3019 3036
3020 3037 def postdsstatus(self):
3021 3038 """Used by workingctx to get the list of post-dirstate-status hooks."""
3022 3039 return self._postdsstatus
3023 3040
3024 3041 def clearpostdsstatus(self):
3025 3042 """Used by workingctx to clear post-dirstate-status hooks."""
3026 3043 del self._postdsstatus[:]
3027 3044
3028 3045 def heads(self, start=None):
3029 3046 if start is None:
3030 3047 cl = self.changelog
3031 3048 headrevs = reversed(cl.headrevs())
3032 3049 return [cl.node(rev) for rev in headrevs]
3033 3050
3034 3051 heads = self.changelog.heads(start)
3035 3052 # sort the output in rev descending order
3036 3053 return sorted(heads, key=self.changelog.rev, reverse=True)
3037 3054
3038 3055 def branchheads(self, branch=None, start=None, closed=False):
3039 3056 '''return a (possibly filtered) list of heads for the given branch
3040 3057
3041 3058 Heads are returned in topological order, from newest to oldest.
3042 3059 If branch is None, use the dirstate branch.
3043 3060 If start is not None, return only heads reachable from start.
3044 3061 If closed is True, return heads that are marked as closed as well.
3045 3062 '''
3046 3063 if branch is None:
3047 3064 branch = self[None].branch()
3048 3065 branches = self.branchmap()
3049 3066 if not branches.hasbranch(branch):
3050 3067 return []
3051 3068 # the cache returns heads ordered lowest to highest
3052 3069 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3053 3070 if start is not None:
3054 3071 # filter out the heads that cannot be reached from startrev
3055 3072 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3056 3073 bheads = [h for h in bheads if h in fbheads]
3057 3074 return bheads
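For illustration, a sketch listing the heads of the 'default' branch, newest first and including closed heads (assuming `repo` exists; `short` is imported at the top of this module):

    def _branchheads_example(repo):
        return [short(n) for n in repo.branchheads(b'default', closed=True)]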
3058 3075
3059 3076 def branches(self, nodes):
3060 3077 if not nodes:
3061 3078 nodes = [self.changelog.tip()]
3062 3079 b = []
3063 3080 for n in nodes:
3064 3081 t = n
3065 3082 while True:
3066 3083 p = self.changelog.parents(n)
3067 3084 if p[1] != nullid or p[0] == nullid:
3068 3085 b.append((t, n, p[0], p[1]))
3069 3086 break
3070 3087 n = p[0]
3071 3088 return b
3072 3089
3073 3090 def between(self, pairs):
3074 3091 r = []
3075 3092
3076 3093 for top, bottom in pairs:
3077 3094 n, l, i = top, [], 0
3078 3095 f = 1
3079 3096
3080 3097 while n != bottom and n != nullid:
3081 3098 p = self.changelog.parents(n)[0]
3082 3099 if i == f:
3083 3100 l.append(n)
3084 3101 f = f * 2
3085 3102 n = p
3086 3103 i += 1
3087 3104
3088 3105 r.append(l)
3089 3106
3090 3107 return r
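The loop above collects ancestors of `top` at exponentially growing distances (1, 2, 4, 8, ...) until `bottom` is reached, keeping the result logarithmic in the length of the chain. A pure-Python sketch of the same sampling over a plain list:

    def _exponential_samples(chain):
        out, f = [], 1
        while f < len(chain):
            out.append(chain[f])
            f *= 2
        return out

    # _exponential_samples(['n0', 'n1', 'n2', 'n3', 'n4', 'n5'])
    # -> ['n1', 'n2', 'n4']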
3091 3108
3092 3109 def checkpush(self, pushop):
3093 3110 """Extensions can override this function if additional checks have
3094 3111         to be performed before pushing, or call it if they override the push
3095 3112         command.
3096 3113 """
3097 3114
3098 3115 @unfilteredpropertycache
3099 3116 def prepushoutgoinghooks(self):
3100 3117 """Return util.hooks consists of a pushop with repo, remote, outgoing
3101 3118 methods, which are called before pushing changesets.
3102 3119 """
3103 3120 return util.hooks()
3104 3121
3105 3122 def pushkey(self, namespace, key, old, new):
3106 3123 try:
3107 3124 tr = self.currenttransaction()
3108 3125 hookargs = {}
3109 3126 if tr is not None:
3110 3127 hookargs.update(tr.hookargs)
3111 3128 hookargs = pycompat.strkwargs(hookargs)
3112 3129 hookargs['namespace'] = namespace
3113 3130 hookargs['key'] = key
3114 3131 hookargs['old'] = old
3115 3132 hookargs['new'] = new
3116 3133 self.hook(b'prepushkey', throw=True, **hookargs)
3117 3134 except error.HookAbort as exc:
3118 3135 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3119 3136 if exc.hint:
3120 3137 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3121 3138 return False
3122 3139 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3123 3140 ret = pushkey.push(self, namespace, key, old, new)
3124 3141
3125 3142 def runhook(unused_success):
3126 3143 self.hook(
3127 3144 b'pushkey',
3128 3145 namespace=namespace,
3129 3146 key=key,
3130 3147 old=old,
3131 3148 new=new,
3132 3149 ret=ret,
3133 3150 )
3134 3151
3135 3152 self._afterlock(runhook)
3136 3153 return ret
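A usage sketch (names illustrative): publishing a bookmark through the pushkey protocol, where old=b'' means the key is not yet set:

    def _pushkey_example(repo):
        new = repo[b'tip'].hex()
        return repo.pushkey(b'bookmarks', b'feature-x', b'', new)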
3137 3154
3138 3155 def listkeys(self, namespace):
3139 3156 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3140 3157 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3141 3158 values = pushkey.list(self, namespace)
3142 3159 self.hook(b'listkeys', namespace=namespace, values=values)
3143 3160 return values
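The matching read side: listing the bookmarks namespace returns a dict mapping bookmark names to hex nodes, e.g.:

    def _listkeys_example(repo):
        return repo.listkeys(b'bookmarks')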
3144 3161
3145 3162 def debugwireargs(self, one, two, three=None, four=None, five=None):
3146 3163 '''used to test argument passing over the wire'''
3147 3164 return b"%s %s %s %s %s" % (
3148 3165 one,
3149 3166 two,
3150 3167 pycompat.bytestr(three),
3151 3168 pycompat.bytestr(four),
3152 3169 pycompat.bytestr(five),
3153 3170 )
3154 3171
3155 3172 def savecommitmessage(self, text):
3156 3173 fp = self.vfs(b'last-message.txt', b'wb')
3157 3174 try:
3158 3175 fp.write(text)
3159 3176 finally:
3160 3177 fp.close()
3161 3178 return self.pathto(fp.name[len(self.root) + 1 :])
3162 3179
3163 3180
3164 3181 # used to avoid circular references so destructors work
3165 3182 def aftertrans(files):
3166 3183 renamefiles = [tuple(t) for t in files]
3167 3184
3168 3185 def a():
3169 3186 for vfs, src, dest in renamefiles:
3170 3187             # if src and dest refer to the same file, vfs.rename is a no-op,
3171 3188             # leaving both src and dest on disk. Delete dest to make sure
3172 3189             # the rename cannot be such a no-op.
3173 3190 vfs.tryunlink(dest)
3174 3191 try:
3175 3192 vfs.rename(src, dest)
3176 3193 except OSError: # journal file does not yet exist
3177 3194 pass
3178 3195
3179 3196 return a
3180 3197
3181 3198
3182 3199 def undoname(fn):
3183 3200 base, name = os.path.split(fn)
3184 3201 assert name.startswith(b'journal')
3185 3202 return os.path.join(base, name.replace(b'journal', b'undo', 1))
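For illustration, the journal-to-undo mapping this performs (comment-only sketch; POSIX path separators assumed):

    # undoname(b'store/journal')            -> b'store/undo'
    # undoname(b'store/journal.phaseroots') -> b'store/undo.phaseroots'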
3186 3203
3187 3204
3188 3205 def instance(ui, path, create, intents=None, createopts=None):
3189 3206 localpath = util.urllocalpath(path)
3190 3207 if create:
3191 3208 createrepository(ui, localpath, createopts=createopts)
3192 3209
3193 3210 return makelocalrepository(ui, localpath, intents=intents)
3194 3211
3195 3212
3196 3213 def islocal(path):
3197 3214 return True
3198 3215
3199 3216
3200 3217 def defaultcreateopts(ui, createopts=None):
3201 3218 """Populate the default creation options for a repository.
3202 3219
3203 3220 A dictionary of explicitly requested creation options can be passed
3204 3221 in. Missing keys will be populated.
3205 3222 """
3206 3223 createopts = dict(createopts or {})
3207 3224
3208 3225 if b'backend' not in createopts:
3209 3226 # experimental config: storage.new-repo-backend
3210 3227 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3211 3228
3212 3229 return createopts
3213 3230
3214 3231
3215 3232 def newreporequirements(ui, createopts):
3216 3233 """Determine the set of requirements for a new local repository.
3217 3234
3218 3235 Extensions can wrap this function to specify custom requirements for
3219 3236 new repositories.
3220 3237 """
3221 3238 # If the repo is being created from a shared repository, we copy
3222 3239 # its requirements.
3223 3240 if b'sharedrepo' in createopts:
3224 3241 requirements = set(createopts[b'sharedrepo'].requirements)
3225 3242 if createopts.get(b'sharedrelative'):
3226 3243 requirements.add(b'relshared')
3227 3244 else:
3228 3245 requirements.add(b'shared')
3229 3246
3230 3247 return requirements
3231 3248
3232 3249 if b'backend' not in createopts:
3233 3250 raise error.ProgrammingError(
3234 3251 b'backend key not present in createopts; '
3235 3252 b'was defaultcreateopts() called?'
3236 3253 )
3237 3254
3238 3255 if createopts[b'backend'] != b'revlogv1':
3239 3256 raise error.Abort(
3240 3257 _(
3241 3258 b'unable to determine repository requirements for '
3242 3259 b'storage backend: %s'
3243 3260 )
3244 3261 % createopts[b'backend']
3245 3262 )
3246 3263
3247 3264 requirements = {b'revlogv1'}
3248 3265 if ui.configbool(b'format', b'usestore'):
3249 3266 requirements.add(b'store')
3250 3267 if ui.configbool(b'format', b'usefncache'):
3251 3268 requirements.add(b'fncache')
3252 3269 if ui.configbool(b'format', b'dotencode'):
3253 3270 requirements.add(b'dotencode')
3254 3271
3255 3272 compengines = ui.configlist(b'format', b'revlog-compression')
3256 3273 for compengine in compengines:
3257 3274 if compengine in util.compengines:
3258 3275 break
3259 3276 else:
3260 3277 raise error.Abort(
3261 3278 _(
3262 3279 b'compression engines %s defined by '
3263 3280 b'format.revlog-compression not available'
3264 3281 )
3265 3282 % b', '.join(b'"%s"' % e for e in compengines),
3266 3283 hint=_(
3267 3284 b'run "hg debuginstall" to list available '
3268 3285 b'compression engines'
3269 3286 ),
3270 3287 )
3271 3288
3272 3289 # zlib is the historical default and doesn't need an explicit requirement.
3273 3290 if compengine == b'zstd':
3274 3291 requirements.add(b'revlog-compression-zstd')
3275 3292 elif compengine != b'zlib':
3276 3293 requirements.add(b'exp-compression-%s' % compengine)
3277 3294
3278 3295 if scmutil.gdinitconfig(ui):
3279 3296 requirements.add(b'generaldelta')
3280 3297 if ui.configbool(b'format', b'sparse-revlog'):
3281 3298 requirements.add(SPARSEREVLOG_REQUIREMENT)
3282 3299
3283 3300 # experimental config: format.exp-use-side-data
3284 3301 if ui.configbool(b'format', b'exp-use-side-data'):
3285 3302 requirements.add(SIDEDATA_REQUIREMENT)
3286 3303 # experimental config: format.exp-use-copies-side-data-changeset
3287 3304 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3288 3305 requirements.add(SIDEDATA_REQUIREMENT)
3289 3306 requirements.add(COPIESSDC_REQUIREMENT)
3290 3307 if ui.configbool(b'experimental', b'treemanifest'):
3291 3308 requirements.add(b'treemanifest')
3292 3309
3293 3310 revlogv2 = ui.config(b'experimental', b'revlogv2')
3294 3311 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3295 3312 requirements.remove(b'revlogv1')
3296 3313 # generaldelta is implied by revlogv2.
3297 3314 requirements.discard(b'generaldelta')
3298 3315 requirements.add(REVLOGV2_REQUIREMENT)
3299 3316 # experimental config: format.internal-phase
3300 3317 if ui.configbool(b'format', b'internal-phase'):
3301 3318 requirements.add(b'internal-phase')
3302 3319
3303 3320 if createopts.get(b'narrowfiles'):
3304 3321 requirements.add(repository.NARROW_REQUIREMENT)
3305 3322
3306 3323 if createopts.get(b'lfs'):
3307 3324 requirements.add(b'lfs')
3308 3325
3309 3326 if ui.configbool(b'format', b'bookmarks-in-store'):
3310 3327 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3311 3328
3312 3329 if ui.configbool(b'format', b'use-persistent-nodemap'):
3313 3330 requirements.add(NODEMAP_REQUIREMENT)
3314 3331
3315 3332 return requirements
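A hedged sketch of the wrapping the docstring mentions, assuming a standard extension with an extsetup() entry point; 'exp-myfeature' is a made-up requirement name:

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, ui, createopts):
        requirements = orig(ui, createopts)
        requirements.add(b'exp-myfeature')   # hypothetical requirement
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(
            localrepo, 'newreporequirements', _newreporequirements
        )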
3316 3333
3317 3334
3318 3335 def checkrequirementscompat(ui, requirements):
3319 3336 """ Checks compatibility of repository requirements enabled and disabled.
3320 3337
3321 3338 Returns a set of requirements which needs to be dropped because dependend
3322 3339 requirements are not enabled. Also warns users about it """
3323 3340
3324 3341 dropped = set()
3325 3342
3326 3343 if b'store' not in requirements:
3327 3344 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3328 3345 ui.warn(
3329 3346 _(
3330 3347 b'ignoring enabled \'format.bookmarks-in-store\' config '
3331 3348                 b'because it is incompatible with disabled '
3332 3349 b'\'format.usestore\' config\n'
3333 3350 )
3334 3351 )
3335 3352 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3336 3353
3337 3354 if b'shared' in requirements or b'relshared' in requirements:
3338 3355 raise error.Abort(
3339 3356 _(
3340 3357 b"cannot create shared repository as source was created"
3341 3358 b" with 'format.usestore' config disabled"
3342 3359 )
3343 3360 )
3344 3361
3345 3362 return dropped
3346 3363
3347 3364
3348 3365 def filterknowncreateopts(ui, createopts):
3349 3366 """Filters a dict of repo creation options against options that are known.
3350 3367
3351 3368 Receives a dict of repo creation options and returns a dict of those
3352 3369 options that we don't know how to handle.
3353 3370
3354 3371 This function is called as part of repository creation. If the
3355 3372 returned dict contains any items, repository creation will not
3356 3373 be allowed, as it means there was a request to create a repository
3357 3374 with options not recognized by loaded code.
3358 3375
3359 3376 Extensions can wrap this function to filter out creation options
3360 3377 they know how to handle.
3361 3378 """
3362 3379 known = {
3363 3380 b'backend',
3364 3381 b'lfs',
3365 3382 b'narrowfiles',
3366 3383 b'sharedrepo',
3367 3384 b'sharedrelative',
3368 3385 b'shareditems',
3369 3386 b'shallowfilestore',
3370 3387 }
3371 3388
3372 3389 return {k: v for k, v in createopts.items() if k not in known}
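The complementary wrap on the creation-option side: an extension claims its own key so repository creation is not rejected ('myextopt' is illustrative):

    def _filterknowncreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'myextopt', None)   # this extension handles that option
        return unknown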
3373 3390
3374 3391
3375 3392 def createrepository(ui, path, createopts=None):
3376 3393 """Create a new repository in a vfs.
3377 3394
3378 3395 ``path`` path to the new repo's working directory.
3379 3396 ``createopts`` options for the new repository.
3380 3397
3381 3398 The following keys for ``createopts`` are recognized:
3382 3399
3383 3400 backend
3384 3401 The storage backend to use.
3385 3402 lfs
3386 3403 Repository will be created with ``lfs`` requirement. The lfs extension
3387 3404 will automatically be loaded when the repository is accessed.
3388 3405 narrowfiles
3389 3406 Set up repository to support narrow file storage.
3390 3407 sharedrepo
3391 3408 Repository object from which storage should be shared.
3392 3409 sharedrelative
3393 3410 Boolean indicating if the path to the shared repo should be
3394 3411 stored as relative. By default, the pointer to the "parent" repo
3395 3412 is stored as an absolute path.
3396 3413 shareditems
3397 3414 Set of items to share to the new repository (in addition to storage).
3398 3415 shallowfilestore
3399 3416 Indicates that storage for files should be shallow (not all ancestor
3400 3417 revisions are known).
3401 3418 """
3402 3419 createopts = defaultcreateopts(ui, createopts=createopts)
3403 3420
3404 3421 unknownopts = filterknowncreateopts(ui, createopts)
3405 3422
3406 3423 if not isinstance(unknownopts, dict):
3407 3424 raise error.ProgrammingError(
3408 3425 b'filterknowncreateopts() did not return a dict'
3409 3426 )
3410 3427
3411 3428 if unknownopts:
3412 3429 raise error.Abort(
3413 3430 _(
3414 3431 b'unable to create repository because of unknown '
3415 3432 b'creation option: %s'
3416 3433 )
3417 3434 % b', '.join(sorted(unknownopts)),
3418 3435 hint=_(b'is a required extension not loaded?'),
3419 3436 )
3420 3437
3421 3438 requirements = newreporequirements(ui, createopts=createopts)
3422 3439 requirements -= checkrequirementscompat(ui, requirements)
3423 3440
3424 3441 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3425 3442
3426 3443 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3427 3444 if hgvfs.exists():
3428 3445 raise error.RepoError(_(b'repository %s already exists') % path)
3429 3446
3430 3447 if b'sharedrepo' in createopts:
3431 3448 sharedpath = createopts[b'sharedrepo'].sharedpath
3432 3449
3433 3450 if createopts.get(b'sharedrelative'):
3434 3451 try:
3435 3452 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3436 3453 except (IOError, ValueError) as e:
3437 3454 # ValueError is raised on Windows if the drive letters differ
3438 3455 # on each path.
3439 3456 raise error.Abort(
3440 3457 _(b'cannot calculate relative path'),
3441 3458 hint=stringutil.forcebytestr(e),
3442 3459 )
3443 3460
3444 3461 if not wdirvfs.exists():
3445 3462 wdirvfs.makedirs()
3446 3463
3447 3464 hgvfs.makedir(notindexed=True)
3448 3465 if b'sharedrepo' not in createopts:
3449 3466 hgvfs.mkdir(b'cache')
3450 3467 hgvfs.mkdir(b'wcache')
3451 3468
3452 3469 if b'store' in requirements and b'sharedrepo' not in createopts:
3453 3470 hgvfs.mkdir(b'store')
3454 3471
3455 3472 # We create an invalid changelog outside the store so very old
3456 3473 # Mercurial versions (which didn't know about the requirements
3457 3474 # file) encounter an error on reading the changelog. This
3458 3475 # effectively locks out old clients and prevents them from
3459 3476 # mucking with a repo in an unknown format.
3460 3477 #
3461 3478 # The revlog header has version 2, which won't be recognized by
3462 3479 # such old clients.
3463 3480 hgvfs.append(
3464 3481 b'00changelog.i',
3465 3482 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3466 3483 b'layout',
3467 3484 )
3468 3485
3469 3486 scmutil.writerequires(hgvfs, requirements)
3470 3487
3471 3488 # Write out file telling readers where to find the shared store.
3472 3489 if b'sharedrepo' in createopts:
3473 3490 hgvfs.write(b'sharedpath', sharedpath)
3474 3491
3475 3492 if createopts.get(b'shareditems'):
3476 3493 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3477 3494 hgvfs.write(b'shared', shared)
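A minimal direct-use sketch; callers normally go through hg.repository() or clone, and the path here is illustrative:

    from mercurial import ui as uimod

    def _create_example():
        u = uimod.ui.load()
        createrepository(u, b'/tmp/newrepo', createopts={b'lfs': True})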
3478 3495
3479 3496
3480 3497 def poisonrepository(repo):
3481 3498 """Poison a repository instance so it can no longer be used."""
3482 3499 # Perform any cleanup on the instance.
3483 3500 repo.close()
3484 3501
3485 3502 # Our strategy is to replace the type of the object with one that
3486 3503 # has all attribute lookups result in error.
3487 3504 #
3488 3505 # But we have to allow the close() method because some constructors
3489 3506 # of repos call close() on repo references.
3490 3507 class poisonedrepository(object):
3491 3508 def __getattribute__(self, item):
3492 3509 if item == 'close':
3493 3510 return object.__getattribute__(self, item)
3494 3511
3495 3512 raise error.ProgrammingError(
3496 3513 b'repo instances should not be used after unshare'
3497 3514 )
3498 3515
3499 3516 def close(self):
3500 3517 pass
3501 3518
3502 3519 # We may have a repoview, which intercepts __setattr__. So be sure
3503 3520 # we operate at the lowest level possible.
3504 3521 object.__setattr__(repo, '__class__', poisonedrepository)
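After poisoning (as unshare does), only close() keeps working; any other attribute access raises, e.g.:

    def _poison_example(repo):
        poisonrepository(repo)
        repo.close()          # close() remains callable
        try:
            repo.status()     # anything else raises
        except error.ProgrammingError:
            pass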