localrepo: handle ValueError during repository opening...
Gregory Szorc
r45469:9e5b4dbe default
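Editorial note: a minimal sketch (not from this commit) of the failure mode the change below guards against. On Python 3, os.stat() raises ValueError rather than OSError for certain malformed paths (for example one containing an embedded NUL byte), so the pre-existing OSError-only handler let the exception escape as a traceback instead of a clean abort. The probe() helper and the example path are hypothetical.

import errno
import os

def probe(path):
    # Mirrors the stat-and-handle pattern in makelocalrepository(): before
    # this change only OSError was handled, so a ValueError escaped uncaught.
    try:
        os.stat(os.path.join(path, '.hg'))
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        print('repository %s not found' % path)
    except ValueError as e:
        print('invalid path %s: %s' % (path, e))

probe('repo\x00name')  # a hypothetical path with an embedded NUL byte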
@@ -1,3789 +1,3794 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 mergestate as mergestatemod,
48 48 mergeutil,
49 49 metadata,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 rcutil,
58 58 repoview,
59 59 revset,
60 60 revsetlang,
61 61 scmutil,
62 62 sparse,
63 63 store as storemod,
64 64 subrepoutil,
65 65 tags as tagsmod,
66 66 transaction,
67 67 txnutil,
68 68 util,
69 69 vfs as vfsmod,
70 70 )
71 71
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76
77 77 from .utils import (
78 78 hashutil,
79 79 procutil,
80 80 stringutil,
81 81 )
82 82
83 83 from .revlogutils import constants as revlogconst
84 84
85 85 release = lockmod.release
86 86 urlerr = util.urlerr
87 87 urlreq = util.urlreq
88 88
89 89 # set of (path, vfs-location) tuples. vfs-location is:
90 90 # - 'plain' for vfs relative paths
91 91 # - '' for svfs relative paths
92 92 _cachedfiles = set()
93 93
94 94
95 95 class _basefilecache(scmutil.filecache):
96 96 """All filecache usage on repo are done for logic that should be unfiltered
97 97 """
98 98
99 99 def __get__(self, repo, type=None):
100 100 if repo is None:
101 101 return self
102 102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 103 unfi = repo.unfiltered()
104 104 try:
105 105 return unfi.__dict__[self.sname]
106 106 except KeyError:
107 107 pass
108 108 return super(_basefilecache, self).__get__(unfi, type)
109 109
110 110 def set(self, repo, value):
111 111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112 112
113 113
114 114 class repofilecache(_basefilecache):
115 115 """filecache for files in .hg but outside of .hg/store"""
116 116
117 117 def __init__(self, *paths):
118 118 super(repofilecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, b'plain'))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.vfs.join(fname)
124 124
125 125
126 126 class storecache(_basefilecache):
127 127 """filecache for files in the store"""
128 128
129 129 def __init__(self, *paths):
130 130 super(storecache, self).__init__(*paths)
131 131 for path in paths:
132 132 _cachedfiles.add((path, b''))
133 133
134 134 def join(self, obj, fname):
135 135 return obj.sjoin(fname)
136 136
137 137
138 138 class mixedrepostorecache(_basefilecache):
139 139 """filecache for a mix files in .hg/store and outside"""
140 140
141 141 def __init__(self, *pathsandlocations):
142 142 # scmutil.filecache only uses the path for passing back into our
143 143 # join(), so we can safely pass a list of paths and locations
144 144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 145 _cachedfiles.update(pathsandlocations)
146 146
147 147 def join(self, obj, fnameandlocation):
148 148 fname, location = fnameandlocation
149 149 if location == b'plain':
150 150 return obj.vfs.join(fname)
151 151 else:
152 152 if location != b'':
153 153 raise error.ProgrammingError(
154 154 b'unexpected location: %s' % location
155 155 )
156 156 return obj.sjoin(fname)
157 157
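Editorial sketch (not part of localrepo.py): how the three decorators above are used further down in this same file to declare cached properties backed by on-disk files under .hg/ and .hg/store/.

class _exampleusage(object):
    @repofilecache(b'dirstate')          # path tracked relative to .hg/
    def dirstate(self):
        pass  # body elided; the real property builds a dirstate object

    @storecache(b'00changelog.i')        # path tracked relative to .hg/store/
    def changelog(self):
        pass  # body elided

    @mixedrepostorecache((b'bookmarks', b'plain'), (b'bookmarks', b''))
    def _bookmarks(self):
        pass  # body elided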
158 158
159 159 def isfilecached(repo, name):
160 160 """check if a repo has already cached "name" filecache-ed property
161 161
162 162 This returns (cachedobj-or-None, iscached) tuple.
163 163 """
164 164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 165 if not cacheentry:
166 166 return None, False
167 167 return cacheentry.obj, True
168 168
169 169
170 170 class unfilteredpropertycache(util.propertycache):
171 171 """propertycache that apply to unfiltered repo only"""
172 172
173 173 def __get__(self, repo, type=None):
174 174 unfi = repo.unfiltered()
175 175 if unfi is repo:
176 176 return super(unfilteredpropertycache, self).__get__(unfi)
177 177 return getattr(unfi, self.name)
178 178
179 179
180 180 class filteredpropertycache(util.propertycache):
181 181 """propertycache that must take filtering in account"""
182 182
183 183 def cachevalue(self, obj, value):
184 184 object.__setattr__(obj, self.name, value)
185 185
186 186
187 187 def hasunfilteredcache(repo, name):
188 188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 189 return name in vars(repo.unfiltered())
190 190
191 191
192 192 def unfilteredmethod(orig):
193 193 """decorate method that always need to be run on unfiltered version"""
194 194
195 195 def wrapper(repo, *args, **kwargs):
196 196 return orig(repo.unfiltered(), *args, **kwargs)
197 197
198 198 return wrapper
199 199
200 200
201 201 moderncaps = {
202 202 b'lookup',
203 203 b'branchmap',
204 204 b'pushkey',
205 205 b'known',
206 206 b'getbundle',
207 207 b'unbundle',
208 208 }
209 209 legacycaps = moderncaps.union({b'changegroupsubset'})
210 210
211 211
212 212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 213 class localcommandexecutor(object):
214 214 def __init__(self, peer):
215 215 self._peer = peer
216 216 self._sent = False
217 217 self._closed = False
218 218
219 219 def __enter__(self):
220 220 return self
221 221
222 222 def __exit__(self, exctype, excvalue, exctb):
223 223 self.close()
224 224
225 225 def callcommand(self, command, args):
226 226 if self._sent:
227 227 raise error.ProgrammingError(
228 228 b'callcommand() cannot be used after sendcommands()'
229 229 )
230 230
231 231 if self._closed:
232 232 raise error.ProgrammingError(
233 233 b'callcommand() cannot be used after close()'
234 234 )
235 235
236 236 # We don't need to support anything fancy. Just call the named
237 237 # method on the peer and return a resolved future.
238 238 fn = getattr(self._peer, pycompat.sysstr(command))
239 239
240 240 f = pycompat.futures.Future()
241 241
242 242 try:
243 243 result = fn(**pycompat.strkwargs(args))
244 244 except Exception:
245 245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 246 else:
247 247 f.set_result(result)
248 248
249 249 return f
250 250
251 251 def sendcommands(self):
252 252 self._sent = True
253 253
254 254 def close(self):
255 255 self._closed = True
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommands)
259 259 class localpeer(repository.peer):
260 260 '''peer for a local repo; reflects only the most recent API'''
261 261
262 262 def __init__(self, repo, caps=None):
263 263 super(localpeer, self).__init__()
264 264
265 265 if caps is None:
266 266 caps = moderncaps.copy()
267 267 self._repo = repo.filtered(b'served')
268 268 self.ui = repo.ui
269 269 self._caps = repo._restrictcapabilities(caps)
270 270
271 271 # Begin of _basepeer interface.
272 272
273 273 def url(self):
274 274 return self._repo.url()
275 275
276 276 def local(self):
277 277 return self._repo
278 278
279 279 def peer(self):
280 280 return self
281 281
282 282 def canpush(self):
283 283 return True
284 284
285 285 def close(self):
286 286 self._repo.close()
287 287
288 288 # End of _basepeer interface.
289 289
290 290 # Begin of _basewirecommands interface.
291 291
292 292 def branchmap(self):
293 293 return self._repo.branchmap()
294 294
295 295 def capabilities(self):
296 296 return self._caps
297 297
298 298 def clonebundles(self):
299 299 return self._repo.tryread(b'clonebundles.manifest')
300 300
301 301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 302 """Used to test argument passing over the wire"""
303 303 return b"%s %s %s %s %s" % (
304 304 one,
305 305 two,
306 306 pycompat.bytestr(three),
307 307 pycompat.bytestr(four),
308 308 pycompat.bytestr(five),
309 309 )
310 310
311 311 def getbundle(
312 312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 313 ):
314 314 chunks = exchange.getbundlechunks(
315 315 self._repo,
316 316 source,
317 317 heads=heads,
318 318 common=common,
319 319 bundlecaps=bundlecaps,
320 320 **kwargs
321 321 )[1]
322 322 cb = util.chunkbuffer(chunks)
323 323
324 324 if exchange.bundle2requested(bundlecaps):
325 325 # When requesting a bundle2, getbundle returns a stream to make the
326 326 # wire level function happier. We need to build a proper object
327 327 # from it in local peer.
328 328 return bundle2.getunbundler(self.ui, cb)
329 329 else:
330 330 return changegroup.getunbundler(b'01', cb, None)
331 331
332 332 def heads(self):
333 333 return self._repo.heads()
334 334
335 335 def known(self, nodes):
336 336 return self._repo.known(nodes)
337 337
338 338 def listkeys(self, namespace):
339 339 return self._repo.listkeys(namespace)
340 340
341 341 def lookup(self, key):
342 342 return self._repo.lookup(key)
343 343
344 344 def pushkey(self, namespace, key, old, new):
345 345 return self._repo.pushkey(namespace, key, old, new)
346 346
347 347 def stream_out(self):
348 348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349 349
350 350 def unbundle(self, bundle, heads, url):
351 351 """apply a bundle on a repo
352 352
353 353 This function handles the repo locking itself."""
354 354 try:
355 355 try:
356 356 bundle = exchange.readbundle(self.ui, bundle, None)
357 357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 358 if util.safehasattr(ret, b'getchunks'):
359 359 # This is a bundle20 object, turn it into an unbundler.
360 360 # This little dance should be dropped eventually when the
361 361 # API is finally improved.
362 362 stream = util.chunkbuffer(ret.getchunks())
363 363 ret = bundle2.getunbundler(self.ui, stream)
364 364 return ret
365 365 except Exception as exc:
366 366 # If the exception contains output salvaged from a bundle2
367 367 # reply, we need to make sure it is printed before continuing
368 368 # to fail. So we build a bundle2 with such output and consume
369 369 # it directly.
370 370 #
371 371 # This is not very elegant but allows a "simple" solution for
372 372 # issue4594
373 373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 374 if output:
375 375 bundler = bundle2.bundle20(self._repo.ui)
376 376 for out in output:
377 377 bundler.addpart(out)
378 378 stream = util.chunkbuffer(bundler.getchunks())
379 379 b = bundle2.getunbundler(self.ui, stream)
380 380 bundle2.processbundle(self._repo, b)
381 381 raise
382 382 except error.PushRaced as exc:
383 383 raise error.ResponseError(
384 384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 385 )
386 386
387 387 # End of _basewirecommands interface.
388 388
389 389 # Begin of peer interface.
390 390
391 391 def commandexecutor(self):
392 392 return localcommandexecutor(self)
393 393
394 394 # End of peer interface.
395 395
396 396
397 397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 398 class locallegacypeer(localpeer):
399 399 '''peer extension which implements legacy methods too; used for tests with
400 400 restricted capabilities'''
401 401
402 402 def __init__(self, repo):
403 403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404 404
405 405 # Begin of baselegacywirecommands interface.
406 406
407 407 def between(self, pairs):
408 408 return self._repo.between(pairs)
409 409
410 410 def branches(self, nodes):
411 411 return self._repo.branches(nodes)
412 412
413 413 def changegroup(self, nodes, source):
414 414 outgoing = discovery.outgoing(
415 415 self._repo, missingroots=nodes, missingheads=self._repo.heads()
416 416 )
417 417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418 418
419 419 def changegroupsubset(self, bases, heads, source):
420 420 outgoing = discovery.outgoing(
421 421 self._repo, missingroots=bases, missingheads=heads
422 422 )
423 423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424 424
425 425 # End of baselegacywirecommands interface.
426 426
427 427
428 428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 429 # clients.
430 430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431 431
432 432 # A repository with the sparserevlog feature will have delta chains that
433 433 # can spread over a larger span. Sparse reading cuts these large spans into
434 434 # pieces, so that each piece isn't too big.
435 435 # Without the sparserevlog capability, reading from the repository could use
436 436 # huge amounts of memory, because the whole span would be read at once,
437 437 # including all the intermediate revisions that aren't pertinent for the chain.
438 438 # This is why once a repository has enabled sparse-read, it becomes required.
439 439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440 440
441 441 # A repository with the sidedataflag requirement will allow storing extra
442 442 # information for revisions without altering their original hashes.
443 443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444 444
445 445 # A repository with the copies-sidedata-changeset requirement will store
446 446 # copies related information in changeset's sidedata.
447 447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448 448
449 449 # The repository uses a persistent nodemap for the changelog and the manifest.
450 450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451 451
452 452 # Functions receiving (ui, features) that extensions can register to impact
453 453 # the ability to load repositories with custom requirements. Only
454 454 # functions defined in loaded extensions are called.
455 455 #
456 456 # The function receives a set of requirement strings that the repository
457 457 # is capable of opening. Functions will typically add elements to the
458 458 # set to reflect that the extension knows how to handle those requirements.
459 459 featuresetupfuncs = set()
460 460
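Editorial sketch (not part of localrepo.py): how an extension might register a feature-setup function. The extension module and the requirement name below are hypothetical; only functions defined in modules of loaded extensions are actually called.

# contents of a hypothetical extension module, e.g. myextension.py
from mercurial import localrepo

def featuresetup(ui, supported):
    # declare that this extension can open repos carrying this requirement
    supported.add(b'exp-myfeature')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)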
461 461
462 462 def makelocalrepository(baseui, path, intents=None):
463 463 """Create a local repository object.
464 464
465 465 Given arguments needed to construct a local repository, this function
466 466 performs various early repository loading functionality (such as
467 467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
468 468 the repository can be opened, derives a type suitable for representing
469 469 that repository, and returns an instance of it.
470 470
471 471 The returned object conforms to the ``repository.completelocalrepository``
472 472 interface.
473 473
474 474 The repository type is derived by calling a series of factory functions
475 475 for each aspect/interface of the final repository. These are defined by
476 476 ``REPO_INTERFACES``.
477 477
478 478 Each factory function is called to produce a type implementing a specific
479 479 interface. The cumulative list of returned types will be combined into a
480 480 new type and that type will be instantiated to represent the local
481 481 repository.
482 482
483 483 The factory functions each receive various state that may be consulted
484 484 as part of deriving a type.
485 485
486 486 Extensions should wrap these factory functions to customize repository type
487 487 creation. Note that an extension's wrapped function may be called even if
488 488 that extension is not loaded for the repo being constructed. Extensions
489 489 should check if their ``__name__`` appears in the
490 490 ``extensionmodulenames`` set passed to the factory function and no-op if
491 491 not.
492 492 """
493 493 ui = baseui.copy()
494 494 # Prevent copying repo configuration.
495 495 ui.copy = baseui.copy
496 496
497 497 # Working directory VFS rooted at repository root.
498 498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
499 499
500 500 # Main VFS for .hg/ directory.
501 501 hgpath = wdirvfs.join(b'.hg')
502 502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
503 503
504 504 # The .hg/ path should exist and should be a directory. All other
505 505 # cases are errors.
506 506 if not hgvfs.isdir():
507 507 try:
508 508 hgvfs.stat()
509 509 except OSError as e:
510 510 if e.errno != errno.ENOENT:
511 511 raise
512 except ValueError as e:
513 # Can be raised on Python 3.8 when path is invalid.
514 raise error.Abort(
515 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
516 )
512 517
513 518 raise error.RepoError(_(b'repository %s not found') % path)
514 519
515 520 # .hg/requires file contains a newline-delimited list of
516 521 # features/capabilities the opener (us) must have in order to use
517 522 # the repository. This file was introduced in Mercurial 0.9.2,
518 523 # which means very old repositories may not have one. We assume
519 524 # a missing file translates to no requirements.
520 525 try:
521 526 requirements = set(hgvfs.read(b'requires').splitlines())
522 527 except IOError as e:
523 528 if e.errno != errno.ENOENT:
524 529 raise
525 530 requirements = set()
526 531
527 532 # The .hg/hgrc file may load extensions or contain config options
528 533 # that influence repository construction. Attempt to load it and
529 534 # process any new extensions that it may have pulled in.
530 535 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
531 536 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
532 537 extensions.loadall(ui)
533 538 extensions.populateui(ui)
534 539
535 540 # Set of module names of extensions loaded for this repository.
536 541 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
537 542
538 543 supportedrequirements = gathersupportedrequirements(ui)
539 544
540 545 # We first validate the requirements are known.
541 546 ensurerequirementsrecognized(requirements, supportedrequirements)
542 547
543 548 # Then we validate that the known set is reasonable to use together.
544 549 ensurerequirementscompatible(ui, requirements)
545 550
546 551 # TODO there are unhandled edge cases related to opening repositories with
547 552 # shared storage. If storage is shared, we should also test for requirements
548 553 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
549 554 # that repo, as that repo may load extensions needed to open it. This is a
550 555 # bit complicated because we don't want the other hgrc to overwrite settings
551 556 # in this hgrc.
552 557 #
553 558 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
554 559 # file when sharing repos. But if a requirement is added after the share is
555 560 # performed, thereby introducing a new requirement for the opener, we may
556 561 # not see that and could encounter a run-time error interacting with
557 562 # that shared store since it has an unknown-to-us requirement.
558 563
559 564 # At this point, we know we should be capable of opening the repository.
560 565 # Now get on with doing that.
561 566
562 567 features = set()
563 568
564 569 # The "store" part of the repository holds versioned data. How it is
565 570 # accessed is determined by various requirements. The ``shared`` or
566 571 # ``relshared`` requirements indicate the store lives in the path contained
567 572 # in the ``.hg/sharedpath`` file. This is an absolute path for
568 573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
569 574 if b'shared' in requirements or b'relshared' in requirements:
570 575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
571 576 if b'relshared' in requirements:
572 577 sharedpath = hgvfs.join(sharedpath)
573 578
574 579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
575 580
576 581 if not sharedvfs.exists():
577 582 raise error.RepoError(
578 583 _(b'.hg/sharedpath points to nonexistent directory %s')
579 584 % sharedvfs.base
580 585 )
581 586
582 587 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
583 588
584 589 storebasepath = sharedvfs.base
585 590 cachepath = sharedvfs.join(b'cache')
586 591 else:
587 592 storebasepath = hgvfs.base
588 593 cachepath = hgvfs.join(b'cache')
589 594 wcachepath = hgvfs.join(b'wcache')
590 595
591 596 # The store has changed over time and the exact layout is dictated by
592 597 # requirements. The store interface abstracts differences across all
593 598 # of them.
594 599 store = makestore(
595 600 requirements,
596 601 storebasepath,
597 602 lambda base: vfsmod.vfs(base, cacheaudited=True),
598 603 )
599 604 hgvfs.createmode = store.createmode
600 605
601 606 storevfs = store.vfs
602 607 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
603 608
604 609 # The cache vfs is used to manage cache files.
605 610 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
606 611 cachevfs.createmode = store.createmode
607 612 # The cache vfs is used to manage cache files related to the working copy
608 613 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
609 614 wcachevfs.createmode = store.createmode
610 615
611 616 # Now resolve the type for the repository object. We do this by repeatedly
612 617 # calling a factory function to produce types for specific aspects of the
613 618 # repo's operation. The aggregate returned types are used as base classes
614 619 # for a dynamically-derived type, which will represent our new repository.
615 620
616 621 bases = []
617 622 extrastate = {}
618 623
619 624 for iface, fn in REPO_INTERFACES:
620 625 # We pass all potentially useful state to give extensions tons of
621 626 # flexibility.
622 627 typ = fn()(
623 628 ui=ui,
624 629 intents=intents,
625 630 requirements=requirements,
626 631 features=features,
627 632 wdirvfs=wdirvfs,
628 633 hgvfs=hgvfs,
629 634 store=store,
630 635 storevfs=storevfs,
631 636 storeoptions=storevfs.options,
632 637 cachevfs=cachevfs,
633 638 wcachevfs=wcachevfs,
634 639 extensionmodulenames=extensionmodulenames,
635 640 extrastate=extrastate,
636 641 baseclasses=bases,
637 642 )
638 643
639 644 if not isinstance(typ, type):
640 645 raise error.ProgrammingError(
641 646 b'unable to construct type for %s' % iface
642 647 )
643 648
644 649 bases.append(typ)
645 650
646 651 # type() allows you to use characters in type names that wouldn't be
647 652 # recognized as Python symbols in source code. We abuse that to add
648 653 # rich information about our constructed repo.
649 654 name = pycompat.sysstr(
650 655 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
651 656 )
652 657
653 658 cls = type(name, tuple(bases), {})
654 659
655 660 return cls(
656 661 baseui=baseui,
657 662 ui=ui,
658 663 origroot=path,
659 664 wdirvfs=wdirvfs,
660 665 hgvfs=hgvfs,
661 666 requirements=requirements,
662 667 supportedrequirements=supportedrequirements,
663 668 sharedpath=storebasepath,
664 669 store=store,
665 670 cachevfs=cachevfs,
666 671 wcachevfs=wcachevfs,
667 672 features=features,
668 673 intents=intents,
669 674 )
670 675
671 676
672 677 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
673 678 """Load hgrc files/content into a ui instance.
674 679
675 680 This is called during repository opening to load any additional
676 681 config files or settings relevant to the current repository.
677 682
678 683 Returns a bool indicating whether any additional configs were loaded.
679 684
680 685 Extensions should monkeypatch this function to modify how per-repo
681 686 configs are loaded. For example, an extension may wish to pull in
682 687 configs from alternate files or sources.
683 688 """
684 689 if not rcutil.use_repo_hgrc():
685 690 return False
686 691 try:
687 692 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
688 693 return True
689 694 except IOError:
690 695 return False
691 696
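Editorial sketch (not part of localrepo.py), assuming the standard extensions.wrapfunction() API: the kind of monkeypatching the docstring above describes, here an extension pulling an additional per-repo config file into the ui. The file name hgrc-extra is hypothetical.

from mercurial import extensions, localrepo

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # read an additional, hypothetical per-repo config file
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)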
692 697
693 698 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
694 699 """Perform additional actions after .hg/hgrc is loaded.
695 700
696 701 This function is called during repository loading immediately after
697 702 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
698 703
699 704 The function can be used to validate configs, automatically add
700 705 options (including extensions) based on requirements, etc.
701 706 """
702 707
703 708 # Map of requirements to list of extensions to load automatically when
704 709 # requirement is present.
705 710 autoextensions = {
706 711 b'git': [b'git'],
707 712 b'largefiles': [b'largefiles'],
708 713 b'lfs': [b'lfs'],
709 714 }
710 715
711 716 for requirement, names in sorted(autoextensions.items()):
712 717 if requirement not in requirements:
713 718 continue
714 719
715 720 for name in names:
716 721 if not ui.hasconfig(b'extensions', name):
717 722 ui.setconfig(b'extensions', name, b'', source=b'autoload')
718 723
719 724
720 725 def gathersupportedrequirements(ui):
721 726 """Determine the complete set of recognized requirements."""
722 727 # Start with all requirements supported by this file.
723 728 supported = set(localrepository._basesupported)
724 729
725 730 # Execute ``featuresetupfuncs`` entries if they belong to an extension
726 731 # relevant to this ui instance.
727 732 modules = {m.__name__ for n, m in extensions.extensions(ui)}
728 733
729 734 for fn in featuresetupfuncs:
730 735 if fn.__module__ in modules:
731 736 fn(ui, supported)
732 737
733 738 # Add derived requirements from registered compression engines.
734 739 for name in util.compengines:
735 740 engine = util.compengines[name]
736 741 if engine.available() and engine.revlogheader():
737 742 supported.add(b'exp-compression-%s' % name)
738 743 if engine.name() == b'zstd':
739 744 supported.add(b'revlog-compression-zstd')
740 745
741 746 return supported
742 747
743 748
744 749 def ensurerequirementsrecognized(requirements, supported):
745 750 """Validate that a set of local requirements is recognized.
746 751
747 752 Receives a set of requirements. Raises an ``error.RepoError`` if there
748 753 exists any requirement in that set that currently loaded code doesn't
749 754 recognize.
750 755
751 756 Returns a set of supported requirements.
752 757 """
753 758 missing = set()
754 759
755 760 for requirement in requirements:
756 761 if requirement in supported:
757 762 continue
758 763
759 764 if not requirement or not requirement[0:1].isalnum():
760 765 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
761 766
762 767 missing.add(requirement)
763 768
764 769 if missing:
765 770 raise error.RequirementError(
766 771 _(b'repository requires features unknown to this Mercurial: %s')
767 772 % b' '.join(sorted(missing)),
768 773 hint=_(
769 774 b'see https://mercurial-scm.org/wiki/MissingRequirement '
770 775 b'for more information'
771 776 ),
772 777 )
773 778
774 779
775 780 def ensurerequirementscompatible(ui, requirements):
776 781 """Validates that a set of recognized requirements is mutually compatible.
777 782
778 783 Some requirements may not be compatible with others or require
779 784 config options that aren't enabled. This function is called during
780 785 repository opening to ensure that the set of requirements needed
781 786 to open a repository is sane and compatible with config options.
782 787
783 788 Extensions can monkeypatch this function to perform additional
784 789 checking.
785 790
786 791 ``error.RepoError`` should be raised on failure.
787 792 """
788 793 if b'exp-sparse' in requirements and not sparse.enabled:
789 794 raise error.RepoError(
790 795 _(
791 796 b'repository is using sparse feature but '
792 797 b'sparse is not enabled; enable the '
793 798 b'"sparse" extensions to access'
794 799 )
795 800 )
796 801
797 802
798 803 def makestore(requirements, path, vfstype):
799 804 """Construct a storage object for a repository."""
800 805 if b'store' in requirements:
801 806 if b'fncache' in requirements:
802 807 return storemod.fncachestore(
803 808 path, vfstype, b'dotencode' in requirements
804 809 )
805 810
806 811 return storemod.encodedstore(path, vfstype)
807 812
808 813 return storemod.basicstore(path, vfstype)
809 814
810 815
811 816 def resolvestorevfsoptions(ui, requirements, features):
812 817 """Resolve the options to pass to the store vfs opener.
813 818
814 819 The returned dict is used to influence behavior of the storage layer.
815 820 """
816 821 options = {}
817 822
818 823 if b'treemanifest' in requirements:
819 824 options[b'treemanifest'] = True
820 825
821 826 # experimental config: format.manifestcachesize
822 827 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
823 828 if manifestcachesize is not None:
824 829 options[b'manifestcachesize'] = manifestcachesize
825 830
826 831 # In the absence of another requirement superseding a revlog-related
827 832 # requirement, we have to assume the repo is using revlog version 0.
828 833 # This revlog format is super old and we don't bother trying to parse
829 834 # opener options for it because those options wouldn't do anything
830 835 # meaningful on such old repos.
831 836 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
832 837 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
833 838 else: # explicitly mark repo as using revlogv0
834 839 options[b'revlogv0'] = True
835 840
836 841 if COPIESSDC_REQUIREMENT in requirements:
837 842 options[b'copies-storage'] = b'changeset-sidedata'
838 843 else:
839 844 writecopiesto = ui.config(b'experimental', b'copies.write-to')
840 845 copiesextramode = (b'changeset-only', b'compatibility')
841 846 if writecopiesto in copiesextramode:
842 847 options[b'copies-storage'] = b'extra'
843 848
844 849 return options
845 850
846 851
847 852 def resolverevlogstorevfsoptions(ui, requirements, features):
848 853 """Resolve opener options specific to revlogs."""
849 854
850 855 options = {}
851 856 options[b'flagprocessors'] = {}
852 857
853 858 if b'revlogv1' in requirements:
854 859 options[b'revlogv1'] = True
855 860 if REVLOGV2_REQUIREMENT in requirements:
856 861 options[b'revlogv2'] = True
857 862
858 863 if b'generaldelta' in requirements:
859 864 options[b'generaldelta'] = True
860 865
861 866 # experimental config: format.chunkcachesize
862 867 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
863 868 if chunkcachesize is not None:
864 869 options[b'chunkcachesize'] = chunkcachesize
865 870
866 871 deltabothparents = ui.configbool(
867 872 b'storage', b'revlog.optimize-delta-parent-choice'
868 873 )
869 874 options[b'deltabothparents'] = deltabothparents
870 875
871 876 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
872 877 lazydeltabase = False
873 878 if lazydelta:
874 879 lazydeltabase = ui.configbool(
875 880 b'storage', b'revlog.reuse-external-delta-parent'
876 881 )
877 882 if lazydeltabase is None:
878 883 lazydeltabase = not scmutil.gddeltaconfig(ui)
879 884 options[b'lazydelta'] = lazydelta
880 885 options[b'lazydeltabase'] = lazydeltabase
881 886
882 887 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
883 888 if 0 <= chainspan:
884 889 options[b'maxdeltachainspan'] = chainspan
885 890
886 891 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
887 892 if mmapindexthreshold is not None:
888 893 options[b'mmapindexthreshold'] = mmapindexthreshold
889 894
890 895 withsparseread = ui.configbool(b'experimental', b'sparse-read')
891 896 srdensitythres = float(
892 897 ui.config(b'experimental', b'sparse-read.density-threshold')
893 898 )
894 899 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
895 900 options[b'with-sparse-read'] = withsparseread
896 901 options[b'sparse-read-density-threshold'] = srdensitythres
897 902 options[b'sparse-read-min-gap-size'] = srmingapsize
898 903
899 904 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
900 905 options[b'sparse-revlog'] = sparserevlog
901 906 if sparserevlog:
902 907 options[b'generaldelta'] = True
903 908
904 909 sidedata = SIDEDATA_REQUIREMENT in requirements
905 910 options[b'side-data'] = sidedata
906 911
907 912 maxchainlen = None
908 913 if sparserevlog:
909 914 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
910 915 # experimental config: format.maxchainlen
911 916 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
912 917 if maxchainlen is not None:
913 918 options[b'maxchainlen'] = maxchainlen
914 919
915 920 for r in requirements:
916 921 # we allow multiple compression engine requirements to co-exist because
917 922 # strictly speaking, revlog seems to support mixed compression styles.
918 923 #
919 924 # The compression used for new entries will be "the last one"
920 925 prefix = r.startswith
921 926 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
922 927 options[b'compengine'] = r.split(b'-', 2)[2]
923 928
924 929 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
925 930 if options[b'zlib.level'] is not None:
926 931 if not (0 <= options[b'zlib.level'] <= 9):
927 932 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
928 933 raise error.Abort(msg % options[b'zlib.level'])
929 934 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
930 935 if options[b'zstd.level'] is not None:
931 936 if not (0 <= options[b'zstd.level'] <= 22):
932 937 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
933 938 raise error.Abort(msg % options[b'zstd.level'])
934 939
935 940 if repository.NARROW_REQUIREMENT in requirements:
936 941 options[b'enableellipsis'] = True
937 942
938 943 if ui.configbool(b'experimental', b'rust.index'):
939 944 options[b'rust.index'] = True
940 945 if NODEMAP_REQUIREMENT in requirements:
941 946 options[b'persistent-nodemap'] = True
942 947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
943 948 options[b'persistent-nodemap.mmap'] = True
944 949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
945 950 options[b'persistent-nodemap.mode'] = epnm
946 951 if ui.configbool(b'devel', b'persistent-nodemap'):
947 952 options[b'devel-force-nodemap'] = True
948 953
949 954 return options
950 955
951 956
952 957 def makemain(**kwargs):
953 958 """Produce a type conforming to ``ilocalrepositorymain``."""
954 959 return localrepository
955 960
956 961
957 962 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
958 963 class revlogfilestorage(object):
959 964 """File storage when using revlogs."""
960 965
961 966 def file(self, path):
962 967 if path[0] == b'/':
963 968 path = path[1:]
964 969
965 970 return filelog.filelog(self.svfs, path)
966 971
967 972
968 973 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
969 974 class revlognarrowfilestorage(object):
970 975 """File storage when using revlogs and narrow files."""
971 976
972 977 def file(self, path):
973 978 if path[0] == b'/':
974 979 path = path[1:]
975 980
976 981 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
977 982
978 983
979 984 def makefilestorage(requirements, features, **kwargs):
980 985 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
981 986 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
982 987 features.add(repository.REPO_FEATURE_STREAM_CLONE)
983 988
984 989 if repository.NARROW_REQUIREMENT in requirements:
985 990 return revlognarrowfilestorage
986 991 else:
987 992 return revlogfilestorage
988 993
989 994
990 995 # List of repository interfaces and factory functions for them. Each
991 996 # will be called in order during ``makelocalrepository()`` to iteratively
992 997 # derive the final type for a local repository instance. We capture the
993 998 # function as a lambda so we don't hold a reference and the module-level
994 999 # functions can be wrapped.
995 1000 REPO_INTERFACES = [
996 1001 (repository.ilocalrepositorymain, lambda: makemain),
997 1002 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
998 1003 ]
999 1004
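Editorial sketch (not part of localrepo.py): the factory-wrapping pattern the makelocalrepository() docstring recommends. An extension wraps one of the factories named above and no-ops unless it is actually loaded for the repo being constructed; the customized subclass is hypothetical.

from mercurial import extensions, localrepo

def _makefilestorage(orig, requirements, features, **kwargs):
    typ = orig(requirements, features, **kwargs)
    # no-op unless this extension is loaded for the repo being constructed
    if __name__ not in kwargs['extensionmodulenames']:
        return typ

    class extendedfilestorage(typ):
        def file(self, path):
            fl = super(extendedfilestorage, self).file(path)
            # ... customize the returned filelog here (hypothetical) ...
            return fl

    return extendedfilestorage

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)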
1000 1005
1001 1006 @interfaceutil.implementer(repository.ilocalrepositorymain)
1002 1007 class localrepository(object):
1003 1008 """Main class for representing local repositories.
1004 1009
1005 1010 All local repositories are instances of this class.
1006 1011
1007 1012 Constructed on its own, instances of this class are not usable as
1008 1013 repository objects. To obtain a usable repository object, call
1009 1014 ``hg.repository()``, ``localrepo.instance()``, or
1010 1015 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1011 1016 ``instance()`` adds support for creating new repositories.
1012 1017 ``hg.repository()`` adds more extension integration, including calling
1013 1018 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1014 1019 used.
1015 1020 """
1016 1021
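Editorial usage sketch for the entry points named in the docstring above (the repository path is hypothetical):

from mercurial import hg, ui as uimod

ui = uimod.ui.load()
# hg.repository() is the recommended, fully extension-aware entry point
repo = hg.repository(ui, b'/path/to/repo')
print(len(repo))  # number of changesets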
1017 1022 # obsolete experimental requirements:
1018 1023 # - manifestv2: An experimental new manifest format that allowed
1019 1024 # for stem compression of long paths. Experiment ended up not
1020 1025 # being successful (repository sizes went up due to worse delta
1021 1026 # chains), and the code was deleted in 4.6.
1022 1027 supportedformats = {
1023 1028 b'revlogv1',
1024 1029 b'generaldelta',
1025 1030 b'treemanifest',
1026 1031 COPIESSDC_REQUIREMENT,
1027 1032 REVLOGV2_REQUIREMENT,
1028 1033 SIDEDATA_REQUIREMENT,
1029 1034 SPARSEREVLOG_REQUIREMENT,
1030 1035 NODEMAP_REQUIREMENT,
1031 1036 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1032 1037 }
1033 1038 _basesupported = supportedformats | {
1034 1039 b'store',
1035 1040 b'fncache',
1036 1041 b'shared',
1037 1042 b'relshared',
1038 1043 b'dotencode',
1039 1044 b'exp-sparse',
1040 1045 b'internal-phase',
1041 1046 }
1042 1047
1043 1048 # list of prefixes for files which can be written without 'wlock'
1044 1049 # Extensions should extend this list when needed
1045 1050 _wlockfreeprefix = {
1046 1051 # We might consider requiring 'wlock' for the next
1047 1052 # two, but pretty much all the existing code assume
1048 1053 # wlock is not needed so we keep them excluded for
1049 1054 # now.
1050 1055 b'hgrc',
1051 1056 b'requires',
1052 1057 # XXX cache is a complicated business; someone
1053 1058 # should investigate this in depth at some point
1054 1059 b'cache/',
1055 1060 # XXX shouldn't be dirstate covered by the wlock?
1056 1061 b'dirstate',
1057 1062 # XXX bisect was still a bit too messy at the time
1058 1063 # this changeset was introduced. Someone should fix
1059 1064 # the remaining bit and drop this line
1060 1065 b'bisect.state',
1061 1066 }
1062 1067
1063 1068 def __init__(
1064 1069 self,
1065 1070 baseui,
1066 1071 ui,
1067 1072 origroot,
1068 1073 wdirvfs,
1069 1074 hgvfs,
1070 1075 requirements,
1071 1076 supportedrequirements,
1072 1077 sharedpath,
1073 1078 store,
1074 1079 cachevfs,
1075 1080 wcachevfs,
1076 1081 features,
1077 1082 intents=None,
1078 1083 ):
1079 1084 """Create a new local repository instance.
1080 1085
1081 1086 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1082 1087 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1083 1088 object.
1084 1089
1085 1090 Arguments:
1086 1091
1087 1092 baseui
1088 1093 ``ui.ui`` instance that ``ui`` argument was based off of.
1089 1094
1090 1095 ui
1091 1096 ``ui.ui`` instance for use by the repository.
1092 1097
1093 1098 origroot
1094 1099 ``bytes`` path to working directory root of this repository.
1095 1100
1096 1101 wdirvfs
1097 1102 ``vfs.vfs`` rooted at the working directory.
1098 1103
1099 1104 hgvfs
1100 1105 ``vfs.vfs`` rooted at .hg/
1101 1106
1102 1107 requirements
1103 1108 ``set`` of bytestrings representing repository opening requirements.
1104 1109
1105 1110 supportedrequirements
1106 1111 ``set`` of bytestrings representing repository requirements that we
1107 1112 know how to open. May be a superset of ``requirements``.
1108 1113
1109 1114 sharedpath
1110 1115 ``bytes`` Defining path to storage base directory. Points to a
1111 1116 ``.hg/`` directory somewhere.
1112 1117
1113 1118 store
1114 1119 ``store.basicstore`` (or derived) instance providing access to
1115 1120 versioned storage.
1116 1121
1117 1122 cachevfs
1118 1123 ``vfs.vfs`` used for cache files.
1119 1124
1120 1125 wcachevfs
1121 1126 ``vfs.vfs`` used for cache files related to the working copy.
1122 1127
1123 1128 features
1124 1129 ``set`` of bytestrings defining features/capabilities of this
1125 1130 instance.
1126 1131
1127 1132 intents
1128 1133 ``set`` of system strings indicating what this repo will be used
1129 1134 for.
1130 1135 """
1131 1136 self.baseui = baseui
1132 1137 self.ui = ui
1133 1138 self.origroot = origroot
1134 1139 # vfs rooted at working directory.
1135 1140 self.wvfs = wdirvfs
1136 1141 self.root = wdirvfs.base
1137 1142 # vfs rooted at .hg/. Used to access most non-store paths.
1138 1143 self.vfs = hgvfs
1139 1144 self.path = hgvfs.base
1140 1145 self.requirements = requirements
1141 1146 self.supported = supportedrequirements
1142 1147 self.sharedpath = sharedpath
1143 1148 self.store = store
1144 1149 self.cachevfs = cachevfs
1145 1150 self.wcachevfs = wcachevfs
1146 1151 self.features = features
1147 1152
1148 1153 self.filtername = None
1149 1154
1150 1155 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1151 1156 b'devel', b'check-locks'
1152 1157 ):
1153 1158 self.vfs.audit = self._getvfsward(self.vfs.audit)
1154 1159 # A list of callbacks to shape the phase if no data were found.
1155 1160 # Callbacks are in the form: func(repo, roots) --> processed root.
1156 1161 # This list is to be filled by extensions during repo setup
1157 1162 self._phasedefaults = []
1158 1163
1159 1164 color.setup(self.ui)
1160 1165
1161 1166 self.spath = self.store.path
1162 1167 self.svfs = self.store.vfs
1163 1168 self.sjoin = self.store.join
1164 1169 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1165 1170 b'devel', b'check-locks'
1166 1171 ):
1167 1172 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1168 1173 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1169 1174 else: # standard vfs
1170 1175 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1171 1176
1172 1177 self._dirstatevalidatewarned = False
1173 1178
1174 1179 self._branchcaches = branchmap.BranchMapCache()
1175 1180 self._revbranchcache = None
1176 1181 self._filterpats = {}
1177 1182 self._datafilters = {}
1178 1183 self._transref = self._lockref = self._wlockref = None
1179 1184
1180 1185 # A cache for various files under .hg/ that tracks file changes,
1181 1186 # (used by the filecache decorator)
1182 1187 #
1183 1188 # Maps a property name to its util.filecacheentry
1184 1189 self._filecache = {}
1185 1190
1186 1191 # hold sets of revision to be filtered
1187 1192 # should be cleared when something might have changed the filter value:
1188 1193 # - new changesets,
1189 1194 # - phase change,
1190 1195 # - new obsolescence marker,
1191 1196 # - working directory parent change,
1192 1197 # - bookmark changes
1193 1198 self.filteredrevcache = {}
1194 1199
1195 1200 # post-dirstate-status hooks
1196 1201 self._postdsstatus = []
1197 1202
1198 1203 # generic mapping between names and nodes
1199 1204 self.names = namespaces.namespaces()
1200 1205
1201 1206 # Key to signature value.
1202 1207 self._sparsesignaturecache = {}
1203 1208 # Signature to cached matcher instance.
1204 1209 self._sparsematchercache = {}
1205 1210
1206 1211 self._extrafilterid = repoview.extrafilter(ui)
1207 1212
1208 1213 self.filecopiesmode = None
1209 1214 if COPIESSDC_REQUIREMENT in self.requirements:
1210 1215 self.filecopiesmode = b'changeset-sidedata'
1211 1216
1212 1217 def _getvfsward(self, origfunc):
1213 1218 """build a ward for self.vfs"""
1214 1219 rref = weakref.ref(self)
1215 1220
1216 1221 def checkvfs(path, mode=None):
1217 1222 ret = origfunc(path, mode=mode)
1218 1223 repo = rref()
1219 1224 if (
1220 1225 repo is None
1221 1226 or not util.safehasattr(repo, b'_wlockref')
1222 1227 or not util.safehasattr(repo, b'_lockref')
1223 1228 ):
1224 1229 return
1225 1230 if mode in (None, b'r', b'rb'):
1226 1231 return
1227 1232 if path.startswith(repo.path):
1228 1233 # truncate name relative to the repository (.hg)
1229 1234 path = path[len(repo.path) + 1 :]
1230 1235 if path.startswith(b'cache/'):
1231 1236 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1232 1237 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1233 1238 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1234 1239 # journal is covered by 'lock'
1235 1240 if repo._currentlock(repo._lockref) is None:
1236 1241 repo.ui.develwarn(
1237 1242 b'write with no lock: "%s"' % path,
1238 1243 stacklevel=3,
1239 1244 config=b'check-locks',
1240 1245 )
1241 1246 elif repo._currentlock(repo._wlockref) is None:
1242 1247 # rest of vfs files are covered by 'wlock'
1243 1248 #
1244 1249 # exclude special files
1245 1250 for prefix in self._wlockfreeprefix:
1246 1251 if path.startswith(prefix):
1247 1252 return
1248 1253 repo.ui.develwarn(
1249 1254 b'write with no wlock: "%s"' % path,
1250 1255 stacklevel=3,
1251 1256 config=b'check-locks',
1252 1257 )
1253 1258 return ret
1254 1259
1255 1260 return checkvfs
1256 1261
1257 1262 def _getsvfsward(self, origfunc):
1258 1263 """build a ward for self.svfs"""
1259 1264 rref = weakref.ref(self)
1260 1265
1261 1266 def checksvfs(path, mode=None):
1262 1267 ret = origfunc(path, mode=mode)
1263 1268 repo = rref()
1264 1269 if repo is None or not util.safehasattr(repo, b'_lockref'):
1265 1270 return
1266 1271 if mode in (None, b'r', b'rb'):
1267 1272 return
1268 1273 if path.startswith(repo.sharedpath):
1269 1274 # truncate name relative to the repository (.hg)
1270 1275 path = path[len(repo.sharedpath) + 1 :]
1271 1276 if repo._currentlock(repo._lockref) is None:
1272 1277 repo.ui.develwarn(
1273 1278 b'write with no lock: "%s"' % path, stacklevel=4
1274 1279 )
1275 1280 return ret
1276 1281
1277 1282 return checksvfs
1278 1283
1279 1284 def close(self):
1280 1285 self._writecaches()
1281 1286
1282 1287 def _writecaches(self):
1283 1288 if self._revbranchcache:
1284 1289 self._revbranchcache.write()
1285 1290
1286 1291 def _restrictcapabilities(self, caps):
1287 1292 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1288 1293 caps = set(caps)
1289 1294 capsblob = bundle2.encodecaps(
1290 1295 bundle2.getrepocaps(self, role=b'client')
1291 1296 )
1292 1297 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1293 1298 return caps
1294 1299
1295 1300 def _writerequirements(self):
1296 1301 scmutil.writerequires(self.vfs, self.requirements)
1297 1302
1298 1303 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1299 1304 # self -> auditor -> self._checknested -> self
1300 1305
1301 1306 @property
1302 1307 def auditor(self):
1303 1308 # This is only used by context.workingctx.match in order to
1304 1309 # detect files in subrepos.
1305 1310 return pathutil.pathauditor(self.root, callback=self._checknested)
1306 1311
1307 1312 @property
1308 1313 def nofsauditor(self):
1309 1314 # This is only used by context.basectx.match in order to detect
1310 1315 # files in subrepos.
1311 1316 return pathutil.pathauditor(
1312 1317 self.root, callback=self._checknested, realfs=False, cached=True
1313 1318 )
1314 1319
1315 1320 def _checknested(self, path):
1316 1321 """Determine if path is a legal nested repository."""
1317 1322 if not path.startswith(self.root):
1318 1323 return False
1319 1324 subpath = path[len(self.root) + 1 :]
1320 1325 normsubpath = util.pconvert(subpath)
1321 1326
1322 1327 # XXX: Checking against the current working copy is wrong in
1323 1328 # the sense that it can reject things like
1324 1329 #
1325 1330 # $ hg cat -r 10 sub/x.txt
1326 1331 #
1327 1332 # if sub/ is no longer a subrepository in the working copy
1328 1333 # parent revision.
1329 1334 #
1330 1335 # However, it can of course also allow things that would have
1331 1336 # been rejected before, such as the above cat command if sub/
1332 1337 # is a subrepository now, but was a normal directory before.
1333 1338 # The old path auditor would have rejected by mistake since it
1334 1339 # panics when it sees sub/.hg/.
1335 1340 #
1336 1341 # All in all, checking against the working copy seems sensible
1337 1342 # since we want to prevent access to nested repositories on
1338 1343 # the filesystem *now*.
1339 1344 ctx = self[None]
1340 1345 parts = util.splitpath(subpath)
1341 1346 while parts:
1342 1347 prefix = b'/'.join(parts)
1343 1348 if prefix in ctx.substate:
1344 1349 if prefix == normsubpath:
1345 1350 return True
1346 1351 else:
1347 1352 sub = ctx.sub(prefix)
1348 1353 return sub.checknested(subpath[len(prefix) + 1 :])
1349 1354 else:
1350 1355 parts.pop()
1351 1356 return False
1352 1357
1353 1358 def peer(self):
1354 1359 return localpeer(self) # not cached to avoid reference cycle
1355 1360
1356 1361 def unfiltered(self):
1357 1362 """Return unfiltered version of the repository
1358 1363
1359 1364 Intended to be overwritten by filtered repo."""
1360 1365 return self
1361 1366
1362 1367 def filtered(self, name, visibilityexceptions=None):
1363 1368 """Return a filtered version of a repository
1364 1369
1365 1370 The `name` parameter is the identifier of the requested view. This
1366 1371 will return a repoview object set "exactly" to the specified view.
1367 1372
1368 1373 This function does not apply recursive filtering to a repository. For
1369 1374 example calling `repo.filtered("served")` will return a repoview using
1370 1375 the "served" view, regardless of the initial view used by `repo`.
1371 1376
1372 1377 In other words, there is always only one level of `repoview` "filtering".
1373 1378 """
1374 1379 if self._extrafilterid is not None and b'%' not in name:
1375 1380 name = name + b'%' + self._extrafilterid
1376 1381
1377 1382 cls = repoview.newtype(self.unfiltered().__class__)
1378 1383 return cls(self, name, visibilityexceptions)
1379 1384
1380 1385 @mixedrepostorecache(
1381 1386 (b'bookmarks', b'plain'),
1382 1387 (b'bookmarks.current', b'plain'),
1383 1388 (b'bookmarks', b''),
1384 1389 (b'00changelog.i', b''),
1385 1390 )
1386 1391 def _bookmarks(self):
1387 1392 # Since the multiple files involved in the transaction cannot be
1388 1393 # written atomically (with current repository format), there is a race
1389 1394 # condition here.
1390 1395 #
1391 1396 # 1) changelog content A is read
1392 1397 # 2) outside transaction update changelog to content B
1393 1398 # 3) outside transaction update bookmark file referring to content B
1394 1399 # 4) bookmarks file content is read and filtered against changelog-A
1395 1400 #
1396 1401 # When this happens, bookmarks against nodes missing from A are dropped.
1397 1402 #
1398 1403 # Having this happen during read is not great, but it becomes worse
1399 1404 # when it happens during write, because the bookmarks to the "unknown"
1400 1405 # nodes will be dropped for good. However, writes happen within locks.
1401 1406 # This locking makes it possible to have a race free consistent read.
1402 1407 # For this purpose, data read from disk before locking is
1403 1408 # "invalidated" right after the locks are taken. These invalidations are
1404 1409 # "light": the `filecache` mechanism keeps the data in memory and will
1405 1410 # reuse it if the underlying files did not change. Not parsing the
1406 1411 # same data multiple times helps performance.
1407 1412 #
1408 1413 # Unfortunately, in the case described above, the files tracked by the
1409 1414 # bookmarks file cache might not have changed, but the in-memory
1410 1415 # content is still "wrong" because we used an older changelog content
1411 1416 # to process the on-disk data. So after locking, the changelog would be
1412 1417 # refreshed but `_bookmarks` would be preserved.
1413 1418 # Adding `00changelog.i` to the list of tracked files is not
1414 1419 # enough, because at the time we build the content for `_bookmarks` in
1415 1420 # (4), the changelog file has already diverged from the content used
1416 1421 # for loading `changelog` in (1)
1417 1422 #
1418 1423 # To prevent the issue, we force the changelog to be explicitly
1419 1424 # reloaded while computing `_bookmarks`. The data race can still happen
1420 1425 # without the lock (with a narrower window), but it would no longer go
1421 1426 # undetected during the lock time refresh.
1422 1427 #
1423 1428 # The new schedule is as follows:
1424 1429 #
1425 1430 # 1) filecache logic detect that `_bookmarks` needs to be computed
1426 1431 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1427 1432 # 3) We force `changelog` filecache to be tested
1428 1433 # 4) cachestat for `changelog` are captured (for changelog)
1429 1434 # 5) `_bookmarks` is computed and cached
1430 1435 #
1431 1436 # The step in (3) ensures we have a changelog at least as recent as the
1432 1437 # cache stat computed in (1). As a result, at locking time:
1433 1438 # * if the changelog did not change since (1) -> we can reuse the data
1434 1439 # * otherwise -> the bookmarks get refreshed.
1435 1440 self._refreshchangelog()
1436 1441 return bookmarks.bmstore(self)
1437 1442
1438 1443 def _refreshchangelog(self):
1439 1444 """make sure the in memory changelog match the on-disk one"""
1440 1445 if 'changelog' in vars(self) and self.currenttransaction() is None:
1441 1446 del self.changelog
1442 1447
1443 1448 @property
1444 1449 def _activebookmark(self):
1445 1450 return self._bookmarks.active
1446 1451
1447 1452 # _phasesets depend on changelog. what we need is to call
1448 1453 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1449 1454 # can't be easily expressed in filecache mechanism.
1450 1455 @storecache(b'phaseroots', b'00changelog.i')
1451 1456 def _phasecache(self):
1452 1457 return phases.phasecache(self, self._phasedefaults)
1453 1458
1454 1459 @storecache(b'obsstore')
1455 1460 def obsstore(self):
1456 1461 return obsolete.makestore(self.ui, self)
1457 1462
1458 1463 @storecache(b'00changelog.i')
1459 1464 def changelog(self):
1460 1465 # load dirstate before changelog to avoid race see issue6303
1461 1466 self.dirstate.prefetch_parents()
1462 1467 return self.store.changelog(txnutil.mayhavepending(self.root))
1463 1468
1464 1469 @storecache(b'00manifest.i')
1465 1470 def manifestlog(self):
1466 1471 return self.store.manifestlog(self, self._storenarrowmatch)
1467 1472
1468 1473 @repofilecache(b'dirstate')
1469 1474 def dirstate(self):
1470 1475 return self._makedirstate()
1471 1476
1472 1477 def _makedirstate(self):
1473 1478 """Extension point for wrapping the dirstate per-repo."""
1474 1479 sparsematchfn = lambda: sparse.matcher(self)
1475 1480
1476 1481 return dirstate.dirstate(
1477 1482 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1478 1483 )
1479 1484
1480 1485 def _dirstatevalidate(self, node):
1481 1486 try:
1482 1487 self.changelog.rev(node)
1483 1488 return node
1484 1489 except error.LookupError:
1485 1490 if not self._dirstatevalidatewarned:
1486 1491 self._dirstatevalidatewarned = True
1487 1492 self.ui.warn(
1488 1493 _(b"warning: ignoring unknown working parent %s!\n")
1489 1494 % short(node)
1490 1495 )
1491 1496 return nullid
1492 1497
1493 1498 @storecache(narrowspec.FILENAME)
1494 1499 def narrowpats(self):
1495 1500 """matcher patterns for this repository's narrowspec
1496 1501
1497 1502 A tuple of (includes, excludes).
1498 1503 """
1499 1504 return narrowspec.load(self)
1500 1505
1501 1506 @storecache(narrowspec.FILENAME)
1502 1507 def _storenarrowmatch(self):
1503 1508 if repository.NARROW_REQUIREMENT not in self.requirements:
1504 1509 return matchmod.always()
1505 1510 include, exclude = self.narrowpats
1506 1511 return narrowspec.match(self.root, include=include, exclude=exclude)
1507 1512
1508 1513 @storecache(narrowspec.FILENAME)
1509 1514 def _narrowmatch(self):
1510 1515 if repository.NARROW_REQUIREMENT not in self.requirements:
1511 1516 return matchmod.always()
1512 1517 narrowspec.checkworkingcopynarrowspec(self)
1513 1518 include, exclude = self.narrowpats
1514 1519 return narrowspec.match(self.root, include=include, exclude=exclude)
1515 1520
1516 1521 def narrowmatch(self, match=None, includeexact=False):
1517 1522 """matcher corresponding the the repo's narrowspec
1518 1523
1519 1524 If `match` is given, then that will be intersected with the narrow
1520 1525 matcher.
1521 1526
1522 1527 If `includeexact` is True, then any exact matches from `match` will
1523 1528 be included even if they're outside the narrowspec.
1524 1529 """
1525 1530 if match:
1526 1531 if includeexact and not self._narrowmatch.always():
1527 1532 # do not exclude explicitly-specified paths so that they can
1528 1533 # be warned later on
1529 1534 em = matchmod.exact(match.files())
1530 1535 nm = matchmod.unionmatcher([self._narrowmatch, em])
1531 1536 return matchmod.intersectmatchers(match, nm)
1532 1537 return matchmod.intersectmatchers(match, self._narrowmatch)
1533 1538 return self._narrowmatch
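# note (descriptive addition): when an explicit `match` is passed with
# includeexact=True, exactly-named paths survive the intersection above even
# if they fall outside the narrowspec, so later code can warn about them
# instead of silently dropping them.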
1534 1539
1535 1540 def setnarrowpats(self, newincludes, newexcludes):
1536 1541 narrowspec.save(self, newincludes, newexcludes)
1537 1542 self.invalidate(clearfilecache=True)
1538 1543
1539 1544 @unfilteredpropertycache
1540 1545 def _quick_access_changeid_null(self):
1541 1546 return {
1542 1547 b'null': (nullrev, nullid),
1543 1548 nullrev: (nullrev, nullid),
1544 1549 nullid: (nullrev, nullid),
1545 1550 }
1546 1551
1547 1552 @unfilteredpropertycache
1548 1553 def _quick_access_changeid_wc(self):
1549 1554 # also fast path access to the working copy parents
1550 1555 # however, only do it for filters that ensure the wc is visible.
1551 1556 quick = {}
1552 1557 cl = self.unfiltered().changelog
1553 1558 for node in self.dirstate.parents():
1554 1559 if node == nullid:
1555 1560 continue
1556 1561 rev = cl.index.get_rev(node)
1557 1562 if rev is None:
1558 1563 # unknown working copy parent case:
1559 1564 #
1560 1565 # skip the fast path and let higher code deal with it
1561 1566 continue
1562 1567 pair = (rev, node)
1563 1568 quick[rev] = pair
1564 1569 quick[node] = pair
1565 1570 # also add the parents of the parents
1566 1571 for r in cl.parentrevs(rev):
1567 1572 if r == nullrev:
1568 1573 continue
1569 1574 n = cl.node(r)
1570 1575 pair = (r, n)
1571 1576 quick[r] = pair
1572 1577 quick[n] = pair
1573 1578 p1node = self.dirstate.p1()
1574 1579 if p1node != nullid:
1575 1580 quick[b'.'] = quick[p1node]
1576 1581 return quick
1577 1582
1578 1583 @unfilteredmethod
1579 1584 def _quick_access_changeid_invalidate(self):
1580 1585 if '_quick_access_changeid_wc' in vars(self):
1581 1586 del self.__dict__['_quick_access_changeid_wc']
1582 1587
1583 1588 @property
1584 1589 def _quick_access_changeid(self):
1585 1590 """an helper dictionnary for __getitem__ calls
1586 1591
1587 1592 This contains a list of symbol we can recognise right away without
1588 1593 further processing.
1589 1594 """
1590 1595 mapping = self._quick_access_changeid_null
1591 1596 if self.filtername in repoview.filter_has_wc:
1592 1597 mapping = mapping.copy()
1593 1598 mapping.update(self._quick_access_changeid_wc)
1594 1599 return mapping
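# note (descriptive addition): for a filter that can see the working copy,
# the mapping assembled above holds b'null', nullrev and nullid, the dirstate
# parents (and their parents) keyed by both revision and node, and b'.'
# aliased to the first dirstate parent.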
1595 1600
1596 1601 def __getitem__(self, changeid):
1597 1602 # dealing with special cases
1598 1603 if changeid is None:
1599 1604 return context.workingctx(self)
1600 1605 if isinstance(changeid, context.basectx):
1601 1606 return changeid
1602 1607
1603 1608 # dealing with multiple revisions
1604 1609 if isinstance(changeid, slice):
1605 1610 # wdirrev isn't contiguous so the slice shouldn't include it
1606 1611 return [
1607 1612 self[i]
1608 1613 for i in pycompat.xrange(*changeid.indices(len(self)))
1609 1614 if i not in self.changelog.filteredrevs
1610 1615 ]
1611 1616
1612 1617 # dealing with some special values
1613 1618 quick_access = self._quick_access_changeid.get(changeid)
1614 1619 if quick_access is not None:
1615 1620 rev, node = quick_access
1616 1621 return context.changectx(self, rev, node, maybe_filtered=False)
1617 1622 if changeid == b'tip':
1618 1623 node = self.changelog.tip()
1619 1624 rev = self.changelog.rev(node)
1620 1625 return context.changectx(self, rev, node)
1621 1626
1622 1627 # dealing with arbitrary values
1623 1628 try:
1624 1629 if isinstance(changeid, int):
1625 1630 node = self.changelog.node(changeid)
1626 1631 rev = changeid
1627 1632 elif changeid == b'.':
1628 1633 # this is a hack to delay/avoid loading obsmarkers
1629 1634 # when we know that '.' won't be hidden
1630 1635 node = self.dirstate.p1()
1631 1636 rev = self.unfiltered().changelog.rev(node)
1632 1637 elif len(changeid) == 20:
1633 1638 try:
1634 1639 node = changeid
1635 1640 rev = self.changelog.rev(changeid)
1636 1641 except error.FilteredLookupError:
1637 1642 changeid = hex(changeid) # for the error message
1638 1643 raise
1639 1644 except LookupError:
1640 1645 # check if it might have come from damaged dirstate
1641 1646 #
1642 1647 # XXX we could avoid the unfiltered if we had a recognizable
1643 1648 # exception for filtered changeset access
1644 1649 if (
1645 1650 self.local()
1646 1651 and changeid in self.unfiltered().dirstate.parents()
1647 1652 ):
1648 1653 msg = _(b"working directory has unknown parent '%s'!")
1649 1654 raise error.Abort(msg % short(changeid))
1650 1655 changeid = hex(changeid) # for the error message
1651 1656 raise
1652 1657
1653 1658 elif len(changeid) == 40:
1654 1659 node = bin(changeid)
1655 1660 rev = self.changelog.rev(node)
1656 1661 else:
1657 1662 raise error.ProgrammingError(
1658 1663 b"unsupported changeid '%s' of type %s"
1659 1664 % (changeid, pycompat.bytestr(type(changeid)))
1660 1665 )
1661 1666
1662 1667 return context.changectx(self, rev, node)
1663 1668
1664 1669 except (error.FilteredIndexError, error.FilteredLookupError):
1665 1670 raise error.FilteredRepoLookupError(
1666 1671 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1667 1672 )
1668 1673 except (IndexError, LookupError):
1669 1674 raise error.RepoLookupError(
1670 1675 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1671 1676 )
1672 1677 except error.WdirUnsupported:
1673 1678 return context.workingctx(self)
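# usage sketch (illustrative only; `repo` and `somenode` are hypothetical):
#
#   repo[None]     # working directory context
#   repo[0]        # integer revision number
#   repo[b'.']     # first dirstate parent
#   repo[b'tip']   # changelog tip
#   repo[somenode] # 20-byte binary node or 40-character hex node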
1674 1679
1675 1680 def __contains__(self, changeid):
1676 1681 """True if the given changeid exists
1677 1682
1678 1683 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1679 1684 is specified.
1680 1685 """
1681 1686 try:
1682 1687 self[changeid]
1683 1688 return True
1684 1689 except error.RepoLookupError:
1685 1690 return False
1686 1691
1687 1692 def __nonzero__(self):
1688 1693 return True
1689 1694
1690 1695 __bool__ = __nonzero__
1691 1696
1692 1697 def __len__(self):
1693 1698 # no need to pay the cost of repoview.changelog
1694 1699 unfi = self.unfiltered()
1695 1700 return len(unfi.changelog)
1696 1701
1697 1702 def __iter__(self):
1698 1703 return iter(self.changelog)
1699 1704
1700 1705 def revs(self, expr, *args):
1701 1706 '''Find revisions matching a revset.
1702 1707
1703 1708 The revset is specified as a string ``expr`` that may contain
1704 1709 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1705 1710
1706 1711 Revset aliases from the configuration are not expanded. To expand
1707 1712 user aliases, consider calling ``scmutil.revrange()`` or
1708 1713 ``repo.anyrevs([expr], user=True)``.
1709 1714
1710 1715 Returns a smartset.abstractsmartset, which is a list-like interface
1711 1716 that contains integer revisions.
1712 1717 '''
1713 1718 tree = revsetlang.spectree(expr, *args)
1714 1719 return revset.makematcher(tree)(self)
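# usage sketch (illustrative only; the revset and value are hypothetical):
#
#   repo.revs(b'ancestors(%d) and not public()', 42)
#
# formatspec escapes the argument before the revset is evaluated, and the
# call returns a smartset of the matching revisions.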
1715 1720
1716 1721 def set(self, expr, *args):
1717 1722 '''Find revisions matching a revset and emit changectx instances.
1718 1723
1719 1724 This is a convenience wrapper around ``revs()`` that iterates the
1720 1725 result and is a generator of changectx instances.
1721 1726
1722 1727 Revset aliases from the configuration are not expanded. To expand
1723 1728 user aliases, consider calling ``scmutil.revrange()``.
1724 1729 '''
1725 1730 for r in self.revs(expr, *args):
1726 1731 yield self[r]
1727 1732
1728 1733 def anyrevs(self, specs, user=False, localalias=None):
1729 1734 '''Find revisions matching one of the given revsets.
1730 1735
1731 1736 Revset aliases from the configuration are not expanded by default. To
1732 1737 expand user aliases, specify ``user=True``. To provide some local
1733 1738 definitions overriding user aliases, set ``localalias`` to
1734 1739 ``{name: definitionstring}``.
1735 1740 '''
1736 1741 if specs == [b'null']:
1737 1742 return revset.baseset([nullrev])
1738 1743 if specs == [b'.']:
1739 1744 quick_data = self._quick_access_changeid.get(b'.')
1740 1745 if quick_data is not None:
1741 1746 return revset.baseset([quick_data[0]])
1742 1747 if user:
1743 1748 m = revset.matchany(
1744 1749 self.ui,
1745 1750 specs,
1746 1751 lookup=revset.lookupfn(self),
1747 1752 localalias=localalias,
1748 1753 )
1749 1754 else:
1750 1755 m = revset.matchany(None, specs, localalias=localalias)
1751 1756 return m(self)
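# usage sketch (illustrative only; the alias name and definition are
# hypothetical):
#
#   repo.anyrevs(
#       [b'releases'],
#       user=True,
#       localalias={b'releases': b'tag() and not secret()'},
#   )
#
# expands user aliases and lets the local 'releases' definition override any
# user alias of the same name for this call only.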
1752 1757
1753 1758 def url(self):
1754 1759 return b'file:' + self.root
1755 1760
1756 1761 def hook(self, name, throw=False, **args):
1757 1762 """Call a hook, passing this repo instance.
1758 1763
1759 1764 This a convenience method to aid invoking hooks. Extensions likely
1760 1765 won't call this unless they have registered a custom hook or are
1761 1766 replacing code that is expected to call a hook.
1762 1767 """
1763 1768 return hook.hook(self.ui, self, name, throw, **args)
1764 1769
1765 1770 @filteredpropertycache
1766 1771 def _tagscache(self):
1767 1772 '''Returns a tagscache object that contains various tag-related
1768 1773 caches.'''
1769 1774
1770 1775 # This simplifies its cache management by having one decorated
1771 1776 # function (this one) and the rest simply fetch things from it.
1772 1777 class tagscache(object):
1773 1778 def __init__(self):
1774 1779 # These two define the set of tags for this repository. tags
1775 1780 # maps tag name to node; tagtypes maps tag name to 'global' or
1776 1781 # 'local'. (Global tags are defined by .hgtags across all
1777 1782 # heads, and local tags are defined in .hg/localtags.)
1778 1783 # They constitute the in-memory cache of tags.
1779 1784 self.tags = self.tagtypes = None
1780 1785
1781 1786 self.nodetagscache = self.tagslist = None
1782 1787
1783 1788 cache = tagscache()
1784 1789 cache.tags, cache.tagtypes = self._findtags()
1785 1790
1786 1791 return cache
1787 1792
1788 1793 def tags(self):
1789 1794 '''return a mapping of tag to node'''
1790 1795 t = {}
1791 1796 if self.changelog.filteredrevs:
1792 1797 tags, tt = self._findtags()
1793 1798 else:
1794 1799 tags = self._tagscache.tags
1795 1800 rev = self.changelog.rev
1796 1801 for k, v in pycompat.iteritems(tags):
1797 1802 try:
1798 1803 # ignore tags to unknown nodes
1799 1804 rev(v)
1800 1805 t[k] = v
1801 1806 except (error.LookupError, ValueError):
1802 1807 pass
1803 1808 return t
1804 1809
1805 1810 def _findtags(self):
1806 1811 '''Do the hard work of finding tags. Return a pair of dicts
1807 1812 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1808 1813 maps tag name to a string like \'global\' or \'local\'.
1809 1814 Subclasses or extensions are free to add their own tags, but
1810 1815 should be aware that the returned dicts will be retained for the
1811 1816 duration of the localrepo object.'''
1812 1817
1813 1818 # XXX what tagtype should subclasses/extensions use? Currently
1814 1819 # mq and bookmarks add tags, but do not set the tagtype at all.
1815 1820 # Should each extension invent its own tag type? Should there
1816 1821 # be one tagtype for all such "virtual" tags? Or is the status
1817 1822 # quo fine?
1818 1823
1819 1824 # map tag name to (node, hist)
1820 1825 alltags = tagsmod.findglobaltags(self.ui, self)
1821 1826 # map tag name to tag type
1822 1827 tagtypes = {tag: b'global' for tag in alltags}
1823 1828
1824 1829 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1825 1830
1826 1831 # Build the return dicts. Have to re-encode tag names because
1827 1832 # the tags module always uses UTF-8 (in order not to lose info
1828 1833 # writing to the cache), but the rest of Mercurial wants them in
1829 1834 # local encoding.
1830 1835 tags = {}
1831 1836 for (name, (node, hist)) in pycompat.iteritems(alltags):
1832 1837 if node != nullid:
1833 1838 tags[encoding.tolocal(name)] = node
1834 1839 tags[b'tip'] = self.changelog.tip()
1835 1840 tagtypes = {
1836 1841 encoding.tolocal(name): value
1837 1842 for (name, value) in pycompat.iteritems(tagtypes)
1838 1843 }
1839 1844 return (tags, tagtypes)
1840 1845
1841 1846 def tagtype(self, tagname):
1842 1847 '''
1843 1848 return the type of the given tag. result can be:
1844 1849
1845 1850 'local' : a local tag
1846 1851 'global' : a global tag
1847 1852 None : tag does not exist
1848 1853 '''
1849 1854
1850 1855 return self._tagscache.tagtypes.get(tagname)
1851 1856
1852 1857 def tagslist(self):
1853 1858 '''return a list of tags ordered by revision'''
1854 1859 if not self._tagscache.tagslist:
1855 1860 l = []
1856 1861 for t, n in pycompat.iteritems(self.tags()):
1857 1862 l.append((self.changelog.rev(n), t, n))
1858 1863 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1859 1864
1860 1865 return self._tagscache.tagslist
1861 1866
1862 1867 def nodetags(self, node):
1863 1868 '''return the tags associated with a node'''
1864 1869 if not self._tagscache.nodetagscache:
1865 1870 nodetagscache = {}
1866 1871 for t, n in pycompat.iteritems(self._tagscache.tags):
1867 1872 nodetagscache.setdefault(n, []).append(t)
1868 1873 for tags in pycompat.itervalues(nodetagscache):
1869 1874 tags.sort()
1870 1875 self._tagscache.nodetagscache = nodetagscache
1871 1876 return self._tagscache.nodetagscache.get(node, [])
1872 1877
1873 1878 def nodebookmarks(self, node):
1874 1879 """return the list of bookmarks pointing to the specified node"""
1875 1880 return self._bookmarks.names(node)
1876 1881
1877 1882 def branchmap(self):
1878 1883 '''returns a dictionary {branch: [branchheads]} with branchheads
1879 1884 ordered by increasing revision number'''
1880 1885 return self._branchcaches[self]
1881 1886
1882 1887 @unfilteredmethod
1883 1888 def revbranchcache(self):
1884 1889 if not self._revbranchcache:
1885 1890 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1886 1891 return self._revbranchcache
1887 1892
1888 1893 def branchtip(self, branch, ignoremissing=False):
1889 1894 '''return the tip node for a given branch
1890 1895
1891 1896 If ignoremissing is True, then this method will not raise an error.
1892 1897 This is helpful for callers that only expect None for a missing branch
1893 1898 (e.g. namespace).
1894 1899
1895 1900 '''
1896 1901 try:
1897 1902 return self.branchmap().branchtip(branch)
1898 1903 except KeyError:
1899 1904 if not ignoremissing:
1900 1905 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1901 1906 else:
1902 1907 pass
1903 1908
1904 1909 def lookup(self, key):
1905 1910 node = scmutil.revsymbol(self, key).node()
1906 1911 if node is None:
1907 1912 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1908 1913 return node
1909 1914
1910 1915 def lookupbranch(self, key):
1911 1916 if self.branchmap().hasbranch(key):
1912 1917 return key
1913 1918
1914 1919 return scmutil.revsymbol(self, key).branch()
1915 1920
1916 1921 def known(self, nodes):
1917 1922 cl = self.changelog
1918 1923 get_rev = cl.index.get_rev
1919 1924 filtered = cl.filteredrevs
1920 1925 result = []
1921 1926 for n in nodes:
1922 1927 r = get_rev(n)
1923 1928 resp = not (r is None or r in filtered)
1924 1929 result.append(resp)
1925 1930 return result
1926 1931
1927 1932 def local(self):
1928 1933 return self
1929 1934
1930 1935 def publishing(self):
1931 1936 # it's safe (and desirable) to trust the publish flag unconditionally
1932 1937 # so that we don't finalize changes shared between users via ssh or nfs
1933 1938 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1934 1939
1935 1940 def cancopy(self):
1936 1941 # so statichttprepo's override of local() works
1937 1942 if not self.local():
1938 1943 return False
1939 1944 if not self.publishing():
1940 1945 return True
1941 1946 # if publishing we can't copy if there is filtered content
1942 1947 return not self.filtered(b'visible').changelog.filteredrevs
1943 1948
1944 1949 def shared(self):
1945 1950 '''the type of shared repository (None if not shared)'''
1946 1951 if self.sharedpath != self.path:
1947 1952 return b'store'
1948 1953 return None
1949 1954
1950 1955 def wjoin(self, f, *insidef):
1951 1956 return self.vfs.reljoin(self.root, f, *insidef)
1952 1957
1953 1958 def setparents(self, p1, p2=nullid):
1954 1959 self[None].setparents(p1, p2)
1955 1960 self._quick_access_changeid_invalidate()
1956 1961
1957 1962 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1958 1963 """changeid must be a changeset revision, if specified.
1959 1964 fileid can be a file revision or node."""
1960 1965 return context.filectx(
1961 1966 self, path, changeid, fileid, changectx=changectx
1962 1967 )
1963 1968
1964 1969 def getcwd(self):
1965 1970 return self.dirstate.getcwd()
1966 1971
1967 1972 def pathto(self, f, cwd=None):
1968 1973 return self.dirstate.pathto(f, cwd)
1969 1974
1970 1975 def _loadfilter(self, filter):
1971 1976 if filter not in self._filterpats:
1972 1977 l = []
1973 1978 for pat, cmd in self.ui.configitems(filter):
1974 1979 if cmd == b'!':
1975 1980 continue
1976 1981 mf = matchmod.match(self.root, b'', [pat])
1977 1982 fn = None
1978 1983 params = cmd
1979 1984 for name, filterfn in pycompat.iteritems(self._datafilters):
1980 1985 if cmd.startswith(name):
1981 1986 fn = filterfn
1982 1987 params = cmd[len(name) :].lstrip()
1983 1988 break
1984 1989 if not fn:
1985 1990 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1986 1991 fn.__name__ = 'commandfilter'
1987 1992 # Wrap old filters not supporting keyword arguments
1988 1993 if not pycompat.getargspec(fn)[2]:
1989 1994 oldfn = fn
1990 1995 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1991 1996 fn.__name__ = 'compat-' + oldfn.__name__
1992 1997 l.append((mf, fn, params))
1993 1998 self._filterpats[filter] = l
1994 1999 return self._filterpats[filter]
1995 2000
1996 2001 def _filter(self, filterpats, filename, data):
1997 2002 for mf, fn, cmd in filterpats:
1998 2003 if mf(filename):
1999 2004 self.ui.debug(
2000 2005 b"filtering %s through %s\n"
2001 2006 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2002 2007 )
2003 2008 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2004 2009 break
2005 2010
2006 2011 return data
2007 2012
2008 2013 @unfilteredpropertycache
2009 2014 def _encodefilterpats(self):
2010 2015 return self._loadfilter(b'encode')
2011 2016
2012 2017 @unfilteredpropertycache
2013 2018 def _decodefilterpats(self):
2014 2019 return self._loadfilter(b'decode')
2015 2020
2016 2021 def adddatafilter(self, name, filter):
2017 2022 self._datafilters[name] = filter
2018 2023
2019 2024 def wread(self, filename):
2020 2025 if self.wvfs.islink(filename):
2021 2026 data = self.wvfs.readlink(filename)
2022 2027 else:
2023 2028 data = self.wvfs.read(filename)
2024 2029 return self._filter(self._encodefilterpats, filename, data)
2025 2030
2026 2031 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2027 2032 """write ``data`` into ``filename`` in the working directory
2028 2033
2029 2034 This returns length of written (maybe decoded) data.
2030 2035 """
2031 2036 data = self._filter(self._decodefilterpats, filename, data)
2032 2037 if b'l' in flags:
2033 2038 self.wvfs.symlink(data, filename)
2034 2039 else:
2035 2040 self.wvfs.write(
2036 2041 filename, data, backgroundclose=backgroundclose, **kwargs
2037 2042 )
2038 2043 if b'x' in flags:
2039 2044 self.wvfs.setflags(filename, False, True)
2040 2045 else:
2041 2046 self.wvfs.setflags(filename, False, False)
2042 2047 return len(data)
2043 2048
2044 2049 def wwritedata(self, filename, data):
2045 2050 return self._filter(self._decodefilterpats, filename, data)
2046 2051
2047 2052 def currenttransaction(self):
2048 2053 """return the current transaction or None if non exists"""
2049 2054 if self._transref:
2050 2055 tr = self._transref()
2051 2056 else:
2052 2057 tr = None
2053 2058
2054 2059 if tr and tr.running():
2055 2060 return tr
2056 2061 return None
2057 2062
2058 2063 def transaction(self, desc, report=None):
2059 2064 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2060 2065 b'devel', b'check-locks'
2061 2066 ):
2062 2067 if self._currentlock(self._lockref) is None:
2063 2068 raise error.ProgrammingError(b'transaction requires locking')
2064 2069 tr = self.currenttransaction()
2065 2070 if tr is not None:
2066 2071 return tr.nest(name=desc)
2067 2072
2068 2073 # abort here if the journal already exists
2069 2074 if self.svfs.exists(b"journal"):
2070 2075 raise error.RepoError(
2071 2076 _(b"abandoned transaction found"),
2072 2077 hint=_(b"run 'hg recover' to clean up transaction"),
2073 2078 )
2074 2079
2075 2080 idbase = b"%.40f#%f" % (random.random(), time.time())
2076 2081 ha = hex(hashutil.sha1(idbase).digest())
2077 2082 txnid = b'TXN:' + ha
2078 2083 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2079 2084
2080 2085 self._writejournal(desc)
2081 2086 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2082 2087 if report:
2083 2088 rp = report
2084 2089 else:
2085 2090 rp = self.ui.warn
2086 2091 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2087 2092 # we must avoid cyclic reference between repo and transaction.
2088 2093 reporef = weakref.ref(self)
2089 2094 # Code to track tag movement
2090 2095 #
2091 2096 # Since tags are all handled as file content, it is actually quite hard
2092 2097 # to track these movements from a code perspective. So we fall back to
2093 2098 # tracking at the repository level. One could envision tracking changes
2094 2099 # to the '.hgtags' file through changegroup apply, but that fails to
2095 2100 # cope with cases where a transaction exposes new heads without a changegroup
2096 2101 # being involved (eg: phase movement).
2097 2102 #
2098 2103 # For now, we gate the feature behind a flag since this likely comes
2099 2104 # with performance impacts. The current code runs more often than needed
2100 2105 # and does not use caches as much as it could. The current focus is on
2101 2106 # the behavior of the feature so we disable it by default. The flag
2102 2107 # will be removed when we are happy with the performance impact.
2103 2108 #
2104 2109 # Once this feature is no longer experimental move the following
2105 2110 # documentation to the appropriate help section:
2106 2111 #
2107 2112 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2108 2113 # tags (new or changed or deleted tags). In addition the details of
2109 2114 # these changes are made available in a file at:
2110 2115 # ``REPOROOT/.hg/changes/tags.changes``.
2111 2116 # Make sure you check for HG_TAG_MOVED before reading that file as it
2112 2117 # might exist from a previous transaction even if no tags were touched
2113 2118 # in this one. Changes are recorded in a line-based format::
2114 2119 #
2115 2120 # <action> <hex-node> <tag-name>\n
2116 2121 #
2117 2122 # Actions are defined as follows:
2118 2123 # "-R": tag is removed,
2119 2124 # "+A": tag is added,
2120 2125 # "-M": tag is moved (old value),
2121 2126 # "+M": tag is moved (new value),
2122 2127 tracktags = lambda x: None
2123 2128 # experimental config: experimental.hook-track-tags
2124 2129 shouldtracktags = self.ui.configbool(
2125 2130 b'experimental', b'hook-track-tags'
2126 2131 )
2127 2132 if desc != b'strip' and shouldtracktags:
2128 2133 oldheads = self.changelog.headrevs()
2129 2134
2130 2135 def tracktags(tr2):
2131 2136 repo = reporef()
2132 2137 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2133 2138 newheads = repo.changelog.headrevs()
2134 2139 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2135 2140 # note: we compare lists here.
2136 2141 # As we do it only once, building sets would not be cheaper
2137 2142 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2138 2143 if changes:
2139 2144 tr2.hookargs[b'tag_moved'] = b'1'
2140 2145 with repo.vfs(
2141 2146 b'changes/tags.changes', b'w', atomictemp=True
2142 2147 ) as changesfile:
2143 2148 # note: we do not register the file with the transaction
2144 2149 # because we need it to still exist when the transaction
2145 2150 # is closed (for txnclose hooks)
2146 2151 tagsmod.writediff(changesfile, changes)
2147 2152
2148 2153 def validate(tr2):
2149 2154 """will run pre-closing hooks"""
2150 2155 # XXX the transaction API is a bit lacking here so we take a hacky
2151 2156 # path for now
2152 2157 #
2153 2158 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2154 2159 # dict is copied before these run. In addition we need the data
2155 2160 # available to in-memory hooks too.
2156 2161 #
2157 2162 # Moreover, we also need to make sure this runs before txnclose
2158 2163 # hooks and there is no "pending" mechanism that would execute
2159 2164 # logic only if hooks are about to run.
2160 2165 #
2161 2166 # Fixing this limitation of the transaction is also needed to track
2162 2167 # other families of changes (bookmarks, phases, obsolescence).
2163 2168 #
2164 2169 # This will have to be fixed before we remove the experimental
2165 2170 # gating.
2166 2171 tracktags(tr2)
2167 2172 repo = reporef()
2168 2173
2169 2174 singleheadopt = (b'experimental', b'single-head-per-branch')
2170 2175 singlehead = repo.ui.configbool(*singleheadopt)
2171 2176 if singlehead:
2172 2177 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2173 2178 accountclosed = singleheadsub.get(
2174 2179 b"account-closed-heads", False
2175 2180 )
2176 2181 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2177 2182 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2178 2183 for name, (old, new) in sorted(
2179 2184 tr.changes[b'bookmarks'].items()
2180 2185 ):
2181 2186 args = tr.hookargs.copy()
2182 2187 args.update(bookmarks.preparehookargs(name, old, new))
2183 2188 repo.hook(
2184 2189 b'pretxnclose-bookmark',
2185 2190 throw=True,
2186 2191 **pycompat.strkwargs(args)
2187 2192 )
2188 2193 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2189 2194 cl = repo.unfiltered().changelog
2190 2195 for revs, (old, new) in tr.changes[b'phases']:
2191 2196 for rev in revs:
2192 2197 args = tr.hookargs.copy()
2193 2198 node = hex(cl.node(rev))
2194 2199 args.update(phases.preparehookargs(node, old, new))
2195 2200 repo.hook(
2196 2201 b'pretxnclose-phase',
2197 2202 throw=True,
2198 2203 **pycompat.strkwargs(args)
2199 2204 )
2200 2205
2201 2206 repo.hook(
2202 2207 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2203 2208 )
2204 2209
2205 2210 def releasefn(tr, success):
2206 2211 repo = reporef()
2207 2212 if repo is None:
2208 2213 # If the repo has been GC'd (and this release function is being
2209 2214 # called from transaction.__del__), there's not much we can do,
2210 2215 # so just leave the unfinished transaction there and let the
2211 2216 # user run `hg recover`.
2212 2217 return
2213 2218 if success:
2214 2219 # this should be explicitly invoked here, because
2215 2220 # in-memory changes aren't written out when closing the
2216 2221 # transaction, if tr.addfilegenerator (via
2217 2222 # dirstate.write or so) isn't invoked while the
2218 2223 # transaction is running
2219 2224 repo.dirstate.write(None)
2220 2225 else:
2221 2226 # discard all changes (including ones already written
2222 2227 # out) in this transaction
2223 2228 narrowspec.restorebackup(self, b'journal.narrowspec')
2224 2229 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2225 2230 repo.dirstate.restorebackup(None, b'journal.dirstate')
2226 2231
2227 2232 repo.invalidate(clearfilecache=True)
2228 2233
2229 2234 tr = transaction.transaction(
2230 2235 rp,
2231 2236 self.svfs,
2232 2237 vfsmap,
2233 2238 b"journal",
2234 2239 b"undo",
2235 2240 aftertrans(renames),
2236 2241 self.store.createmode,
2237 2242 validator=validate,
2238 2243 releasefn=releasefn,
2239 2244 checkambigfiles=_cachedfiles,
2240 2245 name=desc,
2241 2246 )
2242 2247 tr.changes[b'origrepolen'] = len(self)
2243 2248 tr.changes[b'obsmarkers'] = set()
2244 2249 tr.changes[b'phases'] = []
2245 2250 tr.changes[b'bookmarks'] = {}
2246 2251
2247 2252 tr.hookargs[b'txnid'] = txnid
2248 2253 tr.hookargs[b'txnname'] = desc
2249 2254 tr.hookargs[b'changes'] = tr.changes
2250 2255 # note: writing the fncache only during finalize means that the file is
2251 2256 # outdated when running hooks. As fncache is used for streaming clone,
2252 2257 # this is not expected to break anything that happens during the hooks.
2253 2258 tr.addfinalize(b'flush-fncache', self.store.write)
2254 2259
2255 2260 def txnclosehook(tr2):
2256 2261 """To be run if transaction is successful, will schedule a hook run
2257 2262 """
2258 2263 # Don't reference tr2 in hook() so we don't hold a reference.
2259 2264 # This reduces memory consumption when there are multiple
2260 2265 # transactions per lock. This can likely go away if issue5045
2261 2266 # fixes the function accumulation.
2262 2267 hookargs = tr2.hookargs
2263 2268
2264 2269 def hookfunc(unused_success):
2265 2270 repo = reporef()
2266 2271 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2267 2272 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2268 2273 for name, (old, new) in bmchanges:
2269 2274 args = tr.hookargs.copy()
2270 2275 args.update(bookmarks.preparehookargs(name, old, new))
2271 2276 repo.hook(
2272 2277 b'txnclose-bookmark',
2273 2278 throw=False,
2274 2279 **pycompat.strkwargs(args)
2275 2280 )
2276 2281
2277 2282 if hook.hashook(repo.ui, b'txnclose-phase'):
2278 2283 cl = repo.unfiltered().changelog
2279 2284 phasemv = sorted(
2280 2285 tr.changes[b'phases'], key=lambda r: r[0][0]
2281 2286 )
2282 2287 for revs, (old, new) in phasemv:
2283 2288 for rev in revs:
2284 2289 args = tr.hookargs.copy()
2285 2290 node = hex(cl.node(rev))
2286 2291 args.update(phases.preparehookargs(node, old, new))
2287 2292 repo.hook(
2288 2293 b'txnclose-phase',
2289 2294 throw=False,
2290 2295 **pycompat.strkwargs(args)
2291 2296 )
2292 2297
2293 2298 repo.hook(
2294 2299 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2295 2300 )
2296 2301
2297 2302 reporef()._afterlock(hookfunc)
2298 2303
2299 2304 tr.addfinalize(b'txnclose-hook', txnclosehook)
2300 2305 # Include a leading "-" to make it happen before the transaction summary
2301 2306 # reports registered via scmutil.registersummarycallback() whose names
2302 2307 # are 00-txnreport etc. That way, the caches will be warm when the
2303 2308 # callbacks run.
2304 2309 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2305 2310
2306 2311 def txnaborthook(tr2):
2307 2312 """To be run if transaction is aborted
2308 2313 """
2309 2314 reporef().hook(
2310 2315 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2311 2316 )
2312 2317
2313 2318 tr.addabort(b'txnabort-hook', txnaborthook)
2314 2319 # avoid eager cache invalidation. in-memory data should be identical
2315 2320 # to stored data if transaction has no error.
2316 2321 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2317 2322 self._transref = weakref.ref(tr)
2318 2323 scmutil.registersummarycallback(self, tr, desc)
2319 2324 return tr
2320 2325
2321 2326 def _journalfiles(self):
2322 2327 return (
2323 2328 (self.svfs, b'journal'),
2324 2329 (self.svfs, b'journal.narrowspec'),
2325 2330 (self.vfs, b'journal.narrowspec.dirstate'),
2326 2331 (self.vfs, b'journal.dirstate'),
2327 2332 (self.vfs, b'journal.branch'),
2328 2333 (self.vfs, b'journal.desc'),
2329 2334 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2330 2335 (self.svfs, b'journal.phaseroots'),
2331 2336 )
2332 2337
2333 2338 def undofiles(self):
2334 2339 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2335 2340
2336 2341 @unfilteredmethod
2337 2342 def _writejournal(self, desc):
2338 2343 self.dirstate.savebackup(None, b'journal.dirstate')
2339 2344 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2340 2345 narrowspec.savebackup(self, b'journal.narrowspec')
2341 2346 self.vfs.write(
2342 2347 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2343 2348 )
2344 2349 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2345 2350 bookmarksvfs = bookmarks.bookmarksvfs(self)
2346 2351 bookmarksvfs.write(
2347 2352 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2348 2353 )
2349 2354 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2350 2355
2351 2356 def recover(self):
2352 2357 with self.lock():
2353 2358 if self.svfs.exists(b"journal"):
2354 2359 self.ui.status(_(b"rolling back interrupted transaction\n"))
2355 2360 vfsmap = {
2356 2361 b'': self.svfs,
2357 2362 b'plain': self.vfs,
2358 2363 }
2359 2364 transaction.rollback(
2360 2365 self.svfs,
2361 2366 vfsmap,
2362 2367 b"journal",
2363 2368 self.ui.warn,
2364 2369 checkambigfiles=_cachedfiles,
2365 2370 )
2366 2371 self.invalidate()
2367 2372 return True
2368 2373 else:
2369 2374 self.ui.warn(_(b"no interrupted transaction available\n"))
2370 2375 return False
2371 2376
2372 2377 def rollback(self, dryrun=False, force=False):
2373 2378 wlock = lock = dsguard = None
2374 2379 try:
2375 2380 wlock = self.wlock()
2376 2381 lock = self.lock()
2377 2382 if self.svfs.exists(b"undo"):
2378 2383 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2379 2384
2380 2385 return self._rollback(dryrun, force, dsguard)
2381 2386 else:
2382 2387 self.ui.warn(_(b"no rollback information available\n"))
2383 2388 return 1
2384 2389 finally:
2385 2390 release(dsguard, lock, wlock)
2386 2391
2387 2392 @unfilteredmethod # Until we get smarter cache management
2388 2393 def _rollback(self, dryrun, force, dsguard):
2389 2394 ui = self.ui
2390 2395 try:
2391 2396 args = self.vfs.read(b'undo.desc').splitlines()
2392 2397 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2393 2398 if len(args) >= 3:
2394 2399 detail = args[2]
2395 2400 oldtip = oldlen - 1
2396 2401
2397 2402 if detail and ui.verbose:
2398 2403 msg = _(
2399 2404 b'repository tip rolled back to revision %d'
2400 2405 b' (undo %s: %s)\n'
2401 2406 ) % (oldtip, desc, detail)
2402 2407 else:
2403 2408 msg = _(
2404 2409 b'repository tip rolled back to revision %d (undo %s)\n'
2405 2410 ) % (oldtip, desc)
2406 2411 except IOError:
2407 2412 msg = _(b'rolling back unknown transaction\n')
2408 2413 desc = None
2409 2414
2410 2415 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2411 2416 raise error.Abort(
2412 2417 _(
2413 2418 b'rollback of last commit while not checked out '
2414 2419 b'may lose data'
2415 2420 ),
2416 2421 hint=_(b'use -f to force'),
2417 2422 )
2418 2423
2419 2424 ui.status(msg)
2420 2425 if dryrun:
2421 2426 return 0
2422 2427
2423 2428 parents = self.dirstate.parents()
2424 2429 self.destroying()
2425 2430 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2426 2431 transaction.rollback(
2427 2432 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2428 2433 )
2429 2434 bookmarksvfs = bookmarks.bookmarksvfs(self)
2430 2435 if bookmarksvfs.exists(b'undo.bookmarks'):
2431 2436 bookmarksvfs.rename(
2432 2437 b'undo.bookmarks', b'bookmarks', checkambig=True
2433 2438 )
2434 2439 if self.svfs.exists(b'undo.phaseroots'):
2435 2440 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2436 2441 self.invalidate()
2437 2442
2438 2443 has_node = self.changelog.index.has_node
2439 2444 parentgone = any(not has_node(p) for p in parents)
2440 2445 if parentgone:
2441 2446 # prevent dirstateguard from overwriting already restored one
2442 2447 dsguard.close()
2443 2448
2444 2449 narrowspec.restorebackup(self, b'undo.narrowspec')
2445 2450 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2446 2451 self.dirstate.restorebackup(None, b'undo.dirstate')
2447 2452 try:
2448 2453 branch = self.vfs.read(b'undo.branch')
2449 2454 self.dirstate.setbranch(encoding.tolocal(branch))
2450 2455 except IOError:
2451 2456 ui.warn(
2452 2457 _(
2453 2458 b'named branch could not be reset: '
2454 2459 b'current branch is still \'%s\'\n'
2455 2460 )
2456 2461 % self.dirstate.branch()
2457 2462 )
2458 2463
2459 2464 parents = tuple([p.rev() for p in self[None].parents()])
2460 2465 if len(parents) > 1:
2461 2466 ui.status(
2462 2467 _(
2463 2468 b'working directory now based on '
2464 2469 b'revisions %d and %d\n'
2465 2470 )
2466 2471 % parents
2467 2472 )
2468 2473 else:
2469 2474 ui.status(
2470 2475 _(b'working directory now based on revision %d\n') % parents
2471 2476 )
2472 2477 mergestatemod.mergestate.clean(self, self[b'.'].node())
2473 2478
2474 2479 # TODO: if we know which new heads may result from this rollback, pass
2475 2480 # them to destroy(), which will prevent the branchhead cache from being
2476 2481 # invalidated.
2477 2482 self.destroyed()
2478 2483 return 0
2479 2484
2480 2485 def _buildcacheupdater(self, newtransaction):
2481 2486 """called during transaction to build the callback updating cache
2482 2487
2483 2488 Lives on the repository to help extensions that might want to augment
2484 2489 this logic. For this purpose, the created transaction is passed to the
2485 2490 method.
2486 2491 """
2487 2492 # we must avoid cyclic reference between repo and transaction.
2488 2493 reporef = weakref.ref(self)
2489 2494
2490 2495 def updater(tr):
2491 2496 repo = reporef()
2492 2497 repo.updatecaches(tr)
2493 2498
2494 2499 return updater
2495 2500
2496 2501 @unfilteredmethod
2497 2502 def updatecaches(self, tr=None, full=False):
2498 2503 """warm appropriate caches
2499 2504
2500 2505 If this function is called after a transaction closed, the transaction
2501 2506 will be available in the 'tr' argument. This can be used to selectively
2502 2507 update caches relevant to the changes in that transaction.
2503 2508
2504 2509 If 'full' is set, make sure all caches the function knows about have
2505 2510 up-to-date data. Even the ones usually loaded more lazily.
2506 2511 """
2507 2512 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2508 2513 # During strip, many caches are invalid but
2509 2514 # later call to `destroyed` will refresh them.
2510 2515 return
2511 2516
2512 2517 if tr is None or tr.changes[b'origrepolen'] < len(self):
2513 2518 # accessing the 'served' branchmap should refresh all the others,
2514 2519 self.ui.debug(b'updating the branch cache\n')
2515 2520 self.filtered(b'served').branchmap()
2516 2521 self.filtered(b'served.hidden').branchmap()
2517 2522
2518 2523 if full:
2519 2524 unfi = self.unfiltered()
2520 2525
2521 2526 self.changelog.update_caches(transaction=tr)
2522 2527 self.manifestlog.update_caches(transaction=tr)
2523 2528
2524 2529 rbc = unfi.revbranchcache()
2525 2530 for r in unfi.changelog:
2526 2531 rbc.branchinfo(r)
2527 2532 rbc.write()
2528 2533
2529 2534 # ensure the working copy parents are in the manifestfulltextcache
2530 2535 for ctx in self[b'.'].parents():
2531 2536 ctx.manifest() # accessing the manifest is enough
2532 2537
2533 2538 # accessing fnode cache warms the cache
2534 2539 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2535 2540 # accessing tags warm the cache
2536 2541 self.tags()
2537 2542 self.filtered(b'served').tags()
2538 2543
2539 2544 # The `full` arg is documented as updating even the lazily-loaded
2540 2545 # caches immediately, so we're forcing a write to cause these caches
2541 2546 # to be warmed up even if they haven't explicitly been requested
2542 2547 # yet (if they've never been used by hg, they won't ever have been
2543 2548 # written, even if they're a subset of another kind of cache that
2544 2549 # *has* been used).
2545 2550 for filt in repoview.filtertable.keys():
2546 2551 filtered = self.filtered(filt)
2547 2552 filtered.branchmap().write(filtered)
2548 2553
2549 2554 def invalidatecaches(self):
2550 2555
2551 2556 if '_tagscache' in vars(self):
2552 2557 # can't use delattr on proxy
2553 2558 del self.__dict__['_tagscache']
2554 2559
2555 2560 self._branchcaches.clear()
2556 2561 self.invalidatevolatilesets()
2557 2562 self._sparsesignaturecache.clear()
2558 2563
2559 2564 def invalidatevolatilesets(self):
2560 2565 self.filteredrevcache.clear()
2561 2566 obsolete.clearobscaches(self)
2562 2567 self._quick_access_changeid_invalidate()
2563 2568
2564 2569 def invalidatedirstate(self):
2565 2570 '''Invalidates the dirstate, causing the next call to dirstate
2566 2571 to check if it was modified since the last time it was read,
2567 2572 rereading it if it has.
2568 2573
2569 2574 This is different from dirstate.invalidate() in that it doesn't always
2570 2575 reread the dirstate. Use dirstate.invalidate() if you want to
2571 2576 explicitly read the dirstate again (i.e. restoring it to a previous
2572 2577 known good state).'''
2573 2578 if hasunfilteredcache(self, 'dirstate'):
2574 2579 for k in self.dirstate._filecache:
2575 2580 try:
2576 2581 delattr(self.dirstate, k)
2577 2582 except AttributeError:
2578 2583 pass
2579 2584 delattr(self.unfiltered(), 'dirstate')
2580 2585
2581 2586 def invalidate(self, clearfilecache=False):
2582 2587 '''Invalidates both store and non-store parts other than dirstate
2583 2588
2584 2589 If a transaction is running, invalidation of store is omitted,
2585 2590 because discarding in-memory changes might cause inconsistency
2586 2591 (e.g. incomplete fncache causes unintentional failure, but
2587 2592 redundant one doesn't).
2588 2593 '''
2589 2594 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2590 2595 for k in list(self._filecache.keys()):
2591 2596 # dirstate is invalidated separately in invalidatedirstate()
2592 2597 if k == b'dirstate':
2593 2598 continue
2594 2599 if (
2595 2600 k == b'changelog'
2596 2601 and self.currenttransaction()
2597 2602 and self.changelog._delayed
2598 2603 ):
2599 2604 # The changelog object may store unwritten revisions. We don't
2600 2605 # want to lose them.
2601 2606 # TODO: Solve the problem instead of working around it.
2602 2607 continue
2603 2608
2604 2609 if clearfilecache:
2605 2610 del self._filecache[k]
2606 2611 try:
2607 2612 delattr(unfiltered, k)
2608 2613 except AttributeError:
2609 2614 pass
2610 2615 self.invalidatecaches()
2611 2616 if not self.currenttransaction():
2612 2617 # TODO: Changing contents of store outside transaction
2613 2618 # causes inconsistency. We should make in-memory store
2614 2619 # changes detectable, and abort if changed.
2615 2620 self.store.invalidatecaches()
2616 2621
2617 2622 def invalidateall(self):
2618 2623 '''Fully invalidates both store and non-store parts, causing the
2619 2624 subsequent operation to reread any outside changes.'''
2620 2625 # extension should hook this to invalidate its caches
2621 2626 self.invalidate()
2622 2627 self.invalidatedirstate()
2623 2628
2624 2629 @unfilteredmethod
2625 2630 def _refreshfilecachestats(self, tr):
2626 2631 """Reload stats of cached files so that they are flagged as valid"""
2627 2632 for k, ce in self._filecache.items():
2628 2633 k = pycompat.sysstr(k)
2629 2634 if k == 'dirstate' or k not in self.__dict__:
2630 2635 continue
2631 2636 ce.refresh()
2632 2637
2633 2638 def _lock(
2634 2639 self,
2635 2640 vfs,
2636 2641 lockname,
2637 2642 wait,
2638 2643 releasefn,
2639 2644 acquirefn,
2640 2645 desc,
2641 2646 inheritchecker=None,
2642 2647 parentenvvar=None,
2643 2648 ):
2644 2649 parentlock = None
2645 2650 # the contents of parentenvvar are used by the underlying lock to
2646 2651 # determine whether it can be inherited
2647 2652 if parentenvvar is not None:
2648 2653 parentlock = encoding.environ.get(parentenvvar)
2649 2654
2650 2655 timeout = 0
2651 2656 warntimeout = 0
2652 2657 if wait:
2653 2658 timeout = self.ui.configint(b"ui", b"timeout")
2654 2659 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2655 2660 # internal config: ui.signal-safe-lock
2656 2661 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2657 2662
2658 2663 l = lockmod.trylock(
2659 2664 self.ui,
2660 2665 vfs,
2661 2666 lockname,
2662 2667 timeout,
2663 2668 warntimeout,
2664 2669 releasefn=releasefn,
2665 2670 acquirefn=acquirefn,
2666 2671 desc=desc,
2667 2672 inheritchecker=inheritchecker,
2668 2673 parentlock=parentlock,
2669 2674 signalsafe=signalsafe,
2670 2675 )
2671 2676 return l
2672 2677
2673 2678 def _afterlock(self, callback):
2674 2679 """add a callback to be run when the repository is fully unlocked
2675 2680
2676 2681 The callback will be executed when the outermost lock is released
2677 2682 (with wlock being higher level than 'lock')."""
2678 2683 for ref in (self._wlockref, self._lockref):
2679 2684 l = ref and ref()
2680 2685 if l and l.held:
2681 2686 l.postrelease.append(callback)
2682 2687 break
2683 2688 else: # no lock has been found.
2684 2689 callback(True)
2685 2690
2686 2691 def lock(self, wait=True):
2687 2692 '''Lock the repository store (.hg/store) and return a weak reference
2688 2693 to the lock. Use this before modifying the store (e.g. committing or
2689 2694 stripping). If you are opening a transaction, get a lock as well.
2690 2695
2691 2696 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2692 2697 'wlock' first to avoid a dead-lock hazard.'''
2693 2698 l = self._currentlock(self._lockref)
2694 2699 if l is not None:
2695 2700 l.lock()
2696 2701 return l
2697 2702
2698 2703 l = self._lock(
2699 2704 vfs=self.svfs,
2700 2705 lockname=b"lock",
2701 2706 wait=wait,
2702 2707 releasefn=None,
2703 2708 acquirefn=self.invalidate,
2704 2709 desc=_(b'repository %s') % self.origroot,
2705 2710 )
2706 2711 self._lockref = weakref.ref(l)
2707 2712 return l
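# usage sketch (illustrative only): callers that need both locks follow the
# ordering documented above, typically
#
#   with repo.wlock(), repo.lock():
#       ...  # modify the working copy and the store
#
# acquiring 'wlock' before 'lock' to avoid the dead-lock hazard.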
2708 2713
2709 2714 def _wlockchecktransaction(self):
2710 2715 if self.currenttransaction() is not None:
2711 2716 raise error.LockInheritanceContractViolation(
2712 2717 b'wlock cannot be inherited in the middle of a transaction'
2713 2718 )
2714 2719
2715 2720 def wlock(self, wait=True):
2716 2721 '''Lock the non-store parts of the repository (everything under
2717 2722 .hg except .hg/store) and return a weak reference to the lock.
2718 2723
2719 2724 Use this before modifying files in .hg.
2720 2725
2721 2726 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2722 2727 'wlock' first to avoid a dead-lock hazard.'''
2723 2728 l = self._wlockref and self._wlockref()
2724 2729 if l is not None and l.held:
2725 2730 l.lock()
2726 2731 return l
2727 2732
2728 2733 # We do not need to check for non-waiting lock acquisition. Such
2729 2734 # acquisition would not cause dead-lock as it would just fail.
2730 2735 if wait and (
2731 2736 self.ui.configbool(b'devel', b'all-warnings')
2732 2737 or self.ui.configbool(b'devel', b'check-locks')
2733 2738 ):
2734 2739 if self._currentlock(self._lockref) is not None:
2735 2740 self.ui.develwarn(b'"wlock" acquired after "lock"')
2736 2741
2737 2742 def unlock():
2738 2743 if self.dirstate.pendingparentchange():
2739 2744 self.dirstate.invalidate()
2740 2745 else:
2741 2746 self.dirstate.write(None)
2742 2747
2743 2748 self._filecache[b'dirstate'].refresh()
2744 2749
2745 2750 l = self._lock(
2746 2751 self.vfs,
2747 2752 b"wlock",
2748 2753 wait,
2749 2754 unlock,
2750 2755 self.invalidatedirstate,
2751 2756 _(b'working directory of %s') % self.origroot,
2752 2757 inheritchecker=self._wlockchecktransaction,
2753 2758 parentenvvar=b'HG_WLOCK_LOCKER',
2754 2759 )
2755 2760 self._wlockref = weakref.ref(l)
2756 2761 return l
2757 2762
2758 2763 def _currentlock(self, lockref):
2759 2764 """Returns the lock if it's held, or None if it's not."""
2760 2765 if lockref is None:
2761 2766 return None
2762 2767 l = lockref()
2763 2768 if l is None or not l.held:
2764 2769 return None
2765 2770 return l
2766 2771
2767 2772 def currentwlock(self):
2768 2773 """Returns the wlock if it's held, or None if it's not."""
2769 2774 return self._currentlock(self._wlockref)
2770 2775
2771 2776 def _filecommit(
2772 2777 self,
2773 2778 fctx,
2774 2779 manifest1,
2775 2780 manifest2,
2776 2781 linkrev,
2777 2782 tr,
2778 2783 changelist,
2779 2784 includecopymeta,
2780 2785 ):
2781 2786 """
2782 2787 commit an individual file as part of a larger transaction
2783 2788 """
2784 2789
2785 2790 fname = fctx.path()
2786 2791 fparent1 = manifest1.get(fname, nullid)
2787 2792 fparent2 = manifest2.get(fname, nullid)
2788 2793 if isinstance(fctx, context.filectx):
2789 2794 node = fctx.filenode()
2790 2795 if node in [fparent1, fparent2]:
2791 2796 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2792 2797 if (
2793 2798 fparent1 != nullid
2794 2799 and manifest1.flags(fname) != fctx.flags()
2795 2800 ) or (
2796 2801 fparent2 != nullid
2797 2802 and manifest2.flags(fname) != fctx.flags()
2798 2803 ):
2799 2804 changelist.append(fname)
2800 2805 return node
2801 2806
2802 2807 flog = self.file(fname)
2803 2808 meta = {}
2804 2809 cfname = fctx.copysource()
2805 2810 if cfname and cfname != fname:
2806 2811 # Mark the new revision of this file as a copy of another
2807 2812 # file. This copy data will effectively act as a parent
2808 2813 # of this new revision. If this is a merge, the first
2809 2814 # parent will be the nullid (meaning "look up the copy data")
2810 2815 # and the second one will be the other parent. For example:
2811 2816 #
2812 2817 # 0 --- 1 --- 3 rev1 changes file foo
2813 2818 # \ / rev2 renames foo to bar and changes it
2814 2819 # \- 2 -/ rev3 should have bar with all changes and
2815 2820 # should record that bar descends from
2816 2821 # bar in rev2 and foo in rev1
2817 2822 #
2818 2823 # this allows this merge to succeed:
2819 2824 #
2820 2825 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2821 2826 # \ / merging rev3 and rev4 should use bar@rev2
2822 2827 # \- 2 --- 4 as the merge base
2823 2828 #
2824 2829
2825 2830 cnode = manifest1.get(cfname)
2826 2831 newfparent = fparent2
2827 2832
2828 2833 if manifest2: # branch merge
2829 2834 if fparent2 == nullid or cnode is None: # copied on remote side
2830 2835 if cfname in manifest2:
2831 2836 cnode = manifest2[cfname]
2832 2837 newfparent = fparent1
2833 2838
2834 2839 # Here, we used to search backwards through history to try to find
2835 2840 # where the file copy came from if the source of a copy was not in
2836 2841 # the parent directory. However, this doesn't actually make sense to
2837 2842 # do (what does a copy from something not in your working copy even
2838 2843 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2839 2844 # the user that copy information was dropped, so if they didn't
2840 2845 # expect this outcome it can be fixed, but this is the correct
2841 2846 # behavior in this circumstance.
2842 2847
2843 2848 if cnode:
2844 2849 self.ui.debug(
2845 2850 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2846 2851 )
2847 2852 if includecopymeta:
2848 2853 meta[b"copy"] = cfname
2849 2854 meta[b"copyrev"] = hex(cnode)
2850 2855 fparent1, fparent2 = nullid, newfparent
2851 2856 else:
2852 2857 self.ui.warn(
2853 2858 _(
2854 2859 b"warning: can't find ancestor for '%s' "
2855 2860 b"copied from '%s'!\n"
2856 2861 )
2857 2862 % (fname, cfname)
2858 2863 )
2859 2864
2860 2865 elif fparent1 == nullid:
2861 2866 fparent1, fparent2 = fparent2, nullid
2862 2867 elif fparent2 != nullid:
2863 2868 # is one parent an ancestor of the other?
2864 2869 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2865 2870 if fparent1 in fparentancestors:
2866 2871 fparent1, fparent2 = fparent2, nullid
2867 2872 elif fparent2 in fparentancestors:
2868 2873 fparent2 = nullid
2869 2874 elif not fparentancestors:
2870 2875 # TODO: this whole if-else might be simplified much more
2871 2876 ms = mergestatemod.mergestate.read(self)
2872 2877 if (
2873 2878 fname in ms
2874 2879 and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
2875 2880 ):
2876 2881 fparent1, fparent2 = fparent2, nullid
2877 2882
2878 2883 # is the file changed?
2879 2884 text = fctx.data()
2880 2885 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2881 2886 changelist.append(fname)
2882 2887 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2883 2888 # are just the flags changed during merge?
2884 2889 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2885 2890 changelist.append(fname)
2886 2891
2887 2892 return fparent1
2888 2893
2889 2894 def checkcommitpatterns(self, wctx, match, status, fail):
2890 2895 """check for commit arguments that aren't committable"""
2891 2896 if match.isexact() or match.prefix():
2892 2897 matched = set(status.modified + status.added + status.removed)
2893 2898
2894 2899 for f in match.files():
2895 2900 f = self.dirstate.normalize(f)
2896 2901 if f == b'.' or f in matched or f in wctx.substate:
2897 2902 continue
2898 2903 if f in status.deleted:
2899 2904 fail(f, _(b'file not found!'))
2900 2905 # Is it a directory that exists or used to exist?
2901 2906 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2902 2907 d = f + b'/'
2903 2908 for mf in matched:
2904 2909 if mf.startswith(d):
2905 2910 break
2906 2911 else:
2907 2912 fail(f, _(b"no match under directory!"))
2908 2913 elif f not in self.dirstate:
2909 2914 fail(f, _(b"file not tracked!"))
2910 2915
2911 2916 @unfilteredmethod
2912 2917 def commit(
2913 2918 self,
2914 2919 text=b"",
2915 2920 user=None,
2916 2921 date=None,
2917 2922 match=None,
2918 2923 force=False,
2919 2924 editor=None,
2920 2925 extra=None,
2921 2926 ):
2922 2927 """Add a new revision to current repository.
2923 2928
2924 2929 Revision information is gathered from the working directory;
2925 2930 match can be used to filter the committed files. If editor is
2926 2931 supplied, it is called to get a commit message.
2927 2932 """
2928 2933 if extra is None:
2929 2934 extra = {}
2930 2935
2931 2936 def fail(f, msg):
2932 2937 raise error.Abort(b'%s: %s' % (f, msg))
2933 2938
2934 2939 if not match:
2935 2940 match = matchmod.always()
2936 2941
2937 2942 if not force:
2938 2943 match.bad = fail
2939 2944
2940 2945 # lock() for recent changelog (see issue4368)
2941 2946 with self.wlock(), self.lock():
2942 2947 wctx = self[None]
2943 2948 merge = len(wctx.parents()) > 1
2944 2949
2945 2950 if not force and merge and not match.always():
2946 2951 raise error.Abort(
2947 2952 _(
2948 2953 b'cannot partially commit a merge '
2949 2954 b'(do not specify files or patterns)'
2950 2955 )
2951 2956 )
2952 2957
2953 2958 status = self.status(match=match, clean=force)
2954 2959 if force:
2955 2960 status.modified.extend(
2956 2961 status.clean
2957 2962 ) # mq may commit clean files
2958 2963
2959 2964 # check subrepos
2960 2965 subs, commitsubs, newstate = subrepoutil.precommit(
2961 2966 self.ui, wctx, status, match, force=force
2962 2967 )
2963 2968
2964 2969 # make sure all explicit patterns are matched
2965 2970 if not force:
2966 2971 self.checkcommitpatterns(wctx, match, status, fail)
2967 2972
2968 2973 cctx = context.workingcommitctx(
2969 2974 self, status, text, user, date, extra
2970 2975 )
2971 2976
2972 2977 ms = mergestatemod.mergestate.read(self)
2973 2978 mergeutil.checkunresolved(ms)
2974 2979
2975 2980 # internal config: ui.allowemptycommit
2976 2981 allowemptycommit = (
2977 2982 wctx.branch() != wctx.p1().branch()
2978 2983 or extra.get(b'close')
2979 2984 or merge
2980 2985 or cctx.files()
2981 2986 or self.ui.configbool(b'ui', b'allowemptycommit')
2982 2987 )
2983 2988 if not allowemptycommit:
2984 2989 self.ui.debug(b'nothing to commit, clearing merge state\n')
2985 2990 ms.reset()
2986 2991 return None
2987 2992
2988 2993 if merge and cctx.deleted():
2989 2994 raise error.Abort(_(b"cannot commit merge with missing files"))
2990 2995
2991 2996 if editor:
2992 2997 cctx._text = editor(self, cctx, subs)
2993 2998 edited = text != cctx._text
2994 2999
2995 3000 # Save commit message in case this transaction gets rolled back
2996 3001 # (e.g. by a pretxncommit hook). Leave the content alone on
2997 3002 # the assumption that the user will use the same editor again.
2998 3003 msgfn = self.savecommitmessage(cctx._text)
2999 3004
3000 3005 # commit subs and write new state
3001 3006 if subs:
3002 3007 uipathfn = scmutil.getuipathfn(self)
3003 3008 for s in sorted(commitsubs):
3004 3009 sub = wctx.sub(s)
3005 3010 self.ui.status(
3006 3011 _(b'committing subrepository %s\n')
3007 3012 % uipathfn(subrepoutil.subrelpath(sub))
3008 3013 )
3009 3014 sr = sub.commit(cctx._text, user, date)
3010 3015 newstate[s] = (newstate[s][0], sr)
3011 3016 subrepoutil.writestate(self, newstate)
3012 3017
3013 3018 p1, p2 = self.dirstate.parents()
3014 3019 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3015 3020 try:
3016 3021 self.hook(
3017 3022 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3018 3023 )
3019 3024 with self.transaction(b'commit'):
3020 3025 ret = self.commitctx(cctx, True)
3021 3026 # update bookmarks, dirstate and mergestate
3022 3027 bookmarks.update(self, [p1, p2], ret)
3023 3028 cctx.markcommitted(ret)
3024 3029 ms.reset()
3025 3030 except: # re-raises
3026 3031 if edited:
3027 3032 self.ui.write(
3028 3033 _(b'note: commit message saved in %s\n') % msgfn
3029 3034 )
3030 3035 self.ui.write(
3031 3036 _(
3032 3037 b"note: use 'hg commit --logfile "
3033 3038 b".hg/last-message.txt --edit' to reuse it\n"
3034 3039 )
3035 3040 )
3036 3041 raise
3037 3042
3038 3043 def commithook(unused_success):
3039 3044 # hack for commands that use a temporary commit (e.g. histedit)
3040 3045 # the temporary commit may already have been stripped before the hook runs
3041 3046 if self.changelog.hasnode(ret):
3042 3047 self.hook(
3043 3048 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3044 3049 )
3045 3050
3046 3051 self._afterlock(commithook)
3047 3052 return ret
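# A minimal usage sketch: given an existing ``repo`` object (e.g. one obtained
# via mercurial.hg.repository(ui, path)), a working-directory commit restricted
# to a single file could look like the following; the file name, user, and
# message are illustrative only.
#
#     m = matchmod.match(repo.root, b'', [b'path:README'])
#     node = repo.commit(text=b'update README', user=b'alice', match=m)
#     if node is None:
#         repo.ui.status(b'nothing to commit\n')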
3048 3053
3049 3054 @unfilteredmethod
3050 3055 def commitctx(self, ctx, error=False, origctx=None):
3051 3056 """Add a new revision to current repository.
3052 3057 Revision information is passed via the context argument.
3053 3058
3054 3059 ctx.files() should list all files involved in this commit, i.e.
3055 3060 modified/added/removed files. On merge, it may be wider than the
3056 3061 ctx.files() to be committed, since any file nodes derived directly
3057 3062 from p1 or p2 are excluded from the committed ctx.files().
3058 3063
3059 3064 origctx is for convert to work around the problem that bug
3060 3065 fixes to the files list in changesets change hashes. For
3061 3066 convert to be the identity, it can pass an origctx and this
3062 3067 function will use the same files list when it makes sense to
3063 3068 do so.
3064 3069 """
3065 3070
3066 3071 p1, p2 = ctx.p1(), ctx.p2()
3067 3072 user = ctx.user()
3068 3073
3069 3074 if self.filecopiesmode == b'changeset-sidedata':
3070 3075 writechangesetcopy = True
3071 3076 writefilecopymeta = True
3072 3077 writecopiesto = None
3073 3078 else:
3074 3079 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3075 3080 writefilecopymeta = writecopiesto != b'changeset-only'
3076 3081 writechangesetcopy = writecopiesto in (
3077 3082 b'changeset-only',
3078 3083 b'compatibility',
3079 3084 )
3080 3085 p1copies, p2copies = None, None
3081 3086 if writechangesetcopy:
3082 3087 p1copies = ctx.p1copies()
3083 3088 p2copies = ctx.p2copies()
3084 3089 filesadded, filesremoved = None, None
3085 3090 with self.lock(), self.transaction(b"commit") as tr:
3086 3091 trp = weakref.proxy(tr)
3087 3092
3088 3093 if ctx.manifestnode():
3089 3094 # reuse an existing manifest revision
3090 3095 self.ui.debug(b'reusing known manifest\n')
3091 3096 mn = ctx.manifestnode()
3092 3097 files = ctx.files()
3093 3098 if writechangesetcopy:
3094 3099 filesadded = ctx.filesadded()
3095 3100 filesremoved = ctx.filesremoved()
3096 3101 elif ctx.files():
3097 3102 m1ctx = p1.manifestctx()
3098 3103 m2ctx = p2.manifestctx()
3099 3104 mctx = m1ctx.copy()
3100 3105
3101 3106 m = mctx.read()
3102 3107 m1 = m1ctx.read()
3103 3108 m2 = m2ctx.read()
3104 3109
3105 3110 # check in files
3106 3111 added = []
3107 3112 changed = []
3108 3113 removed = list(ctx.removed())
3109 3114 linkrev = len(self)
3110 3115 self.ui.note(_(b"committing files:\n"))
3111 3116 uipathfn = scmutil.getuipathfn(self)
3112 3117 for f in sorted(ctx.modified() + ctx.added()):
3113 3118 self.ui.note(uipathfn(f) + b"\n")
3114 3119 try:
3115 3120 fctx = ctx[f]
3116 3121 if fctx is None:
3117 3122 removed.append(f)
3118 3123 else:
3119 3124 added.append(f)
3120 3125 m[f] = self._filecommit(
3121 3126 fctx,
3122 3127 m1,
3123 3128 m2,
3124 3129 linkrev,
3125 3130 trp,
3126 3131 changed,
3127 3132 writefilecopymeta,
3128 3133 )
3129 3134 m.setflag(f, fctx.flags())
3130 3135 except OSError:
3131 3136 self.ui.warn(
3132 3137 _(b"trouble committing %s!\n") % uipathfn(f)
3133 3138 )
3134 3139 raise
3135 3140 except IOError as inst:
3136 3141 errcode = getattr(inst, 'errno', errno.ENOENT)
3137 3142 if error or errcode and errcode != errno.ENOENT:
3138 3143 self.ui.warn(
3139 3144 _(b"trouble committing %s!\n") % uipathfn(f)
3140 3145 )
3141 3146 raise
3142 3147
3143 3148 # update manifest
3144 3149 removed = [f for f in removed if f in m1 or f in m2]
3145 3150 drop = sorted([f for f in removed if f in m])
3146 3151 for f in drop:
3147 3152 del m[f]
3148 3153 if p2.rev() != nullrev:
3149 3154 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
3150 3155 removed = [f for f in removed if not rf(f)]
3151 3156
3152 3157 files = changed + removed
3153 3158 md = None
3154 3159 if not files:
3155 3160 # if no "files" actually changed in terms of the changelog,
3156 3161 # try hard to detect an unmodified manifest entry so that the
3157 3162 # exact same commit can be reproduced later by convert.
3158 3163 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3159 3164 if not files and md:
3160 3165 self.ui.debug(
3161 3166 b'not reusing manifest (no file change in '
3162 3167 b'changelog, but manifest differs)\n'
3163 3168 )
3164 3169 if files or md:
3165 3170 self.ui.note(_(b"committing manifest\n"))
3166 3171 # we're using narrowmatch here since it's already applied at
3167 3172 # other stages (such as dirstate.walk), so we're already
3168 3173 # ignoring things outside of narrowspec in most cases. The
3169 3174 # one case where we might have files outside the narrowspec
3170 3175 # at this point is merges, and we already error out in the
3171 3176 # case where the merge has files outside of the narrowspec,
3172 3177 # so this is safe.
3173 3178 mn = mctx.write(
3174 3179 trp,
3175 3180 linkrev,
3176 3181 p1.manifestnode(),
3177 3182 p2.manifestnode(),
3178 3183 added,
3179 3184 drop,
3180 3185 match=self.narrowmatch(),
3181 3186 )
3182 3187
3183 3188 if writechangesetcopy:
3184 3189 filesadded = [
3185 3190 f for f in changed if not (f in m1 or f in m2)
3186 3191 ]
3187 3192 filesremoved = removed
3188 3193 else:
3189 3194 self.ui.debug(
3190 3195 b'reusing manifest from p1 (listed files '
3191 3196 b'actually unchanged)\n'
3192 3197 )
3193 3198 mn = p1.manifestnode()
3194 3199 else:
3195 3200 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3196 3201 mn = p1.manifestnode()
3197 3202 files = []
3198 3203
3199 3204 if writecopiesto == b'changeset-only':
3200 3205 # If writing only to changeset extras, use None to indicate that
3201 3206 # no entry should be written. If writing to both, write an empty
3202 3207 # entry to prevent the reader from falling back to reading
3203 3208 # filelogs.
3204 3209 p1copies = p1copies or None
3205 3210 p2copies = p2copies or None
3206 3211 filesadded = filesadded or None
3207 3212 filesremoved = filesremoved or None
3208 3213
3209 3214 if origctx and origctx.manifestnode() == mn:
3210 3215 files = origctx.files()
3211 3216
3212 3217 # update changelog
3213 3218 self.ui.note(_(b"committing changelog\n"))
3214 3219 self.changelog.delayupdate(tr)
3215 3220 n = self.changelog.add(
3216 3221 mn,
3217 3222 files,
3218 3223 ctx.description(),
3219 3224 trp,
3220 3225 p1.node(),
3221 3226 p2.node(),
3222 3227 user,
3223 3228 ctx.date(),
3224 3229 ctx.extra().copy(),
3225 3230 p1copies,
3226 3231 p2copies,
3227 3232 filesadded,
3228 3233 filesremoved,
3229 3234 )
3230 3235 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3231 3236 self.hook(
3232 3237 b'pretxncommit',
3233 3238 throw=True,
3234 3239 node=hex(n),
3235 3240 parent1=xp1,
3236 3241 parent2=xp2,
3237 3242 )
3238 3243 # set the new commit in its proper phase
3239 3244 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3240 3245 if targetphase:
3241 3246 # retracting the boundary does not alter the parent changeset.
3242 3247 # if a parent has a higher phase, the resulting phase will
3243 3248 # be compliant anyway
3244 3249 #
3245 3250 # if minimal phase was 0 we don't need to retract anything
3246 3251 phases.registernew(self, tr, targetphase, [n])
3247 3252 return n
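# A hedged sketch of driving commitctx() directly with an in-memory context,
# roughly the way convert does; the file name, contents, and message below are
# illustrative, and the context.memctx/memfilectx signatures are assumed from
# this version of Mercurial.
#
#     def getfilectx(repo, memctx, path):
#         return context.memfilectx(repo, memctx, path, b'new contents\n')
#
#     mctx = context.memctx(
#         repo,
#         (repo[b'.'].node(), nullid),
#         b'example commit message',
#         [b'a.txt'],
#         getfilectx,
#         user=b'alice',
#     )
#     node = repo.commitctx(mctx)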
3248 3253
3249 3254 @unfilteredmethod
3250 3255 def destroying(self):
3251 3256 '''Inform the repository that nodes are about to be destroyed.
3252 3257 Intended for use by strip and rollback, so there's a common
3253 3258 place for anything that has to be done before destroying history.
3254 3259
3255 3260 This is mostly useful for saving state that is in memory and waiting
3256 3261 to be flushed when the current lock is released. Because a call to
3257 3262 destroyed is imminent, the repo will be invalidated causing those
3258 3263 changes to stay in memory (waiting for the next unlock), or vanish
3259 3264 completely.
3260 3265 '''
3261 3266 # When using the same lock to commit and strip, the phasecache is left
3262 3267 # dirty after committing. Then when we strip, the repo is invalidated,
3263 3268 # causing those changes to disappear.
3264 3269 if '_phasecache' in vars(self):
3265 3270 self._phasecache.write()
3266 3271
3267 3272 @unfilteredmethod
3268 3273 def destroyed(self):
3269 3274 '''Inform the repository that nodes have been destroyed.
3270 3275 Intended for use by strip and rollback, so there's a common
3271 3276 place for anything that has to be done after destroying history.
3272 3277 '''
3273 3278 # When one tries to:
3274 3279 # 1) destroy nodes thus calling this method (e.g. strip)
3275 3280 # 2) use phasecache somewhere (e.g. commit)
3276 3281 #
3277 3282 # then 2) will fail because the phasecache contains nodes that were
3278 3283 # removed. We can either remove phasecache from the filecache,
3279 3284 # causing it to reload next time it is accessed, or simply filter
3280 3285 # the removed nodes now and write the updated cache.
3281 3286 self._phasecache.filterunknown(self)
3282 3287 self._phasecache.write()
3283 3288
3284 3289 # refresh all repository caches
3285 3290 self.updatecaches()
3286 3291
3287 3292 # Ensure the persistent tag cache is updated. Doing it now
3288 3293 # means that the tag cache only has to worry about destroyed
3289 3294 # heads immediately after a strip/rollback. That in turn
3290 3295 # guarantees that "cachetip == currenttip" (comparing both rev
3291 3296 # and node) always means no nodes have been added or destroyed.
3292 3297
3293 3298 # XXX this is suboptimal when qrefresh'ing: we strip the current
3294 3299 # head, refresh the tag cache, then immediately add a new head.
3295 3300 # But I think doing it this way is necessary for the "instant
3296 3301 # tag cache retrieval" case to work.
3297 3302 self.invalidate()
3298 3303
3299 3304 def status(
3300 3305 self,
3301 3306 node1=b'.',
3302 3307 node2=None,
3303 3308 match=None,
3304 3309 ignored=False,
3305 3310 clean=False,
3306 3311 unknown=False,
3307 3312 listsubrepos=False,
3308 3313 ):
3309 3314 '''a convenience method that calls node1.status(node2)'''
3310 3315 return self[node1].status(
3311 3316 node2, match, ignored, clean, unknown, listsubrepos
3312 3317 )
3313 3318
3314 3319 def addpostdsstatus(self, ps):
3315 3320 """Add a callback to run within the wlock, at the point at which status
3316 3321 fixups happen.
3317 3322
3318 3323 On status completion, callback(wctx, status) will be called with the
3319 3324 wlock held, unless the dirstate has changed from underneath or the wlock
3320 3325 couldn't be grabbed.
3321 3326
3322 3327 Callbacks should not capture and use a cached copy of the dirstate --
3323 3328 it might change in the meanwhile. Instead, they should access the
3324 3329 dirstate via wctx.repo().dirstate.
3325 3330
3326 3331 This list is emptied out after each status run -- extensions should
3327 3332 make sure they add to this list each time dirstate.status is called.
3328 3333 Extensions should also make sure they don't call this for statuses
3329 3334 that don't involve the dirstate.
3330 3335 """
3331 3336
3332 3337 # The list is located here for uniqueness reasons -- it is actually
3333 3338 # managed by the workingctx, but that isn't unique per-repo.
3334 3339 self._postdsstatus.append(ps)
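# An illustrative registration (the callback name is hypothetical); the
# callback runs with the wlock held and reads the dirstate via wctx.repo()
# as described above.
#
#     def _logstatus(wctx, status):
#         wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))
#
#     repo.addpostdsstatus(_logstatus)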
3335 3340
3336 3341 def postdsstatus(self):
3337 3342 """Used by workingctx to get the list of post-dirstate-status hooks."""
3338 3343 return self._postdsstatus
3339 3344
3340 3345 def clearpostdsstatus(self):
3341 3346 """Used by workingctx to clear post-dirstate-status hooks."""
3342 3347 del self._postdsstatus[:]
3343 3348
3344 3349 def heads(self, start=None):
3345 3350 if start is None:
3346 3351 cl = self.changelog
3347 3352 headrevs = reversed(cl.headrevs())
3348 3353 return [cl.node(rev) for rev in headrevs]
3349 3354
3350 3355 heads = self.changelog.heads(start)
3351 3356 # sort the output in rev descending order
3352 3357 return sorted(heads, key=self.changelog.rev, reverse=True)
3353 3358
3354 3359 def branchheads(self, branch=None, start=None, closed=False):
3355 3360 '''return a (possibly filtered) list of heads for the given branch
3356 3361
3357 3362 Heads are returned in topological order, from newest to oldest.
3358 3363 If branch is None, use the dirstate branch.
3359 3364 If start is not None, return only heads reachable from start.
3360 3365 If closed is True, return heads that are marked as closed as well.
3361 3366 '''
3362 3367 if branch is None:
3363 3368 branch = self[None].branch()
3364 3369 branches = self.branchmap()
3365 3370 if not branches.hasbranch(branch):
3366 3371 return []
3367 3372 # the cache returns heads ordered lowest to highest
3368 3373 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3369 3374 if start is not None:
3370 3375 # filter out the heads that cannot be reached from startrev
3371 3376 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3372 3377 bheads = [h for h in bheads if h in fbheads]
3373 3378 return bheads
3374 3379
3375 3380 def branches(self, nodes):
3376 3381 if not nodes:
3377 3382 nodes = [self.changelog.tip()]
3378 3383 b = []
3379 3384 for n in nodes:
3380 3385 t = n
3381 3386 while True:
3382 3387 p = self.changelog.parents(n)
3383 3388 if p[1] != nullid or p[0] == nullid:
3384 3389 b.append((t, n, p[0], p[1]))
3385 3390 break
3386 3391 n = p[0]
3387 3392 return b
3388 3393
3389 3394 def between(self, pairs):
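# For each (top, bottom) pair this walks the first-parent chain from top
# towards bottom and collects the ancestors at exponentially increasing
# distances (1, 2, 4, 8, ...); historically this backed the legacy "between"
# wire-protocol command used during discovery.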
3390 3395 r = []
3391 3396
3392 3397 for top, bottom in pairs:
3393 3398 n, l, i = top, [], 0
3394 3399 f = 1
3395 3400
3396 3401 while n != bottom and n != nullid:
3397 3402 p = self.changelog.parents(n)[0]
3398 3403 if i == f:
3399 3404 l.append(n)
3400 3405 f = f * 2
3401 3406 n = p
3402 3407 i += 1
3403 3408
3404 3409 r.append(l)
3405 3410
3406 3411 return r
3407 3412
3408 3413 def checkpush(self, pushop):
3409 3414 """Extensions can override this function if additional checks have
3410 3415 to be performed before pushing, or call it if they override push
3411 3416 command.
3412 3417 """
3413 3418
3414 3419 @unfilteredpropertycache
3415 3420 def prepushoutgoinghooks(self):
3416 3421 """Return util.hooks consists of a pushop with repo, remote, outgoing
3417 3422 methods, which are called before pushing changesets.
3418 3423 """
3419 3424 return util.hooks()
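# A hedged sketch of how an extension might register such a hook (the function
# name is hypothetical); registered callables receive the pushop before any
# changesets are sent.
#
#     def _reportoutgoing(pushop):
#         if pushop.outgoing.missing:
#             pushop.repo.ui.status(
#                 b'%d changesets to push\n' % len(pushop.outgoing.missing)
#             )
#
#     repo.prepushoutgoinghooks.add(b'myextension', _reportoutgoing)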
3420 3425
3421 3426 def pushkey(self, namespace, key, old, new):
3422 3427 try:
3423 3428 tr = self.currenttransaction()
3424 3429 hookargs = {}
3425 3430 if tr is not None:
3426 3431 hookargs.update(tr.hookargs)
3427 3432 hookargs = pycompat.strkwargs(hookargs)
3428 3433 hookargs['namespace'] = namespace
3429 3434 hookargs['key'] = key
3430 3435 hookargs['old'] = old
3431 3436 hookargs['new'] = new
3432 3437 self.hook(b'prepushkey', throw=True, **hookargs)
3433 3438 except error.HookAbort as exc:
3434 3439 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3435 3440 if exc.hint:
3436 3441 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3437 3442 return False
3438 3443 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3439 3444 ret = pushkey.push(self, namespace, key, old, new)
3440 3445
3441 3446 def runhook(unused_success):
3442 3447 self.hook(
3443 3448 b'pushkey',
3444 3449 namespace=namespace,
3445 3450 key=key,
3446 3451 old=old,
3447 3452 new=new,
3448 3453 ret=ret,
3449 3454 )
3450 3455
3451 3456 self._afterlock(runhook)
3452 3457 return ret
3453 3458
3454 3459 def listkeys(self, namespace):
3455 3460 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3456 3461 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3457 3462 values = pushkey.list(self, namespace)
3458 3463 self.hook(b'listkeys', namespace=namespace, values=values)
3459 3464 return values
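# Illustrative calls (key names and values are made up); the b'bookmarks' and
# b'phases' namespaces are the usual consumers of these two methods.
#
#     marks = repo.listkeys(b'bookmarks')   # e.g. {b'stable': hex node, ...}
#     ok = repo.pushkey(b'bookmarks', b'stable', b'', hex(newnode))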
3460 3465
3461 3466 def debugwireargs(self, one, two, three=None, four=None, five=None):
3462 3467 '''used to test argument passing over the wire'''
3463 3468 return b"%s %s %s %s %s" % (
3464 3469 one,
3465 3470 two,
3466 3471 pycompat.bytestr(three),
3467 3472 pycompat.bytestr(four),
3468 3473 pycompat.bytestr(five),
3469 3474 )
3470 3475
3471 3476 def savecommitmessage(self, text):
3472 3477 fp = self.vfs(b'last-message.txt', b'wb')
3473 3478 try:
3474 3479 fp.write(text)
3475 3480 finally:
3476 3481 fp.close()
3477 3482 return self.pathto(fp.name[len(self.root) + 1 :])
3478 3483
3479 3484
3480 3485 # used to avoid circular references so destructors work
3481 3486 def aftertrans(files):
3482 3487 renamefiles = [tuple(t) for t in files]
3483 3488
3484 3489 def a():
3485 3490 for vfs, src, dest in renamefiles:
3486 3491 # if src and dest refer to the same file, vfs.rename is a no-op,
3487 3492 # leaving both src and dest on disk. delete dest to make sure
3488 3493 # the rename couldn't be such a no-op.
3489 3494 vfs.tryunlink(dest)
3490 3495 try:
3491 3496 vfs.rename(src, dest)
3492 3497 except OSError: # journal file does not yet exist
3493 3498 pass
3494 3499
3495 3500 return a
3496 3501
3497 3502
3498 3503 def undoname(fn):
3499 3504 base, name = os.path.split(fn)
3500 3505 assert name.startswith(b'journal')
3501 3506 return os.path.join(base, name.replace(b'journal', b'undo', 1))
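# For instance, undoname(b'/repo/.hg/store/journal') returns
# b'/repo/.hg/store/undo', and undoname(b'.hg/journal.dirstate') returns
# b'.hg/undo.dirstate' (the paths shown are purely illustrative).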
3502 3507
3503 3508
3504 3509 def instance(ui, path, create, intents=None, createopts=None):
3505 3510 localpath = util.urllocalpath(path)
3506 3511 if create:
3507 3512 createrepository(ui, localpath, createopts=createopts)
3508 3513
3509 3514 return makelocalrepository(ui, localpath, intents=intents)
3510 3515
3511 3516
3512 3517 def islocal(path):
3513 3518 return True
3514 3519
3515 3520
3516 3521 def defaultcreateopts(ui, createopts=None):
3517 3522 """Populate the default creation options for a repository.
3518 3523
3519 3524 A dictionary of explicitly requested creation options can be passed
3520 3525 in. Missing keys will be populated.
3521 3526 """
3522 3527 createopts = dict(createopts or {})
3523 3528
3524 3529 if b'backend' not in createopts:
3525 3530 # experimental config: storage.new-repo-backend
3526 3531 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3527 3532
3528 3533 return createopts
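# For example (a hedged sketch), defaultcreateopts(ui, {b'lfs': True}) returns
# a new dict that keeps b'lfs' and fills in b'backend' from the
# storage.new-repo-backend config, normally b'revlogv1'.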
3529 3534
3530 3535
3531 3536 def newreporequirements(ui, createopts):
3532 3537 """Determine the set of requirements for a new local repository.
3533 3538
3534 3539 Extensions can wrap this function to specify custom requirements for
3535 3540 new repositories.
3536 3541 """
3537 3542 # If the repo is being created from a shared repository, we copy
3538 3543 # its requirements.
3539 3544 if b'sharedrepo' in createopts:
3540 3545 requirements = set(createopts[b'sharedrepo'].requirements)
3541 3546 if createopts.get(b'sharedrelative'):
3542 3547 requirements.add(b'relshared')
3543 3548 else:
3544 3549 requirements.add(b'shared')
3545 3550
3546 3551 return requirements
3547 3552
3548 3553 if b'backend' not in createopts:
3549 3554 raise error.ProgrammingError(
3550 3555 b'backend key not present in createopts; '
3551 3556 b'was defaultcreateopts() called?'
3552 3557 )
3553 3558
3554 3559 if createopts[b'backend'] != b'revlogv1':
3555 3560 raise error.Abort(
3556 3561 _(
3557 3562 b'unable to determine repository requirements for '
3558 3563 b'storage backend: %s'
3559 3564 )
3560 3565 % createopts[b'backend']
3561 3566 )
3562 3567
3563 3568 requirements = {b'revlogv1'}
3564 3569 if ui.configbool(b'format', b'usestore'):
3565 3570 requirements.add(b'store')
3566 3571 if ui.configbool(b'format', b'usefncache'):
3567 3572 requirements.add(b'fncache')
3568 3573 if ui.configbool(b'format', b'dotencode'):
3569 3574 requirements.add(b'dotencode')
3570 3575
3571 3576 compengines = ui.configlist(b'format', b'revlog-compression')
3572 3577 for compengine in compengines:
3573 3578 if compengine in util.compengines:
3574 3579 break
3575 3580 else:
3576 3581 raise error.Abort(
3577 3582 _(
3578 3583 b'compression engines %s defined by '
3579 3584 b'format.revlog-compression not available'
3580 3585 )
3581 3586 % b', '.join(b'"%s"' % e for e in compengines),
3582 3587 hint=_(
3583 3588 b'run "hg debuginstall" to list available '
3584 3589 b'compression engines'
3585 3590 ),
3586 3591 )
3587 3592
3588 3593 # zlib is the historical default and doesn't need an explicit requirement.
3589 3594 if compengine == b'zstd':
3590 3595 requirements.add(b'revlog-compression-zstd')
3591 3596 elif compengine != b'zlib':
3592 3597 requirements.add(b'exp-compression-%s' % compengine)
3593 3598
3594 3599 if scmutil.gdinitconfig(ui):
3595 3600 requirements.add(b'generaldelta')
3596 3601 if ui.configbool(b'format', b'sparse-revlog'):
3597 3602 requirements.add(SPARSEREVLOG_REQUIREMENT)
3598 3603
3599 3604 # experimental config: format.exp-use-side-data
3600 3605 if ui.configbool(b'format', b'exp-use-side-data'):
3601 3606 requirements.add(SIDEDATA_REQUIREMENT)
3602 3607 # experimental config: format.exp-use-copies-side-data-changeset
3603 3608 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3604 3609 requirements.add(SIDEDATA_REQUIREMENT)
3605 3610 requirements.add(COPIESSDC_REQUIREMENT)
3606 3611 if ui.configbool(b'experimental', b'treemanifest'):
3607 3612 requirements.add(b'treemanifest')
3608 3613
3609 3614 revlogv2 = ui.config(b'experimental', b'revlogv2')
3610 3615 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3611 3616 requirements.remove(b'revlogv1')
3612 3617 # generaldelta is implied by revlogv2.
3613 3618 requirements.discard(b'generaldelta')
3614 3619 requirements.add(REVLOGV2_REQUIREMENT)
3615 3620 # experimental config: format.internal-phase
3616 3621 if ui.configbool(b'format', b'internal-phase'):
3617 3622 requirements.add(b'internal-phase')
3618 3623
3619 3624 if createopts.get(b'narrowfiles'):
3620 3625 requirements.add(repository.NARROW_REQUIREMENT)
3621 3626
3622 3627 if createopts.get(b'lfs'):
3623 3628 requirements.add(b'lfs')
3624 3629
3625 3630 if ui.configbool(b'format', b'bookmarks-in-store'):
3626 3631 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3627 3632
3628 3633 if ui.configbool(b'format', b'use-persistent-nodemap'):
3629 3634 requirements.add(NODEMAP_REQUIREMENT)
3630 3635
3631 3636 return requirements
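# A hedged sketch of an extension adding its own requirement by wrapping this
# function from its extsetup(); the requirement and config names are
# hypothetical.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         if ui.configbool(b'myext', b'enabled'):
#             reqs.add(b'exp-myext-requirement')
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )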
3632 3637
3633 3638
3634 3639 def filterknowncreateopts(ui, createopts):
3635 3640 """Filters a dict of repo creation options against options that are known.
3636 3641
3637 3642 Receives a dict of repo creation options and returns a dict of those
3638 3643 options that we don't know how to handle.
3639 3644
3640 3645 This function is called as part of repository creation. If the
3641 3646 returned dict contains any items, repository creation will not
3642 3647 be allowed, as it means there was a request to create a repository
3643 3648 with options not recognized by loaded code.
3644 3649
3645 3650 Extensions can wrap this function to filter out creation options
3646 3651 they know how to handle.
3647 3652 """
3648 3653 known = {
3649 3654 b'backend',
3650 3655 b'lfs',
3651 3656 b'narrowfiles',
3652 3657 b'sharedrepo',
3653 3658 b'sharedrelative',
3654 3659 b'shareditems',
3655 3660 b'shallowfilestore',
3656 3661 }
3657 3662
3658 3663 return {k: v for k, v in createopts.items() if k not in known}
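# Similarly, a hedged sketch of an extension teaching repository creation about
# an extra option (the option name is hypothetical): pop the options it knows
# how to handle and defer to the original function for the rest.
#
#     def _filterknowncreateopts(orig, ui, createopts):
#         createopts = dict(createopts)
#         createopts.pop(b'myext-storage', None)
#         return orig(ui, createopts)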
3659 3664
3660 3665
3661 3666 def createrepository(ui, path, createopts=None):
3662 3667 """Create a new repository in a vfs.
3663 3668
3664 3669 ``path`` path to the new repo's working directory.
3665 3670 ``createopts`` options for the new repository.
3666 3671
3667 3672 The following keys for ``createopts`` are recognized:
3668 3673
3669 3674 backend
3670 3675 The storage backend to use.
3671 3676 lfs
3672 3677 Repository will be created with ``lfs`` requirement. The lfs extension
3673 3678 will automatically be loaded when the repository is accessed.
3674 3679 narrowfiles
3675 3680 Set up repository to support narrow file storage.
3676 3681 sharedrepo
3677 3682 Repository object from which storage should be shared.
3678 3683 sharedrelative
3679 3684 Boolean indicating if the path to the shared repo should be
3680 3685 stored as relative. By default, the pointer to the "parent" repo
3681 3686 is stored as an absolute path.
3682 3687 shareditems
3683 3688 Set of items to share to the new repository (in addition to storage).
3684 3689 shallowfilestore
3685 3690 Indicates that storage for files should be shallow (not all ancestor
3686 3691 revisions are known).
3687 3692 """
3688 3693 createopts = defaultcreateopts(ui, createopts=createopts)
3689 3694
3690 3695 unknownopts = filterknowncreateopts(ui, createopts)
3691 3696
3692 3697 if not isinstance(unknownopts, dict):
3693 3698 raise error.ProgrammingError(
3694 3699 b'filterknowncreateopts() did not return a dict'
3695 3700 )
3696 3701
3697 3702 if unknownopts:
3698 3703 raise error.Abort(
3699 3704 _(
3700 3705 b'unable to create repository because of unknown '
3701 3706 b'creation option: %s'
3702 3707 )
3703 3708 % b', '.join(sorted(unknownopts)),
3704 3709 hint=_(b'is a required extension not loaded?'),
3705 3710 )
3706 3711
3707 3712 requirements = newreporequirements(ui, createopts=createopts)
3708 3713
3709 3714 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3710 3715
3711 3716 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3712 3717 if hgvfs.exists():
3713 3718 raise error.RepoError(_(b'repository %s already exists') % path)
3714 3719
3715 3720 if b'sharedrepo' in createopts:
3716 3721 sharedpath = createopts[b'sharedrepo'].sharedpath
3717 3722
3718 3723 if createopts.get(b'sharedrelative'):
3719 3724 try:
3720 3725 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3721 3726 except (IOError, ValueError) as e:
3722 3727 # ValueError is raised on Windows if the drive letters differ
3723 3728 # on each path.
3724 3729 raise error.Abort(
3725 3730 _(b'cannot calculate relative path'),
3726 3731 hint=stringutil.forcebytestr(e),
3727 3732 )
3728 3733
3729 3734 if not wdirvfs.exists():
3730 3735 wdirvfs.makedirs()
3731 3736
3732 3737 hgvfs.makedir(notindexed=True)
3733 3738 if b'sharedrepo' not in createopts:
3734 3739 hgvfs.mkdir(b'cache')
3735 3740 hgvfs.mkdir(b'wcache')
3736 3741
3737 3742 if b'store' in requirements and b'sharedrepo' not in createopts:
3738 3743 hgvfs.mkdir(b'store')
3739 3744
3740 3745 # We create an invalid changelog outside the store so very old
3741 3746 # Mercurial versions (which didn't know about the requirements
3742 3747 # file) encounter an error on reading the changelog. This
3743 3748 # effectively locks out old clients and prevents them from
3744 3749 # mucking with a repo in an unknown format.
3745 3750 #
3746 3751 # The revlog header has version 2, which won't be recognized by
3747 3752 # such old clients.
3748 3753 hgvfs.append(
3749 3754 b'00changelog.i',
3750 3755 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3751 3756 b'layout',
3752 3757 )
3753 3758
3754 3759 scmutil.writerequires(hgvfs, requirements)
3755 3760
3756 3761 # Write out file telling readers where to find the shared store.
3757 3762 if b'sharedrepo' in createopts:
3758 3763 hgvfs.write(b'sharedpath', sharedpath)
3759 3764
3760 3765 if createopts.get(b'shareditems'):
3761 3766 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3762 3767 hgvfs.write(b'shared', shared)
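# A minimal usage sketch (path and options are illustrative): create an
# LFS-enabled repository and then open it with makelocalrepository(), as
# instance() does above.
#
#     createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
#     repo = makelocalrepository(ui, b'/tmp/newrepo')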
3763 3768
3764 3769
3765 3770 def poisonrepository(repo):
3766 3771 """Poison a repository instance so it can no longer be used."""
3767 3772 # Perform any cleanup on the instance.
3768 3773 repo.close()
3769 3774
3770 3775 # Our strategy is to replace the type of the object with one that
3771 3776 # has all attribute lookups result in error.
3772 3777 #
3773 3778 # But we have to allow the close() method because some constructors
3774 3779 # of repos call close() on repo references.
3775 3780 class poisonedrepository(object):
3776 3781 def __getattribute__(self, item):
3777 3782 if item == 'close':
3778 3783 return object.__getattribute__(self, item)
3779 3784
3780 3785 raise error.ProgrammingError(
3781 3786 b'repo instances should not be used after unshare'
3782 3787 )
3783 3788
3784 3789 def close(self):
3785 3790 pass
3786 3791
3787 3792 # We may have a repoview, which intercepts __setattr__. So be sure
3788 3793 # we operate at the lowest level possible.
3789 3794 object.__setattr__(repo, '__class__', poisonedrepository)