upgrade: add support to downgrade share safe mode...
Pulkit Goyal
r46618:c6a1fa42 default
@@ -1,3566 +1,3576 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revset,
63 63 revsetlang,
64 64 scmutil,
65 65 sparse,
66 66 store as storemod,
67 67 subrepoutil,
68 68 tags as tagsmod,
69 69 transaction,
70 70 txnutil,
71 71 util,
72 72 vfs as vfsmod,
73 73 )
74 74
75 75 from .interfaces import (
76 76 repository,
77 77 util as interfaceutil,
78 78 )
79 79
80 80 from .utils import (
81 81 hashutil,
82 82 procutil,
83 83 stringutil,
84 84 )
85 85
86 86 from .revlogutils import constants as revlogconst
87 87
88 88 release = lockmod.release
89 89 urlerr = util.urlerr
90 90 urlreq = util.urlreq
91 91
92 92 # set of (path, vfs-location) tuples. vfs-location is:
93 93 # - 'plain' for vfs relative paths
94 94 # - '' for svfs relative paths
95 95 _cachedfiles = set()
96 96
97 97
98 98 class _basefilecache(scmutil.filecache):
99 99 """All filecache usage on repo are done for logic that should be unfiltered"""
100 100
101 101 def __get__(self, repo, type=None):
102 102 if repo is None:
103 103 return self
104 104 # proxy to unfiltered __dict__ since filtered repo has no entry
105 105 unfi = repo.unfiltered()
106 106 try:
107 107 return unfi.__dict__[self.sname]
108 108 except KeyError:
109 109 pass
110 110 return super(_basefilecache, self).__get__(unfi, type)
111 111
112 112 def set(self, repo, value):
113 113 return super(_basefilecache, self).set(repo.unfiltered(), value)
114 114
115 115
116 116 class repofilecache(_basefilecache):
117 117 """filecache for files in .hg but outside of .hg/store"""
118 118
119 119 def __init__(self, *paths):
120 120 super(repofilecache, self).__init__(*paths)
121 121 for path in paths:
122 122 _cachedfiles.add((path, b'plain'))
123 123
124 124 def join(self, obj, fname):
125 125 return obj.vfs.join(fname)
126 126
127 127
128 128 class storecache(_basefilecache):
129 129 """filecache for files in the store"""
130 130
131 131 def __init__(self, *paths):
132 132 super(storecache, self).__init__(*paths)
133 133 for path in paths:
134 134 _cachedfiles.add((path, b''))
135 135
136 136 def join(self, obj, fname):
137 137 return obj.sjoin(fname)
138 138
139 139
140 140 class mixedrepostorecache(_basefilecache):
141 141 """filecache for a mix files in .hg/store and outside"""
142 142
143 143 def __init__(self, *pathsandlocations):
144 144 # scmutil.filecache only uses the path for passing back into our
145 145 # join(), so we can safely pass a list of paths and locations
146 146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
147 147 _cachedfiles.update(pathsandlocations)
148 148
149 149 def join(self, obj, fnameandlocation):
150 150 fname, location = fnameandlocation
151 151 if location == b'plain':
152 152 return obj.vfs.join(fname)
153 153 else:
154 154 if location != b'':
155 155 raise error.ProgrammingError(
156 156 b'unexpected location: %s' % location
157 157 )
158 158 return obj.sjoin(fname)
159 159
160 160
161 161 def isfilecached(repo, name):
162 162 """check if a repo has already cached "name" filecache-ed property
163 163
164 164 This returns (cachedobj-or-None, iscached) tuple.
165 165 """
166 166 cacheentry = repo.unfiltered()._filecache.get(name, None)
167 167 if not cacheentry:
168 168 return None, False
169 169 return cacheentry.obj, True
170 170
171 171
172 172 class unfilteredpropertycache(util.propertycache):
173 173 """propertycache that apply to unfiltered repo only"""
174 174
175 175 def __get__(self, repo, type=None):
176 176 unfi = repo.unfiltered()
177 177 if unfi is repo:
178 178 return super(unfilteredpropertycache, self).__get__(unfi)
179 179 return getattr(unfi, self.name)
180 180
181 181
182 182 class filteredpropertycache(util.propertycache):
183 183 """propertycache that must take filtering in account"""
184 184
185 185 def cachevalue(self, obj, value):
186 186 object.__setattr__(obj, self.name, value)
187 187
188 188
189 189 def hasunfilteredcache(repo, name):
190 190 """check if a repo has an unfilteredpropertycache value for <name>"""
191 191 return name in vars(repo.unfiltered())
192 192
193 193
194 194 def unfilteredmethod(orig):
195 195 """decorate method that always need to be run on unfiltered version"""
196 196
197 197 @functools.wraps(orig)
198 198 def wrapper(repo, *args, **kwargs):
199 199 return orig(repo.unfiltered(), *args, **kwargs)
200 200
201 201 return wrapper
202 202
203 203
204 204 moderncaps = {
205 205 b'lookup',
206 206 b'branchmap',
207 207 b'pushkey',
208 208 b'known',
209 209 b'getbundle',
210 210 b'unbundle',
211 211 }
212 212 legacycaps = moderncaps.union({b'changegroupsubset'})
213 213
214 214
215 215 @interfaceutil.implementer(repository.ipeercommandexecutor)
216 216 class localcommandexecutor(object):
217 217 def __init__(self, peer):
218 218 self._peer = peer
219 219 self._sent = False
220 220 self._closed = False
221 221
222 222 def __enter__(self):
223 223 return self
224 224
225 225 def __exit__(self, exctype, excvalue, exctb):
226 226 self.close()
227 227
228 228 def callcommand(self, command, args):
229 229 if self._sent:
230 230 raise error.ProgrammingError(
231 231 b'callcommand() cannot be used after sendcommands()'
232 232 )
233 233
234 234 if self._closed:
235 235 raise error.ProgrammingError(
236 236 b'callcommand() cannot be used after close()'
237 237 )
238 238
239 239 # We don't need to support anything fancy. Just call the named
240 240 # method on the peer and return a resolved future.
241 241 fn = getattr(self._peer, pycompat.sysstr(command))
242 242
243 243 f = pycompat.futures.Future()
244 244
245 245 try:
246 246 result = fn(**pycompat.strkwargs(args))
247 247 except Exception:
248 248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
249 249 else:
250 250 f.set_result(result)
251 251
252 252 return f
253 253
254 254 def sendcommands(self):
255 255 self._sent = True
256 256
257 257 def close(self):
258 258 self._closed = True
259 259
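A hedged usage sketch of the executor above (names mirror this module; the command and argument are illustrative): callcommand() resolves the named peer method immediately and returns an already-resolved future, so sendcommands() is effectively a no-op for the local case.

    # minimal sketch, assuming 'peer' is a localpeer instance
    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
        e.sendcommands()   # no-op locally; kept for API parity with wire peers
    node = f.result()      # the future was resolved synchronously above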
260 260
261 261 @interfaceutil.implementer(repository.ipeercommands)
262 262 class localpeer(repository.peer):
263 263 '''peer for a local repo; reflects only the most recent API'''
264 264
265 265 def __init__(self, repo, caps=None):
266 266 super(localpeer, self).__init__()
267 267
268 268 if caps is None:
269 269 caps = moderncaps.copy()
270 270 self._repo = repo.filtered(b'served')
271 271 self.ui = repo.ui
272 272 self._caps = repo._restrictcapabilities(caps)
273 273
274 274 # Begin of _basepeer interface.
275 275
276 276 def url(self):
277 277 return self._repo.url()
278 278
279 279 def local(self):
280 280 return self._repo
281 281
282 282 def peer(self):
283 283 return self
284 284
285 285 def canpush(self):
286 286 return True
287 287
288 288 def close(self):
289 289 self._repo.close()
290 290
291 291 # End of _basepeer interface.
292 292
293 293 # Begin of _basewirecommands interface.
294 294
295 295 def branchmap(self):
296 296 return self._repo.branchmap()
297 297
298 298 def capabilities(self):
299 299 return self._caps
300 300
301 301 def clonebundles(self):
302 302 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
303 303
304 304 def debugwireargs(self, one, two, three=None, four=None, five=None):
305 305 """Used to test argument passing over the wire"""
306 306 return b"%s %s %s %s %s" % (
307 307 one,
308 308 two,
309 309 pycompat.bytestr(three),
310 310 pycompat.bytestr(four),
311 311 pycompat.bytestr(five),
312 312 )
313 313
314 314 def getbundle(
315 315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
316 316 ):
317 317 chunks = exchange.getbundlechunks(
318 318 self._repo,
319 319 source,
320 320 heads=heads,
321 321 common=common,
322 322 bundlecaps=bundlecaps,
323 323 **kwargs
324 324 )[1]
325 325 cb = util.chunkbuffer(chunks)
326 326
327 327 if exchange.bundle2requested(bundlecaps):
328 328 # When requesting a bundle2, getbundle returns a stream to make the
329 329 # wire level function happier. We need to build a proper object
330 330 # from it in local peer.
331 331 return bundle2.getunbundler(self.ui, cb)
332 332 else:
333 333 return changegroup.getunbundler(b'01', cb, None)
334 334
335 335 def heads(self):
336 336 return self._repo.heads()
337 337
338 338 def known(self, nodes):
339 339 return self._repo.known(nodes)
340 340
341 341 def listkeys(self, namespace):
342 342 return self._repo.listkeys(namespace)
343 343
344 344 def lookup(self, key):
345 345 return self._repo.lookup(key)
346 346
347 347 def pushkey(self, namespace, key, old, new):
348 348 return self._repo.pushkey(namespace, key, old, new)
349 349
350 350 def stream_out(self):
351 351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
352 352
353 353 def unbundle(self, bundle, heads, url):
354 354 """apply a bundle on a repo
355 355
356 356 This function handles the repo locking itself."""
357 357 try:
358 358 try:
359 359 bundle = exchange.readbundle(self.ui, bundle, None)
360 360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
361 361 if util.safehasattr(ret, b'getchunks'):
362 362 # This is a bundle20 object, turn it into an unbundler.
363 363 # This little dance should be dropped eventually when the
364 364 # API is finally improved.
365 365 stream = util.chunkbuffer(ret.getchunks())
366 366 ret = bundle2.getunbundler(self.ui, stream)
367 367 return ret
368 368 except Exception as exc:
369 369 # If the exception contains output salvaged from a bundle2
370 370 # reply, we need to make sure it is printed before continuing
371 371 # to fail. So we build a bundle2 with such output and consume
372 372 # it directly.
373 373 #
374 374 # This is not very elegant but allows a "simple" solution for
375 375 # issue4594
376 376 output = getattr(exc, '_bundle2salvagedoutput', ())
377 377 if output:
378 378 bundler = bundle2.bundle20(self._repo.ui)
379 379 for out in output:
380 380 bundler.addpart(out)
381 381 stream = util.chunkbuffer(bundler.getchunks())
382 382 b = bundle2.getunbundler(self.ui, stream)
383 383 bundle2.processbundle(self._repo, b)
384 384 raise
385 385 except error.PushRaced as exc:
386 386 raise error.ResponseError(
387 387 _(b'push failed:'), stringutil.forcebytestr(exc)
388 388 )
389 389
390 390 # End of _basewirecommands interface.
391 391
392 392 # Begin of peer interface.
393 393
394 394 def commandexecutor(self):
395 395 return localcommandexecutor(self)
396 396
397 397 # End of peer interface.
398 398
399 399
400 400 @interfaceutil.implementer(repository.ipeerlegacycommands)
401 401 class locallegacypeer(localpeer):
402 402 """peer extension which implements legacy methods too; used for tests with
403 403 restricted capabilities"""
404 404
405 405 def __init__(self, repo):
406 406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
407 407
408 408 # Begin of baselegacywirecommands interface.
409 409
410 410 def between(self, pairs):
411 411 return self._repo.between(pairs)
412 412
413 413 def branches(self, nodes):
414 414 return self._repo.branches(nodes)
415 415
416 416 def changegroup(self, nodes, source):
417 417 outgoing = discovery.outgoing(
418 418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
419 419 )
420 420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
421 421
422 422 def changegroupsubset(self, bases, heads, source):
423 423 outgoing = discovery.outgoing(
424 424 self._repo, missingroots=bases, ancestorsof=heads
425 425 )
426 426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
427 427
428 428 # End of baselegacywirecommands interface.
429 429
430 430
431 431 # Functions receiving (ui, features) that extensions can register to impact
432 432 # the ability to load repositories with custom requirements. Only
433 433 # functions defined in loaded extensions are called.
434 434 #
435 435 # The function receives a set of requirement strings that the repository
436 436 # is capable of opening. Functions will typically add elements to the
437 437 # set to reflect that the extension knows how to handle those requirements.
438 438 featuresetupfuncs = set()
439 439
440 440
441 441 def _getsharedvfs(hgvfs, requirements):
442 442 """returns the vfs object pointing to root of shared source
443 443 repo for a shared repository
444 444
445 445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
446 446 requirements is a set of requirements of current repo (shared one)
447 447 """
448 448 # The ``shared`` or ``relshared`` requirements indicate the
449 449 # store lives in the path contained in the ``.hg/sharedpath`` file.
450 450 # This is an absolute path for ``shared`` and relative to
451 451 # ``.hg/`` for ``relshared``.
452 452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
453 453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
454 454 sharedpath = hgvfs.join(sharedpath)
455 455
456 456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
457 457
458 458 if not sharedvfs.exists():
459 459 raise error.RepoError(
460 460 _(b'.hg/sharedpath points to nonexistent directory %s')
461 461 % sharedvfs.base
462 462 )
463 463 return sharedvfs
464 464
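An editorial aside on the two flavors handled above: with the `shared` requirement, `.hg/sharedpath` stores an absolute path to the source repository's `.hg/`; with `relshared` it stores a path relative to the current `.hg/`, which is why the helper joins it against hgvfs first. A minimal sketch with hypothetical file contents:

    # shared:    .hg/sharedpath contains b'/home/alice/main/.hg' (used as-is)
    # relshared: .hg/sharedpath contains b'../main/.hg' (joined against .hg/)
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)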
465 465
466 466 def _readrequires(vfs, allowmissing):
467 467 """reads the require file present at root of this vfs
468 468 and return a set of requirements
469 469
470 470 If allowmissing is True, we suppress ENOENT if raised"""
471 471 # requires file contains a newline-delimited list of
472 472 # features/capabilities the opener (us) must have in order to use
473 473 # the repository. This file was introduced in Mercurial 0.9.2,
474 474 # which means very old repositories may not have one. We assume
475 475 # a missing file translates to no requirements.
476 476 try:
477 477 requirements = set(vfs.read(b'requires').splitlines())
478 478 except IOError as e:
479 479 if not (allowmissing and e.errno == errno.ENOENT):
480 480 raise
481 481 requirements = set()
482 482 return requirements
483 483
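For orientation, a `.hg/requires` file is just a newline-delimited list of feature names. A hedged sketch of reading one back with the helper above (the path is hypothetical):

    # .hg/requires for a typical modern repository might contain:
    #   dotencode
    #   fncache
    #   generaldelta
    #   revlogv1
    #   sparserevlog
    #   store
    vfs = vfsmod.vfs(b'/path/to/repo/.hg')   # hypothetical location
    reqs = _readrequires(vfs, True)          # -> set of the lines above
    assert b'store' in reqs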
484 484
485 485 def makelocalrepository(baseui, path, intents=None):
486 486 """Create a local repository object.
487 487
488 488 Given arguments needed to construct a local repository, this function
489 489 performs various early repository loading functionality (such as
490 490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
491 491 the repository can be opened, derives a type suitable for representing
492 492 that repository, and returns an instance of it.
493 493
494 494 The returned object conforms to the ``repository.completelocalrepository``
495 495 interface.
496 496
497 497 The repository type is derived by calling a series of factory functions
498 498 for each aspect/interface of the final repository. These are defined by
499 499 ``REPO_INTERFACES``.
500 500
501 501 Each factory function is called to produce a type implementing a specific
502 502 interface. The cumulative list of returned types will be combined into a
503 503 new type and that type will be instantiated to represent the local
504 504 repository.
505 505
506 506 The factory functions each receive various state that may be consulted
507 507 as part of deriving a type.
508 508
509 509 Extensions should wrap these factory functions to customize repository type
510 510 creation. Note that an extension's wrapped function may be called even if
511 511 that extension is not loaded for the repo being constructed. Extensions
512 512 should check if their ``__name__`` appears in the
513 513 ``extensionmodulenames`` set passed to the factory function and no-op if
514 514 not.
515 515 """
516 516 ui = baseui.copy()
517 517 # Prevent copying repo configuration.
518 518 ui.copy = baseui.copy
519 519
520 520 # Working directory VFS rooted at repository root.
521 521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
522 522
523 523 # Main VFS for .hg/ directory.
524 524 hgpath = wdirvfs.join(b'.hg')
525 525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
526 526 # Whether this repository is a shared one or not
527 527 shared = False
528 528 # If this repository is shared, vfs pointing to shared repo
529 529 sharedvfs = None
530 530
531 531 # The .hg/ path should exist and should be a directory. All other
532 532 # cases are errors.
533 533 if not hgvfs.isdir():
534 534 try:
535 535 hgvfs.stat()
536 536 except OSError as e:
537 537 if e.errno != errno.ENOENT:
538 538 raise
539 539 except ValueError as e:
540 540 # Can be raised on Python 3.8 when path is invalid.
541 541 raise error.Abort(
542 542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
543 543 )
544 544
545 545 raise error.RepoError(_(b'repository %s not found') % path)
546 546
547 547 requirements = _readrequires(hgvfs, True)
548 548 shared = (
549 549 requirementsmod.SHARED_REQUIREMENT in requirements
550 550 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
551 551 )
552 552 if shared:
553 553 sharedvfs = _getsharedvfs(hgvfs, requirements)
554 554
555 555 # if .hg/requires contains the sharesafe requirement, it means
556 556 # there exists a `.hg/store/requires` too and we should read it
557 557 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
558 558 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
559 559 # is not present; refer to checkrequirementscompat() for that
560 560 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
561
562 if (
563 shared
564 and requirementsmod.SHARESAFE_REQUIREMENT
565 not in _readrequires(sharedvfs, True)
566 ):
567 raise error.Abort(
568 _(b"share source does not support exp-sharesafe requirement")
569 )
570
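This hunk is the heart of the change: if the current repository records the share-safe requirement but the share source it points at no longer does (for example, after the source was downgraded out of share-safe mode, which this patch adds support for), opening now aborts up front instead of failing later with a confusing error. A hedged sketch of the scenario being guarded against:

    # $ hg init main --config format.exp-share-safe=True
    # $ hg share main work        # 'work' records the share-safe requirement
    # ... 'main' is then downgraded out of share-safe mode ...
    # $ hg -R work log
    # abort: share source does not support exp-sharesafe requirement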
561 571 if shared:
562 572 # This is a shared repo
563 573 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
564 574 else:
565 575 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
566 576
567 577 requirements |= _readrequires(storevfs, False)
568 578
569 579 # The .hg/hgrc file may load extensions or contain config options
570 580 # that influence repository construction. Attempt to load it and
571 581 # process any new extensions that it may have pulled in.
572 582 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
573 583 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
574 584 extensions.loadall(ui)
575 585 extensions.populateui(ui)
576 586
577 587 # Set of module names of extensions loaded for this repository.
578 588 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
579 589
580 590 supportedrequirements = gathersupportedrequirements(ui)
581 591
582 592 # We first validate the requirements are known.
583 593 ensurerequirementsrecognized(requirements, supportedrequirements)
584 594
585 595 # Then we validate that the known set is reasonable to use together.
586 596 ensurerequirementscompatible(ui, requirements)
587 597
588 598 # TODO there are unhandled edge cases related to opening repositories with
589 599 # shared storage. If storage is shared, we should also test for requirements
590 600 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
591 601 # that repo, as that repo may load extensions needed to open it. This is a
592 602 # bit complicated because we don't want the other hgrc to overwrite settings
593 603 # in this hgrc.
594 604 #
595 605 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
596 606 # file when sharing repos. But if a requirement is added after the share is
597 607 # performed, thereby introducing a new requirement for the opener, we
598 608 # will not see that and could encounter a run-time error interacting with
599 609 # that shared store since it has an unknown-to-us requirement.
600 610
601 611 # At this point, we know we should be capable of opening the repository.
602 612 # Now get on with doing that.
603 613
604 614 features = set()
605 615
606 616 # The "store" part of the repository holds versioned data. How it is
607 617 # accessed is determined by various requirements. If `shared` or
608 618 # `relshared` requirements are present, this indicates the current repository
609 619 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
610 620 if shared:
611 621 storebasepath = sharedvfs.base
612 622 cachepath = sharedvfs.join(b'cache')
613 623 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
614 624 else:
615 625 storebasepath = hgvfs.base
616 626 cachepath = hgvfs.join(b'cache')
617 627 wcachepath = hgvfs.join(b'wcache')
618 628
619 629 # The store has changed over time and the exact layout is dictated by
620 630 # requirements. The store interface abstracts differences across all
621 631 # of them.
622 632 store = makestore(
623 633 requirements,
624 634 storebasepath,
625 635 lambda base: vfsmod.vfs(base, cacheaudited=True),
626 636 )
627 637 hgvfs.createmode = store.createmode
628 638
629 639 storevfs = store.vfs
630 640 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
631 641
632 642 # The cache vfs is used to manage cache files.
633 643 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
634 644 cachevfs.createmode = store.createmode
635 645 # The cache vfs is used to manage cache files related to the working copy
636 646 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
637 647 wcachevfs.createmode = store.createmode
638 648
639 649 # Now resolve the type for the repository object. We do this by repeatedly
640 650 # calling a factory function to produce types for specific aspects of the
641 651 # repo's operation. The aggregate returned types are used as base classes
642 652 # for a dynamically-derived type, which will represent our new repository.
643 653
644 654 bases = []
645 655 extrastate = {}
646 656
647 657 for iface, fn in REPO_INTERFACES:
648 658 # We pass all potentially useful state to give extensions tons of
649 659 # flexibility.
650 660 typ = fn()(
651 661 ui=ui,
652 662 intents=intents,
653 663 requirements=requirements,
654 664 features=features,
655 665 wdirvfs=wdirvfs,
656 666 hgvfs=hgvfs,
657 667 store=store,
658 668 storevfs=storevfs,
659 669 storeoptions=storevfs.options,
660 670 cachevfs=cachevfs,
661 671 wcachevfs=wcachevfs,
662 672 extensionmodulenames=extensionmodulenames,
663 673 extrastate=extrastate,
664 674 baseclasses=bases,
665 675 )
666 676
667 677 if not isinstance(typ, type):
668 678 raise error.ProgrammingError(
669 679 b'unable to construct type for %s' % iface
670 680 )
671 681
672 682 bases.append(typ)
673 683
674 684 # type() allows you to use characters in type names that wouldn't be
675 685 # recognized as Python symbols in source code. We abuse that to add
676 686 # rich information about our constructed repo.
677 687 name = pycompat.sysstr(
678 688 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
679 689 )
680 690
681 691 cls = type(name, tuple(bases), {})
682 692
683 693 return cls(
684 694 baseui=baseui,
685 695 ui=ui,
686 696 origroot=path,
687 697 wdirvfs=wdirvfs,
688 698 hgvfs=hgvfs,
689 699 requirements=requirements,
690 700 supportedrequirements=supportedrequirements,
691 701 sharedpath=storebasepath,
692 702 store=store,
693 703 cachevfs=cachevfs,
694 704 wcachevfs=wcachevfs,
695 705 features=features,
696 706 intents=intents,
697 707 )
698 708
699 709
700 710 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
701 711 """Load hgrc files/content into a ui instance.
702 712
703 713 This is called during repository opening to load any additional
704 714 config files or settings relevant to the current repository.
705 715
706 716 Returns a bool indicating whether any additional configs were loaded.
707 717
708 718 Extensions should monkeypatch this function to modify how per-repo
709 719 configs are loaded. For example, an extension may wish to pull in
710 720 configs from alternate files or sources.
711 721
712 722 sharedvfs is vfs object pointing to source repo if the current one is a
713 723 shared one
714 724 """
715 725 if not rcutil.use_repo_hgrc():
716 726 return False
717 727
718 728 ret = False
719 729 # first load config from shared source if we have to
720 730 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
721 731 try:
722 732 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
723 733 ret = True
724 734 except IOError:
725 735 pass
726 736
727 737 try:
728 738 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
729 739 ret = True
730 740 except IOError:
731 741 pass
732 742
733 743 try:
734 744 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
735 745 ret = True
736 746 except IOError:
737 747 pass
738 748
739 749 return ret
740 750
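To summarize the read order implemented above (later reads generally override earlier ones for the same key), the effective per-repo precedence, lowest to highest, is:

    # 1. <share source>/.hg/hgrc       (only for share-safe shared repos)
    # 2. <repo>/.hg/hgrc
    # 3. <repo>/.hg/hgrc-not-shared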
741 751
742 752 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
743 753 """Perform additional actions after .hg/hgrc is loaded.
744 754
745 755 This function is called during repository loading immediately after
746 756 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
747 757
748 758 The function can be used to validate configs, automatically add
749 759 options (including extensions) based on requirements, etc.
750 760 """
751 761
752 762 # Map of requirements to list of extensions to load automatically when
753 763 # requirement is present.
754 764 autoextensions = {
755 765 b'git': [b'git'],
756 766 b'largefiles': [b'largefiles'],
757 767 b'lfs': [b'lfs'],
758 768 }
759 769
760 770 for requirement, names in sorted(autoextensions.items()):
761 771 if requirement not in requirements:
762 772 continue
763 773
764 774 for name in names:
765 775 if not ui.hasconfig(b'extensions', name):
766 776 ui.setconfig(b'extensions', name, b'', source=b'autoload')
767 777
768 778
769 779 def gathersupportedrequirements(ui):
770 780 """Determine the complete set of recognized requirements."""
771 781 # Start with all requirements supported by this file.
772 782 supported = set(localrepository._basesupported)
773 783
774 784 # Execute ``featuresetupfuncs`` entries if they belong to an extension
775 785 # relevant to this ui instance.
776 786 modules = {m.__name__ for n, m in extensions.extensions(ui)}
777 787
778 788 for fn in featuresetupfuncs:
779 789 if fn.__module__ in modules:
780 790 fn(ui, supported)
781 791
782 792 # Add derived requirements from registered compression engines.
783 793 for name in util.compengines:
784 794 engine = util.compengines[name]
785 795 if engine.available() and engine.revlogheader():
786 796 supported.add(b'exp-compression-%s' % name)
787 797 if engine.name() == b'zstd':
788 798 supported.add(b'revlog-compression-zstd')
789 799
790 800 return supported
791 801
792 802
793 803 def ensurerequirementsrecognized(requirements, supported):
794 804 """Validate that a set of local requirements is recognized.
795 805
796 806 Receives a set of requirements. Raises an ``error.RepoError`` if there
797 807 exists any requirement in that set that currently loaded code doesn't
798 808 recognize.
799 809
800 810 Returns a set of supported requirements.
801 811 """
802 812 missing = set()
803 813
804 814 for requirement in requirements:
805 815 if requirement in supported:
806 816 continue
807 817
808 818 if not requirement or not requirement[0:1].isalnum():
809 819 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
810 820
811 821 missing.add(requirement)
812 822
813 823 if missing:
814 824 raise error.RequirementError(
815 825 _(b'repository requires features unknown to this Mercurial: %s')
816 826 % b' '.join(sorted(missing)),
817 827 hint=_(
818 828 b'see https://mercurial-scm.org/wiki/MissingRequirement '
819 829 b'for more information'
820 830 ),
821 831 )
822 832
823 833
824 834 def ensurerequirementscompatible(ui, requirements):
825 835 """Validates that a set of recognized requirements is mutually compatible.
826 836
827 837 Some requirements may not be compatible with others or require
828 838 config options that aren't enabled. This function is called during
829 839 repository opening to ensure that the set of requirements needed
830 840 to open a repository is sane and compatible with config options.
831 841
832 842 Extensions can monkeypatch this function to perform additional
833 843 checking.
834 844
835 845 ``error.RepoError`` should be raised on failure.
836 846 """
837 847 if (
838 848 requirementsmod.SPARSE_REQUIREMENT in requirements
839 849 and not sparse.enabled
840 850 ):
841 851 raise error.RepoError(
842 852 _(
843 853 b'repository is using sparse feature but '
844 854 b'sparse is not enabled; enable the '
845 855 b'"sparse" extensions to access'
846 856 )
847 857 )
848 858
849 859
850 860 def makestore(requirements, path, vfstype):
851 861 """Construct a storage object for a repository."""
852 862 if b'store' in requirements:
853 863 if b'fncache' in requirements:
854 864 return storemod.fncachestore(
855 865 path, vfstype, b'dotencode' in requirements
856 866 )
857 867
858 868 return storemod.encodedstore(path, vfstype)
859 869
860 870 return storemod.basicstore(path, vfstype)
861 871
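The dispatch above can be read as a small decision table (an editorial summary derived directly from the code):

    # requirements present      -> store class
    # 'store' and 'fncache'     -> storemod.fncachestore ('dotencode' toggles
    #                              the filename encoding scheme)
    # 'store' only              -> storemod.encodedstore
    # neither (very old repos)  -> storemod.basicstore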
862 872
863 873 def resolvestorevfsoptions(ui, requirements, features):
864 874 """Resolve the options to pass to the store vfs opener.
865 875
866 876 The returned dict is used to influence behavior of the storage layer.
867 877 """
868 878 options = {}
869 879
870 880 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
871 881 options[b'treemanifest'] = True
872 882
873 883 # experimental config: format.manifestcachesize
874 884 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
875 885 if manifestcachesize is not None:
876 886 options[b'manifestcachesize'] = manifestcachesize
877 887
878 888 # In the absence of another requirement superseding a revlog-related
879 889 # requirement, we have to assume the repo is using revlog version 0.
880 890 # This revlog format is super old and we don't bother trying to parse
881 891 # opener options for it because those options wouldn't do anything
882 892 # meaningful on such old repos.
883 893 if (
884 894 b'revlogv1' in requirements
885 895 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
886 896 ):
887 897 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
888 898 else: # explicitly mark repo as using revlogv0
889 899 options[b'revlogv0'] = True
890 900
891 901 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
892 902 options[b'copies-storage'] = b'changeset-sidedata'
893 903 else:
894 904 writecopiesto = ui.config(b'experimental', b'copies.write-to')
895 905 copiesextramode = (b'changeset-only', b'compatibility')
896 906 if writecopiesto in copiesextramode:
897 907 options[b'copies-storage'] = b'extra'
898 908
899 909 return options
900 910
901 911
902 912 def resolverevlogstorevfsoptions(ui, requirements, features):
903 913 """Resolve opener options specific to revlogs."""
904 914
905 915 options = {}
906 916 options[b'flagprocessors'] = {}
907 917
908 918 if b'revlogv1' in requirements:
909 919 options[b'revlogv1'] = True
910 920 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
911 921 options[b'revlogv2'] = True
912 922
913 923 if b'generaldelta' in requirements:
914 924 options[b'generaldelta'] = True
915 925
916 926 # experimental config: format.chunkcachesize
917 927 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
918 928 if chunkcachesize is not None:
919 929 options[b'chunkcachesize'] = chunkcachesize
920 930
921 931 deltabothparents = ui.configbool(
922 932 b'storage', b'revlog.optimize-delta-parent-choice'
923 933 )
924 934 options[b'deltabothparents'] = deltabothparents
925 935
926 936 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
927 937 lazydeltabase = False
928 938 if lazydelta:
929 939 lazydeltabase = ui.configbool(
930 940 b'storage', b'revlog.reuse-external-delta-parent'
931 941 )
932 942 if lazydeltabase is None:
933 943 lazydeltabase = not scmutil.gddeltaconfig(ui)
934 944 options[b'lazydelta'] = lazydelta
935 945 options[b'lazydeltabase'] = lazydeltabase
936 946
937 947 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
938 948 if 0 <= chainspan:
939 949 options[b'maxdeltachainspan'] = chainspan
940 950
941 951 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
942 952 if mmapindexthreshold is not None:
943 953 options[b'mmapindexthreshold'] = mmapindexthreshold
944 954
945 955 withsparseread = ui.configbool(b'experimental', b'sparse-read')
946 956 srdensitythres = float(
947 957 ui.config(b'experimental', b'sparse-read.density-threshold')
948 958 )
949 959 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
950 960 options[b'with-sparse-read'] = withsparseread
951 961 options[b'sparse-read-density-threshold'] = srdensitythres
952 962 options[b'sparse-read-min-gap-size'] = srmingapsize
953 963
954 964 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
955 965 options[b'sparse-revlog'] = sparserevlog
956 966 if sparserevlog:
957 967 options[b'generaldelta'] = True
958 968
959 969 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
960 970 options[b'side-data'] = sidedata
961 971
962 972 maxchainlen = None
963 973 if sparserevlog:
964 974 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
965 975 # experimental config: format.maxchainlen
966 976 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
967 977 if maxchainlen is not None:
968 978 options[b'maxchainlen'] = maxchainlen
969 979
970 980 for r in requirements:
971 981 # we allow multiple compression engine requirements to co-exist because,
972 982 # strictly speaking, revlog seems to support mixed compression styles.
973 983 #
974 984 # The compression used for new entries will be "the last one"
975 985 prefix = r.startswith
976 986 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
977 987 options[b'compengine'] = r.split(b'-', 2)[2]
978 988
979 989 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
980 990 if options[b'zlib.level'] is not None:
981 991 if not (0 <= options[b'zlib.level'] <= 9):
982 992 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
983 993 raise error.Abort(msg % options[b'zlib.level'])
984 994 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
985 995 if options[b'zstd.level'] is not None:
986 996 if not (0 <= options[b'zstd.level'] <= 22):
987 997 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
988 998 raise error.Abort(msg % options[b'zstd.level'])
989 999
990 1000 if requirementsmod.NARROW_REQUIREMENT in requirements:
991 1001 options[b'enableellipsis'] = True
992 1002
993 1003 if ui.configbool(b'experimental', b'rust.index'):
994 1004 options[b'rust.index'] = True
995 1005 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
996 1006 options[b'persistent-nodemap'] = True
997 1007 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
998 1008 options[b'persistent-nodemap.mmap'] = True
999 1009 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1000 1010 options[b'persistent-nodemap.mode'] = epnm
1001 1011 if ui.configbool(b'devel', b'persistent-nodemap'):
1002 1012 options[b'devel-force-nodemap'] = True
1003 1013
1004 1014 return options
1005 1015
1006 1016
1007 1017 def makemain(**kwargs):
1008 1018 """Produce a type conforming to ``ilocalrepositorymain``."""
1009 1019 return localrepository
1010 1020
1011 1021
1012 1022 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1013 1023 class revlogfilestorage(object):
1014 1024 """File storage when using revlogs."""
1015 1025
1016 1026 def file(self, path):
1017 1027 if path[0] == b'/':
1018 1028 path = path[1:]
1019 1029
1020 1030 return filelog.filelog(self.svfs, path)
1021 1031
1022 1032
1023 1033 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1024 1034 class revlognarrowfilestorage(object):
1025 1035 """File storage when using revlogs and narrow files."""
1026 1036
1027 1037 def file(self, path):
1028 1038 if path[0] == b'/':
1029 1039 path = path[1:]
1030 1040
1031 1041 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1032 1042
1033 1043
1034 1044 def makefilestorage(requirements, features, **kwargs):
1035 1045 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1036 1046 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1037 1047 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1038 1048
1039 1049 if requirementsmod.NARROW_REQUIREMENT in requirements:
1040 1050 return revlognarrowfilestorage
1041 1051 else:
1042 1052 return revlogfilestorage
1043 1053
1044 1054
1045 1055 # List of repository interfaces and factory functions for them. Each
1046 1056 # will be called in order during ``makelocalrepository()`` to iteratively
1047 1057 # derive the final type for a local repository instance. We capture the
1048 1058 # function as a lambda so we don't hold a reference and the module-level
1049 1059 # functions can be wrapped.
1050 1060 REPO_INTERFACES = [
1051 1061 (repository.ilocalrepositorymain, lambda: makemain),
1052 1062 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1053 1063 ]
1054 1064
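Since makelocalrepository() documents that extensions should wrap these factory functions, here is a minimal hedged sketch of such a wrapper (the extension name 'myext' and the derived class are hypothetical; the wrapfunction call follows the usual mercurial.extensions API):

    from mercurial import extensions, localrepo

    def _makemain(orig, **kwargs):
        cls = orig(**kwargs)
        if 'myext' not in kwargs['extensionmodulenames']:
            return cls  # extension not loaded for this repo: no-op
        # derive a new type to layer extra behaviour onto the repo class
        return type('myextrepo', (cls,), {})

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makemain', _makemain)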
1055 1065
1056 1066 @interfaceutil.implementer(repository.ilocalrepositorymain)
1057 1067 class localrepository(object):
1058 1068 """Main class for representing local repositories.
1059 1069
1060 1070 All local repositories are instances of this class.
1061 1071
1062 1072 Constructed on its own, instances of this class are not usable as
1063 1073 repository objects. To obtain a usable repository object, call
1064 1074 ``hg.repository()``, ``localrepo.instance()``, or
1065 1075 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1066 1076 ``instance()`` adds support for creating new repositories.
1067 1077 ``hg.repository()`` adds more extension integration, including calling
1068 1078 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1069 1079 used.
1070 1080 """
1071 1081
1072 1082 # obsolete experimental requirements:
1073 1083 # - manifestv2: An experimental new manifest format that allowed
1074 1084 # for stem compression of long paths. Experiment ended up not
1075 1085 # being successful (repository sizes went up due to worse delta
1076 1086 # chains), and the code was deleted in 4.6.
1077 1087 supportedformats = {
1078 1088 b'revlogv1',
1079 1089 b'generaldelta',
1080 1090 requirementsmod.TREEMANIFEST_REQUIREMENT,
1081 1091 requirementsmod.COPIESSDC_REQUIREMENT,
1082 1092 requirementsmod.REVLOGV2_REQUIREMENT,
1083 1093 requirementsmod.SIDEDATA_REQUIREMENT,
1084 1094 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1085 1095 requirementsmod.NODEMAP_REQUIREMENT,
1086 1096 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1087 1097 requirementsmod.SHARESAFE_REQUIREMENT,
1088 1098 }
1089 1099 _basesupported = supportedformats | {
1090 1100 b'store',
1091 1101 b'fncache',
1092 1102 requirementsmod.SHARED_REQUIREMENT,
1093 1103 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1094 1104 b'dotencode',
1095 1105 requirementsmod.SPARSE_REQUIREMENT,
1096 1106 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1097 1107 }
1098 1108
1099 1109 # list of prefixes for files which can be written without 'wlock'
1100 1110 # Extensions should extend this list when needed
1101 1111 _wlockfreeprefix = {
1102 1112 # We might consider requiring 'wlock' for the next
1103 1113 # two, but pretty much all the existing code assumes
1104 1114 # wlock is not needed so we keep them excluded for
1105 1115 # now.
1106 1116 b'hgrc',
1107 1117 b'requires',
1108 1118 # XXX cache is a complicated business; someone
1109 1119 # should investigate this in depth at some point
1110 1120 b'cache/',
1111 1121 # XXX shouldn't be dirstate covered by the wlock?
1112 1122 b'dirstate',
1113 1123 # XXX bisect was still a bit too messy at the time
1114 1124 # this changeset was introduced. Someone should fix
1115 1125 # the remaining bit and drop this line
1116 1126 b'bisect.state',
1117 1127 }
1118 1128
1119 1129 def __init__(
1120 1130 self,
1121 1131 baseui,
1122 1132 ui,
1123 1133 origroot,
1124 1134 wdirvfs,
1125 1135 hgvfs,
1126 1136 requirements,
1127 1137 supportedrequirements,
1128 1138 sharedpath,
1129 1139 store,
1130 1140 cachevfs,
1131 1141 wcachevfs,
1132 1142 features,
1133 1143 intents=None,
1134 1144 ):
1135 1145 """Create a new local repository instance.
1136 1146
1137 1147 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1138 1148 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1139 1149 object.
1140 1150
1141 1151 Arguments:
1142 1152
1143 1153 baseui
1144 1154 ``ui.ui`` instance that ``ui`` argument was based off of.
1145 1155
1146 1156 ui
1147 1157 ``ui.ui`` instance for use by the repository.
1148 1158
1149 1159 origroot
1150 1160 ``bytes`` path to working directory root of this repository.
1151 1161
1152 1162 wdirvfs
1153 1163 ``vfs.vfs`` rooted at the working directory.
1154 1164
1155 1165 hgvfs
1156 1166 ``vfs.vfs`` rooted at .hg/
1157 1167
1158 1168 requirements
1159 1169 ``set`` of bytestrings representing repository opening requirements.
1160 1170
1161 1171 supportedrequirements
1162 1172 ``set`` of bytestrings representing repository requirements that we
1163 1173 know how to open. May be a supetset of ``requirements``.
1164 1174
1165 1175 sharedpath
1166 1176 ``bytes`` defining the path to the storage base directory. Points to a
1167 1177 ``.hg/`` directory somewhere.
1168 1178
1169 1179 store
1170 1180 ``store.basicstore`` (or derived) instance providing access to
1171 1181 versioned storage.
1172 1182
1173 1183 cachevfs
1174 1184 ``vfs.vfs`` used for cache files.
1175 1185
1176 1186 wcachevfs
1177 1187 ``vfs.vfs`` used for cache files related to the working copy.
1178 1188
1179 1189 features
1180 1190 ``set`` of bytestrings defining features/capabilities of this
1181 1191 instance.
1182 1192
1183 1193 intents
1184 1194 ``set`` of system strings indicating what this repo will be used
1185 1195 for.
1186 1196 """
1187 1197 self.baseui = baseui
1188 1198 self.ui = ui
1189 1199 self.origroot = origroot
1190 1200 # vfs rooted at working directory.
1191 1201 self.wvfs = wdirvfs
1192 1202 self.root = wdirvfs.base
1193 1203 # vfs rooted at .hg/. Used to access most non-store paths.
1194 1204 self.vfs = hgvfs
1195 1205 self.path = hgvfs.base
1196 1206 self.requirements = requirements
1197 1207 self.supported = supportedrequirements
1198 1208 self.sharedpath = sharedpath
1199 1209 self.store = store
1200 1210 self.cachevfs = cachevfs
1201 1211 self.wcachevfs = wcachevfs
1202 1212 self.features = features
1203 1213
1204 1214 self.filtername = None
1205 1215
1206 1216 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1207 1217 b'devel', b'check-locks'
1208 1218 ):
1209 1219 self.vfs.audit = self._getvfsward(self.vfs.audit)
1210 1220 # A list of callbacks to shape the phase if no data were found.
1211 1221 # Callbacks are in the form: func(repo, roots) --> processed root.
1212 1222 # This list is to be filled by extensions during repo setup
1213 1223 self._phasedefaults = []
1214 1224
1215 1225 color.setup(self.ui)
1216 1226
1217 1227 self.spath = self.store.path
1218 1228 self.svfs = self.store.vfs
1219 1229 self.sjoin = self.store.join
1220 1230 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1221 1231 b'devel', b'check-locks'
1222 1232 ):
1223 1233 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1224 1234 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1225 1235 else: # standard vfs
1226 1236 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1227 1237
1228 1238 self._dirstatevalidatewarned = False
1229 1239
1230 1240 self._branchcaches = branchmap.BranchMapCache()
1231 1241 self._revbranchcache = None
1232 1242 self._filterpats = {}
1233 1243 self._datafilters = {}
1234 1244 self._transref = self._lockref = self._wlockref = None
1235 1245
1236 1246 # A cache for various files under .hg/ that tracks file changes,
1237 1247 # (used by the filecache decorator)
1238 1248 #
1239 1249 # Maps a property name to its util.filecacheentry
1240 1250 self._filecache = {}
1241 1251
1242 1252 # hold sets of revision to be filtered
1243 1253 # should be cleared when something might have changed the filter value:
1244 1254 # - new changesets,
1245 1255 # - phase change,
1246 1256 # - new obsolescence marker,
1247 1257 # - working directory parent change,
1248 1258 # - bookmark changes
1249 1259 self.filteredrevcache = {}
1250 1260
1251 1261 # post-dirstate-status hooks
1252 1262 self._postdsstatus = []
1253 1263
1254 1264 # generic mapping between names and nodes
1255 1265 self.names = namespaces.namespaces()
1256 1266
1257 1267 # Key to signature value.
1258 1268 self._sparsesignaturecache = {}
1259 1269 # Signature to cached matcher instance.
1260 1270 self._sparsematchercache = {}
1261 1271
1262 1272 self._extrafilterid = repoview.extrafilter(ui)
1263 1273
1264 1274 self.filecopiesmode = None
1265 1275 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1266 1276 self.filecopiesmode = b'changeset-sidedata'
1267 1277
1268 1278 def _getvfsward(self, origfunc):
1269 1279 """build a ward for self.vfs"""
1270 1280 rref = weakref.ref(self)
1271 1281
1272 1282 def checkvfs(path, mode=None):
1273 1283 ret = origfunc(path, mode=mode)
1274 1284 repo = rref()
1275 1285 if (
1276 1286 repo is None
1277 1287 or not util.safehasattr(repo, b'_wlockref')
1278 1288 or not util.safehasattr(repo, b'_lockref')
1279 1289 ):
1280 1290 return
1281 1291 if mode in (None, b'r', b'rb'):
1282 1292 return
1283 1293 if path.startswith(repo.path):
1284 1294 # truncate name relative to the repository (.hg)
1285 1295 path = path[len(repo.path) + 1 :]
1286 1296 if path.startswith(b'cache/'):
1287 1297 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1288 1298 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1289 1299 # path prefixes covered by 'lock'
1290 1300 vfs_path_prefixes = (
1291 1301 b'journal.',
1292 1302 b'undo.',
1293 1303 b'strip-backup/',
1294 1304 b'cache/',
1295 1305 )
1296 1306 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1297 1307 if repo._currentlock(repo._lockref) is None:
1298 1308 repo.ui.develwarn(
1299 1309 b'write with no lock: "%s"' % path,
1300 1310 stacklevel=3,
1301 1311 config=b'check-locks',
1302 1312 )
1303 1313 elif repo._currentlock(repo._wlockref) is None:
1304 1314 # rest of vfs files are covered by 'wlock'
1305 1315 #
1306 1316 # exclude special files
1307 1317 for prefix in self._wlockfreeprefix:
1308 1318 if path.startswith(prefix):
1309 1319 return
1310 1320 repo.ui.develwarn(
1311 1321 b'write with no wlock: "%s"' % path,
1312 1322 stacklevel=3,
1313 1323 config=b'check-locks',
1314 1324 )
1315 1325 return ret
1316 1326
1317 1327 return checkvfs
1318 1328
1319 1329 def _getsvfsward(self, origfunc):
1320 1330 """build a ward for self.svfs"""
1321 1331 rref = weakref.ref(self)
1322 1332
1323 1333 def checksvfs(path, mode=None):
1324 1334 ret = origfunc(path, mode=mode)
1325 1335 repo = rref()
1326 1336 if repo is None or not util.safehasattr(repo, b'_lockref'):
1327 1337 return
1328 1338 if mode in (None, b'r', b'rb'):
1329 1339 return
1330 1340 if path.startswith(repo.sharedpath):
1331 1341 # truncate name relative to the repository (.hg)
1332 1342 path = path[len(repo.sharedpath) + 1 :]
1333 1343 if repo._currentlock(repo._lockref) is None:
1334 1344 repo.ui.develwarn(
1335 1345 b'write with no lock: "%s"' % path, stacklevel=4
1336 1346 )
1337 1347 return ret
1338 1348
1339 1349 return checksvfs
1340 1350
1341 1351 def close(self):
1342 1352 self._writecaches()
1343 1353
1344 1354 def _writecaches(self):
1345 1355 if self._revbranchcache:
1346 1356 self._revbranchcache.write()
1347 1357
1348 1358 def _restrictcapabilities(self, caps):
1349 1359 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1350 1360 caps = set(caps)
1351 1361 capsblob = bundle2.encodecaps(
1352 1362 bundle2.getrepocaps(self, role=b'client')
1353 1363 )
1354 1364 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1355 1365 return caps
1356 1366
1357 1367 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1358 1368 # self -> auditor -> self._checknested -> self
1359 1369
1360 1370 @property
1361 1371 def auditor(self):
1362 1372 # This is only used by context.workingctx.match in order to
1363 1373 # detect files in subrepos.
1364 1374 return pathutil.pathauditor(self.root, callback=self._checknested)
1365 1375
1366 1376 @property
1367 1377 def nofsauditor(self):
1368 1378 # This is only used by context.basectx.match in order to detect
1369 1379 # files in subrepos.
1370 1380 return pathutil.pathauditor(
1371 1381 self.root, callback=self._checknested, realfs=False, cached=True
1372 1382 )
1373 1383
1374 1384 def _checknested(self, path):
1375 1385 """Determine if path is a legal nested repository."""
1376 1386 if not path.startswith(self.root):
1377 1387 return False
1378 1388 subpath = path[len(self.root) + 1 :]
1379 1389 normsubpath = util.pconvert(subpath)
1380 1390
1381 1391 # XXX: Checking against the current working copy is wrong in
1382 1392 # the sense that it can reject things like
1383 1393 #
1384 1394 # $ hg cat -r 10 sub/x.txt
1385 1395 #
1386 1396 # if sub/ is no longer a subrepository in the working copy
1387 1397 # parent revision.
1388 1398 #
1389 1399 # However, it can of course also allow things that would have
1390 1400 # been rejected before, such as the above cat command if sub/
1391 1401 # is a subrepository now, but was a normal directory before.
1392 1402 # The old path auditor would have rejected by mistake since it
1393 1403 # panics when it sees sub/.hg/.
1394 1404 #
1395 1405 # All in all, checking against the working copy seems sensible
1396 1406 # since we want to prevent access to nested repositories on
1397 1407 # the filesystem *now*.
1398 1408 ctx = self[None]
1399 1409 parts = util.splitpath(subpath)
1400 1410 while parts:
1401 1411 prefix = b'/'.join(parts)
1402 1412 if prefix in ctx.substate:
1403 1413 if prefix == normsubpath:
1404 1414 return True
1405 1415 else:
1406 1416 sub = ctx.sub(prefix)
1407 1417 return sub.checknested(subpath[len(prefix) + 1 :])
1408 1418 else:
1409 1419 parts.pop()
1410 1420 return False
1411 1421
1412 1422 def peer(self):
1413 1423 return localpeer(self) # not cached to avoid reference cycle
1414 1424
1415 1425 def unfiltered(self):
1416 1426 """Return unfiltered version of the repository
1417 1427
1418 1428 Intended to be overwritten by filtered repo."""
1419 1429 return self
1420 1430
1421 1431 def filtered(self, name, visibilityexceptions=None):
1422 1432 """Return a filtered version of a repository
1423 1433
1424 1434 The `name` parameter is the identifier of the requested view. This
1425 1435 will return a repoview object set "exactly" to the specified view.
1426 1436
1427 1437 This function does not apply recursive filtering to a repository. For
1428 1438 example calling `repo.filtered("served")` will return a repoview using
1429 1439 the "served" view, regardless of the initial view used by `repo`.
1430 1440
1431 1441 In other words, there is always only one level of `repoview` "filtering".
1432 1442 """
1433 1443 if self._extrafilterid is not None and b'%' not in name:
1434 1444 name = name + b'%' + self._extrafilterid
1435 1445
1436 1446 cls = repoview.newtype(self.unfiltered().__class__)
1437 1447 return cls(self, name, visibilityexceptions)
1438 1448
1439 1449 @mixedrepostorecache(
1440 1450 (b'bookmarks', b'plain'),
1441 1451 (b'bookmarks.current', b'plain'),
1442 1452 (b'bookmarks', b''),
1443 1453 (b'00changelog.i', b''),
1444 1454 )
1445 1455 def _bookmarks(self):
1446 1456 # Since the multiple files involved in the transaction cannot be
1447 1457 # written atomically (with current repository format), there is a race
1448 1458 # condition here.
1449 1459 #
1450 1460 # 1) changelog content A is read
1451 1461 # 2) outside transaction update changelog to content B
1452 1462 # 3) outside transaction update bookmark file referring to content B
1453 1463 # 4) bookmarks file content is read and filtered against changelog-A
1454 1464 #
1455 1465 # When this happens, bookmarks against nodes missing from A are dropped.
1456 1466 #
1457 1467 # Having this happen during a read is not great, but it becomes worse
1458 1468 # when it happens during a write because the bookmarks to the "unknown"
1459 1469 # nodes will be dropped for good. However, writes happen within locks.
1460 1470 # This locking makes it possible to have a race-free consistent read.
1461 1471 # For this purpose, data read from disk before locking is
1462 1472 # "invalidated" right after the locks are taken. These invalidations are
1463 1473 # "light"; the `filecache` mechanism keeps the data in memory and will
1464 1474 # reuse it if the underlying files did not change. Not parsing the
1465 1475 # same data multiple times helps performance.
1466 1476 #
1467 1477 # Unfortunately, in the case described above, the files tracked by the
1468 1478 # bookmarks file cache might not have changed, but the in-memory
1469 1479 # content is still "wrong" because we used an older changelog content
1470 1480 # to process the on-disk data. So after locking, the changelog would be
1471 1481 # refreshed but `_bookmarks` would be preserved.
1472 1482 # Adding `00changelog.i` to the list of tracked files is not
1473 1483 # enough, because at the time we build the content for `_bookmarks` in
1474 1484 # (4), the changelog file has already diverged from the content used
1475 1485 # for loading `changelog` in (1)
1476 1486 #
1477 1487 # To prevent the issue, we force the changelog to be explicitly
1478 1488 # reloaded while computing `_bookmarks`. The data race can still happen
1479 1489 # without the lock (with a narrower window), but it would no longer go
1480 1490 # undetected during the lock time refresh.
1481 1491 #
1482 1492 # The new schedule is as follows:
1483 1493 #
1484 1494 # 1) filecache logic detect that `_bookmarks` needs to be computed
1485 1495 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1486 1496 # 3) We force `changelog` filecache to be tested
1487 1497 # 4) cachestat for `changelog` are captured (for changelog)
1488 1498 # 5) `_bookmarks` is computed and cached
1489 1499 #
1490 1500 # The step in (3) ensures we have a changelog at least as recent as the
1491 1501 # cache stat computed in (1). As a result, at locking time:
1492 1502 # * if the changelog did not change since (1) -> we can reuse the data
1493 1503 # * otherwise -> the bookmarks get refreshed.
1494 1504 self._refreshchangelog()
1495 1505 return bookmarks.bmstore(self)
1496 1506
1497 1507 def _refreshchangelog(self):
1498 1508 """make sure the in memory changelog match the on-disk one"""
1499 1509 if 'changelog' in vars(self) and self.currenttransaction() is None:
1500 1510 del self.changelog
1501 1511
1502 1512 @property
1503 1513 def _activebookmark(self):
1504 1514 return self._bookmarks.active
1505 1515
1506 1516 # _phasesets depend on changelog. what we need is to call
1507 1517 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1508 1518 # can't be easily expressed in filecache mechanism.
1509 1519 @storecache(b'phaseroots', b'00changelog.i')
1510 1520 def _phasecache(self):
1511 1521 return phases.phasecache(self, self._phasedefaults)
1512 1522
1513 1523 @storecache(b'obsstore')
1514 1524 def obsstore(self):
1515 1525 return obsolete.makestore(self.ui, self)
1516 1526
1517 1527 @storecache(b'00changelog.i')
1518 1528 def changelog(self):
1519 1529 # load dirstate before changelog to avoid a race; see issue6303
1520 1530 self.dirstate.prefetch_parents()
1521 1531 return self.store.changelog(txnutil.mayhavepending(self.root))
1522 1532
1523 1533 @storecache(b'00manifest.i')
1524 1534 def manifestlog(self):
1525 1535 return self.store.manifestlog(self, self._storenarrowmatch)
1526 1536
1527 1537 @repofilecache(b'dirstate')
1528 1538 def dirstate(self):
1529 1539 return self._makedirstate()
1530 1540
1531 1541 def _makedirstate(self):
1532 1542 """Extension point for wrapping the dirstate per-repo."""
1533 1543 sparsematchfn = lambda: sparse.matcher(self)
1534 1544
1535 1545 return dirstate.dirstate(
1536 1546 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1537 1547 )
1538 1548
1539 1549 def _dirstatevalidate(self, node):
1540 1550 try:
1541 1551 self.changelog.rev(node)
1542 1552 return node
1543 1553 except error.LookupError:
1544 1554 if not self._dirstatevalidatewarned:
1545 1555 self._dirstatevalidatewarned = True
1546 1556 self.ui.warn(
1547 1557 _(b"warning: ignoring unknown working parent %s!\n")
1548 1558 % short(node)
1549 1559 )
1550 1560 return nullid
1551 1561
1552 1562 @storecache(narrowspec.FILENAME)
1553 1563 def narrowpats(self):
1554 1564 """matcher patterns for this repository's narrowspec
1555 1565
1556 1566 A tuple of (includes, excludes).
1557 1567 """
1558 1568 return narrowspec.load(self)
1559 1569
1560 1570 @storecache(narrowspec.FILENAME)
1561 1571 def _storenarrowmatch(self):
1562 1572 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1563 1573 return matchmod.always()
1564 1574 include, exclude = self.narrowpats
1565 1575 return narrowspec.match(self.root, include=include, exclude=exclude)
1566 1576
1567 1577 @storecache(narrowspec.FILENAME)
1568 1578 def _narrowmatch(self):
1569 1579 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1570 1580 return matchmod.always()
1571 1581 narrowspec.checkworkingcopynarrowspec(self)
1572 1582 include, exclude = self.narrowpats
1573 1583 return narrowspec.match(self.root, include=include, exclude=exclude)
1574 1584
1575 1585 def narrowmatch(self, match=None, includeexact=False):
1576 1586 """matcher corresponding the the repo's narrowspec
1577 1587
1578 1588 If `match` is given, then that will be intersected with the narrow
1579 1589 matcher.
1580 1590
1581 1591 If `includeexact` is True, then any exact matches from `match` will
1582 1592 be included even if they're outside the narrowspec.
1583 1593 """
1584 1594 if match:
1585 1595 if includeexact and not self._narrowmatch.always():
1586 1596 # do not exclude explicitly-specified paths so that they can
1587 1597 # be warned about later on
1588 1598 em = matchmod.exact(match.files())
1589 1599 nm = matchmod.unionmatcher([self._narrowmatch, em])
1590 1600 return matchmod.intersectmatchers(match, nm)
1591 1601 return matchmod.intersectmatchers(match, self._narrowmatch)
1592 1602 return self._narrowmatch
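# Illustrative usage sketch (not part of the original source), assuming
# `repo` is a local repository; the pattern is hypothetical:
#
#   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#   nm = repo.narrowmatch(m, includeexact=True)
#   nm(b'src/foo.py')  # True only if inside the narrowspec or an exact match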
1593 1603
1594 1604 def setnarrowpats(self, newincludes, newexcludes):
1595 1605 narrowspec.save(self, newincludes, newexcludes)
1596 1606 self.invalidate(clearfilecache=True)
1597 1607
1598 1608 @unfilteredpropertycache
1599 1609 def _quick_access_changeid_null(self):
1600 1610 return {
1601 1611 b'null': (nullrev, nullid),
1602 1612 nullrev: (nullrev, nullid),
1603 1613 nullid: (nullrev, nullid),
1604 1614 }
1605 1615
1606 1616 @unfilteredpropertycache
1607 1617 def _quick_access_changeid_wc(self):
1608 1618 # also fast path access to the working copy parents
1609 1619 # however, only do it for filter that ensure wc is visible.
1610 1620 quick = self._quick_access_changeid_null.copy()
1611 1621 cl = self.unfiltered().changelog
1612 1622 for node in self.dirstate.parents():
1613 1623 if node == nullid:
1614 1624 continue
1615 1625 rev = cl.index.get_rev(node)
1616 1626 if rev is None:
1617 1627 # unknown working copy parent case:
1618 1628 #
1619 1629 # skip the fast path and let higher code deal with it
1620 1630 continue
1621 1631 pair = (rev, node)
1622 1632 quick[rev] = pair
1623 1633 quick[node] = pair
1624 1634 # also add the parents of the parents
1625 1635 for r in cl.parentrevs(rev):
1626 1636 if r == nullrev:
1627 1637 continue
1628 1638 n = cl.node(r)
1629 1639 pair = (r, n)
1630 1640 quick[r] = pair
1631 1641 quick[n] = pair
1632 1642 p1node = self.dirstate.p1()
1633 1643 if p1node != nullid:
1634 1644 quick[b'.'] = quick[p1node]
1635 1645 return quick
1636 1646
1637 1647 @unfilteredmethod
1638 1648 def _quick_access_changeid_invalidate(self):
1639 1649 if '_quick_access_changeid_wc' in vars(self):
1640 1650 del self.__dict__['_quick_access_changeid_wc']
1641 1651
1642 1652 @property
1643 1653 def _quick_access_changeid(self):
1644 1654 """an helper dictionnary for __getitem__ calls
1645 1655
1646 1656 This contains a list of symbol we can recognise right away without
1647 1657 further processing.
1648 1658 """
1649 1659 if self.filtername in repoview.filter_has_wc:
1650 1660 return self._quick_access_changeid_wc
1651 1661 return self._quick_access_changeid_null
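# For illustration (not in the original source): on a repository whose
# working copy parent is rev 5 with node `n5`, the mapping would contain
# entries such as {b'null': (-1, nullid), 5: (5, n5), n5: (5, n5),
# b'.': (5, n5)}, plus the parents of rev 5, so __getitem__ can resolve
# these without any revset processing.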
1652 1662
1653 1663 def __getitem__(self, changeid):
1654 1664 # dealing with special cases
1655 1665 if changeid is None:
1656 1666 return context.workingctx(self)
1657 1667 if isinstance(changeid, context.basectx):
1658 1668 return changeid
1659 1669
1660 1670 # dealing with multiple revisions
1661 1671 if isinstance(changeid, slice):
1662 1672 # wdirrev isn't contiguous so the slice shouldn't include it
1663 1673 return [
1664 1674 self[i]
1665 1675 for i in pycompat.xrange(*changeid.indices(len(self)))
1666 1676 if i not in self.changelog.filteredrevs
1667 1677 ]
1668 1678
1669 1679 # dealing with some special values
1670 1680 quick_access = self._quick_access_changeid.get(changeid)
1671 1681 if quick_access is not None:
1672 1682 rev, node = quick_access
1673 1683 return context.changectx(self, rev, node, maybe_filtered=False)
1674 1684 if changeid == b'tip':
1675 1685 node = self.changelog.tip()
1676 1686 rev = self.changelog.rev(node)
1677 1687 return context.changectx(self, rev, node)
1678 1688
1679 1689 # dealing with arbitrary values
1680 1690 try:
1681 1691 if isinstance(changeid, int):
1682 1692 node = self.changelog.node(changeid)
1683 1693 rev = changeid
1684 1694 elif changeid == b'.':
1685 1695 # this is a hack to delay/avoid loading obsmarkers
1686 1696 # when we know that '.' won't be hidden
1687 1697 node = self.dirstate.p1()
1688 1698 rev = self.unfiltered().changelog.rev(node)
1689 1699 elif len(changeid) == 20:
1690 1700 try:
1691 1701 node = changeid
1692 1702 rev = self.changelog.rev(changeid)
1693 1703 except error.FilteredLookupError:
1694 1704 changeid = hex(changeid) # for the error message
1695 1705 raise
1696 1706 except LookupError:
1697 1707 # check if it might have come from damaged dirstate
1698 1708 #
1699 1709 # XXX we could avoid the unfiltered if we had a recognizable
1700 1710 # exception for filtered changeset access
1701 1711 if (
1702 1712 self.local()
1703 1713 and changeid in self.unfiltered().dirstate.parents()
1704 1714 ):
1705 1715 msg = _(b"working directory has unknown parent '%s'!")
1706 1716 raise error.Abort(msg % short(changeid))
1707 1717 changeid = hex(changeid) # for the error message
1708 1718 raise
1709 1719
1710 1720 elif len(changeid) == 40:
1711 1721 node = bin(changeid)
1712 1722 rev = self.changelog.rev(node)
1713 1723 else:
1714 1724 raise error.ProgrammingError(
1715 1725 b"unsupported changeid '%s' of type %s"
1716 1726 % (changeid, pycompat.bytestr(type(changeid)))
1717 1727 )
1718 1728
1719 1729 return context.changectx(self, rev, node)
1720 1730
1721 1731 except (error.FilteredIndexError, error.FilteredLookupError):
1722 1732 raise error.FilteredRepoLookupError(
1723 1733 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1724 1734 )
1725 1735 except (IndexError, LookupError):
1726 1736 raise error.RepoLookupError(
1727 1737 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1728 1738 )
1729 1739 except error.WdirUnsupported:
1730 1740 return context.workingctx(self)
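# A few illustrative lookups (not part of the original source):
#
#   repo[None]    # workingctx for the working directory
#   repo[b'.']    # changectx for the first working copy parent
#   repo[0]       # changectx for revision 0
#   repo[b'tip']  # changectx for the tip changeset
#   repo[0:3]     # list of changectx for revisions 0, 1 and 2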
1731 1741
1732 1742 def __contains__(self, changeid):
1733 1743 """True if the given changeid exists
1734 1744
1735 1745 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1736 1746 specified.
1737 1747 """
1738 1748 try:
1739 1749 self[changeid]
1740 1750 return True
1741 1751 except error.RepoLookupError:
1742 1752 return False
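# e.g. (illustrative): `b'tip' in repo` is True, while a made-up 40-hex
# changeid such as b'f' * 40 simply yields False instead of raising.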
1743 1753
1744 1754 def __nonzero__(self):
1745 1755 return True
1746 1756
1747 1757 __bool__ = __nonzero__
1748 1758
1749 1759 def __len__(self):
1750 1760 # no need to pay the cost of repoview.changelog
1751 1761 unfi = self.unfiltered()
1752 1762 return len(unfi.changelog)
1753 1763
1754 1764 def __iter__(self):
1755 1765 return iter(self.changelog)
1756 1766
1757 1767 def revs(self, expr, *args):
1758 1768 """Find revisions matching a revset.
1759 1769
1760 1770 The revset is specified as a string ``expr`` that may contain
1761 1771 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1762 1772
1763 1773 Revset aliases from the configuration are not expanded. To expand
1764 1774 user aliases, consider calling ``scmutil.revrange()`` or
1765 1775 ``repo.anyrevs([expr], user=True)``.
1766 1776
1767 1777 Returns a smartset.abstractsmartset, which is a list-like interface
1768 1778 that contains integer revisions.
1769 1779 """
1770 1780 tree = revsetlang.spectree(expr, *args)
1771 1781 return revset.makematcher(tree)(self)
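# A minimal sketch (not from the original source); `some_revs` is a
# hypothetical list of integer revisions:
#
#   repo.revs(b'heads(ancestors(%ld))', some_revs)
#   repo.revs(b'branch(%s) and not obsolete()', b'default')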
1772 1782
1773 1783 def set(self, expr, *args):
1774 1784 """Find revisions matching a revset and emit changectx instances.
1775 1785
1776 1786 This is a convenience wrapper around ``revs()`` that iterates the
1777 1787 result and is a generator of changectx instances.
1778 1788
1779 1789 Revset aliases from the configuration are not expanded. To expand
1780 1790 user aliases, consider calling ``scmutil.revrange()``.
1781 1791 """
1782 1792 for r in self.revs(expr, *args):
1783 1793 yield self[r]
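# Illustrative iteration (not part of the original source):
#
#   for ctx in repo.set(b'draft() and user(%s)', b'alice'):
#       repo.ui.write(b'%s\n' % ctx.hex())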
1784 1794
1785 1795 def anyrevs(self, specs, user=False, localalias=None):
1786 1796 """Find revisions matching one of the given revsets.
1787 1797
1788 1798 Revset aliases from the configuration are not expanded by default. To
1789 1799 expand user aliases, specify ``user=True``. To provide some local
1790 1800 definitions overriding user aliases, set ``localalias`` to
1791 1801 ``{name: definitionstring}``.
1792 1802 """
1793 1803 if specs == [b'null']:
1794 1804 return revset.baseset([nullrev])
1795 1805 if specs == [b'.']:
1796 1806 quick_data = self._quick_access_changeid.get(b'.')
1797 1807 if quick_data is not None:
1798 1808 return revset.baseset([quick_data[0]])
1799 1809 if user:
1800 1810 m = revset.matchany(
1801 1811 self.ui,
1802 1812 specs,
1803 1813 lookup=revset.lookupfn(self),
1804 1814 localalias=localalias,
1805 1815 )
1806 1816 else:
1807 1817 m = revset.matchany(None, specs, localalias=localalias)
1808 1818 return m(self)
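# Sketch (not in the original source) of a local alias override; the
# alias name `mine` is hypothetical:
#
#   repo.anyrevs([b'mine'], user=True,
#                localalias={b'mine': b'draft() and user("alice")'})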
1809 1819
1810 1820 def url(self):
1811 1821 return b'file:' + self.root
1812 1822
1813 1823 def hook(self, name, throw=False, **args):
1814 1824 """Call a hook, passing this repo instance.
1815 1825
1816 1826 This a convenience method to aid invoking hooks. Extensions likely
1817 1827 won't call this unless they have registered a custom hook or are
1818 1828 replacing code that is expected to call a hook.
1819 1829 """
1820 1830 return hook.hook(self.ui, self, name, throw, **args)
1821 1831
1822 1832 @filteredpropertycache
1823 1833 def _tagscache(self):
1824 1834 """Returns a tagscache object that contains various tags related
1825 1835 caches."""
1826 1836
1827 1837 # This simplifies its cache management by having one decorated
1828 1838 # function (this one) and the rest simply fetch things from it.
1829 1839 class tagscache(object):
1830 1840 def __init__(self):
1831 1841 # These two define the set of tags for this repository. tags
1832 1842 # maps tag name to node; tagtypes maps tag name to 'global' or
1833 1843 # 'local'. (Global tags are defined by .hgtags across all
1834 1844 # heads, and local tags are defined in .hg/localtags.)
1835 1845 # They constitute the in-memory cache of tags.
1836 1846 self.tags = self.tagtypes = None
1837 1847
1838 1848 self.nodetagscache = self.tagslist = None
1839 1849
1840 1850 cache = tagscache()
1841 1851 cache.tags, cache.tagtypes = self._findtags()
1842 1852
1843 1853 return cache
1844 1854
1845 1855 def tags(self):
1846 1856 '''return a mapping of tag to node'''
1847 1857 t = {}
1848 1858 if self.changelog.filteredrevs:
1849 1859 tags, tt = self._findtags()
1850 1860 else:
1851 1861 tags = self._tagscache.tags
1852 1862 rev = self.changelog.rev
1853 1863 for k, v in pycompat.iteritems(tags):
1854 1864 try:
1855 1865 # ignore tags to unknown nodes
1856 1866 rev(v)
1857 1867 t[k] = v
1858 1868 except (error.LookupError, ValueError):
1859 1869 pass
1860 1870 return t
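# For illustration (not from the original source); the tag name is
# hypothetical:
#
#   node = repo.tags().get(b'v1.0')  # None if the tag does not exist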
1861 1871
1862 1872 def _findtags(self):
1863 1873 """Do the hard work of finding tags. Return a pair of dicts
1864 1874 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1865 1875 maps tag name to a string like \'global\' or \'local\'.
1866 1876 Subclasses or extensions are free to add their own tags, but
1867 1877 should be aware that the returned dicts will be retained for the
1868 1878 duration of the localrepo object."""
1869 1879
1870 1880 # XXX what tagtype should subclasses/extensions use? Currently
1871 1881 # mq and bookmarks add tags, but do not set the tagtype at all.
1872 1882 # Should each extension invent its own tag type? Should there
1873 1883 # be one tagtype for all such "virtual" tags? Or is the status
1874 1884 # quo fine?
1875 1885
1876 1886 # map tag name to (node, hist)
1877 1887 alltags = tagsmod.findglobaltags(self.ui, self)
1878 1888 # map tag name to tag type
1879 1889 tagtypes = {tag: b'global' for tag in alltags}
1880 1890
1881 1891 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1882 1892
1883 1893 # Build the return dicts. Have to re-encode tag names because
1884 1894 # the tags module always uses UTF-8 (in order not to lose info
1885 1895 # writing to the cache), but the rest of Mercurial wants them in
1886 1896 # local encoding.
1887 1897 tags = {}
1888 1898 for (name, (node, hist)) in pycompat.iteritems(alltags):
1889 1899 if node != nullid:
1890 1900 tags[encoding.tolocal(name)] = node
1891 1901 tags[b'tip'] = self.changelog.tip()
1892 1902 tagtypes = {
1893 1903 encoding.tolocal(name): value
1894 1904 for (name, value) in pycompat.iteritems(tagtypes)
1895 1905 }
1896 1906 return (tags, tagtypes)
1897 1907
1898 1908 def tagtype(self, tagname):
1899 1909 """
1900 1910 return the type of the given tag. result can be:
1901 1911
1902 1912 'local' : a local tag
1903 1913 'global' : a global tag
1904 1914 None : tag does not exist
1905 1915 """
1906 1916
1907 1917 return self._tagscache.tagtypes.get(tagname)
1908 1918
1909 1919 def tagslist(self):
1910 1920 '''return a list of tags ordered by revision'''
1911 1921 if not self._tagscache.tagslist:
1912 1922 l = []
1913 1923 for t, n in pycompat.iteritems(self.tags()):
1914 1924 l.append((self.changelog.rev(n), t, n))
1915 1925 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1916 1926
1917 1927 return self._tagscache.tagslist
1918 1928
1919 1929 def nodetags(self, node):
1920 1930 '''return the tags associated with a node'''
1921 1931 if not self._tagscache.nodetagscache:
1922 1932 nodetagscache = {}
1923 1933 for t, n in pycompat.iteritems(self._tagscache.tags):
1924 1934 nodetagscache.setdefault(n, []).append(t)
1925 1935 for tags in pycompat.itervalues(nodetagscache):
1926 1936 tags.sort()
1927 1937 self._tagscache.nodetagscache = nodetagscache
1928 1938 return self._tagscache.nodetagscache.get(node, [])
1929 1939
1930 1940 def nodebookmarks(self, node):
1931 1941 """return the list of bookmarks pointing to the specified node"""
1932 1942 return self._bookmarks.names(node)
1933 1943
1934 1944 def branchmap(self):
1935 1945 """returns a dictionary {branch: [branchheads]} with branchheads
1936 1946 ordered by increasing revision number"""
1937 1947 return self._branchcaches[self]
1938 1948
1939 1949 @unfilteredmethod
1940 1950 def revbranchcache(self):
1941 1951 if not self._revbranchcache:
1942 1952 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1943 1953 return self._revbranchcache
1944 1954
1945 1955 def branchtip(self, branch, ignoremissing=False):
1946 1956 """return the tip node for a given branch
1947 1957
1948 1958 If ignoremissing is True, then this method will not raise an error.
1949 1959 This is helpful for callers that only expect None for a missing branch
1950 1960 (e.g. namespace).
1951 1961
1952 1962 """
1953 1963 try:
1954 1964 return self.branchmap().branchtip(branch)
1955 1965 except KeyError:
1956 1966 if not ignoremissing:
1957 1967 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1958 1968 else:
1959 1969 pass
1960 1970
1961 1971 def lookup(self, key):
1962 1972 node = scmutil.revsymbol(self, key).node()
1963 1973 if node is None:
1964 1974 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1965 1975 return node
1966 1976
1967 1977 def lookupbranch(self, key):
1968 1978 if self.branchmap().hasbranch(key):
1969 1979 return key
1970 1980
1971 1981 return scmutil.revsymbol(self, key).branch()
1972 1982
1973 1983 def known(self, nodes):
1974 1984 cl = self.changelog
1975 1985 get_rev = cl.index.get_rev
1976 1986 filtered = cl.filteredrevs
1977 1987 result = []
1978 1988 for n in nodes:
1979 1989 r = get_rev(n)
1980 1990 resp = not (r is None or r in filtered)
1981 1991 result.append(resp)
1982 1992 return result
1983 1993
1984 1994 def local(self):
1985 1995 return self
1986 1996
1987 1997 def publishing(self):
1988 1998 # it's safe (and desirable) to trust the publish flag unconditionally
1989 1999 # so that we don't finalize changes shared between users via ssh or nfs
1990 2000 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1991 2001
1992 2002 def cancopy(self):
1993 2003 # so statichttprepo's override of local() works
1994 2004 if not self.local():
1995 2005 return False
1996 2006 if not self.publishing():
1997 2007 return True
1998 2008 # if publishing we can't copy if there is filtered content
1999 2009 return not self.filtered(b'visible').changelog.filteredrevs
2000 2010
2001 2011 def shared(self):
2002 2012 '''the type of shared repository (None if not shared)'''
2003 2013 if self.sharedpath != self.path:
2004 2014 return b'store'
2005 2015 return None
2006 2016
2007 2017 def wjoin(self, f, *insidef):
2008 2018 return self.vfs.reljoin(self.root, f, *insidef)
2009 2019
2010 2020 def setparents(self, p1, p2=nullid):
2011 2021 self[None].setparents(p1, p2)
2012 2022 self._quick_access_changeid_invalidate()
2013 2023
2014 2024 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2015 2025 """changeid must be a changeset revision, if specified.
2016 2026 fileid can be a file revision or node."""
2017 2027 return context.filectx(
2018 2028 self, path, changeid, fileid, changectx=changectx
2019 2029 )
2020 2030
2021 2031 def getcwd(self):
2022 2032 return self.dirstate.getcwd()
2023 2033
2024 2034 def pathto(self, f, cwd=None):
2025 2035 return self.dirstate.pathto(f, cwd)
2026 2036
2027 2037 def _loadfilter(self, filter):
2028 2038 if filter not in self._filterpats:
2029 2039 l = []
2030 2040 for pat, cmd in self.ui.configitems(filter):
2031 2041 if cmd == b'!':
2032 2042 continue
2033 2043 mf = matchmod.match(self.root, b'', [pat])
2034 2044 fn = None
2035 2045 params = cmd
2036 2046 for name, filterfn in pycompat.iteritems(self._datafilters):
2037 2047 if cmd.startswith(name):
2038 2048 fn = filterfn
2039 2049 params = cmd[len(name) :].lstrip()
2040 2050 break
2041 2051 if not fn:
2042 2052 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2043 2053 fn.__name__ = 'commandfilter'
2044 2054 # Wrap old filters not supporting keyword arguments
2045 2055 if not pycompat.getargspec(fn)[2]:
2046 2056 oldfn = fn
2047 2057 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2048 2058 fn.__name__ = 'compat-' + oldfn.__name__
2049 2059 l.append((mf, fn, params))
2050 2060 self._filterpats[filter] = l
2051 2061 return self._filterpats[filter]
2052 2062
2053 2063 def _filter(self, filterpats, filename, data):
2054 2064 for mf, fn, cmd in filterpats:
2055 2065 if mf(filename):
2056 2066 self.ui.debug(
2057 2067 b"filtering %s through %s\n"
2058 2068 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2059 2069 )
2060 2070 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2061 2071 break
2062 2072
2063 2073 return data
2064 2074
2065 2075 @unfilteredpropertycache
2066 2076 def _encodefilterpats(self):
2067 2077 return self._loadfilter(b'encode')
2068 2078
2069 2079 @unfilteredpropertycache
2070 2080 def _decodefilterpats(self):
2071 2081 return self._loadfilter(b'decode')
2072 2082
2073 2083 def adddatafilter(self, name, filter):
2074 2084 self._datafilters[name] = filter
2075 2085
2076 2086 def wread(self, filename):
2077 2087 if self.wvfs.islink(filename):
2078 2088 data = self.wvfs.readlink(filename)
2079 2089 else:
2080 2090 data = self.wvfs.read(filename)
2081 2091 return self._filter(self._encodefilterpats, filename, data)
2082 2092
2083 2093 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2084 2094 """write ``data`` into ``filename`` in the working directory
2085 2095
2086 2096 This returns the length of the written (maybe decoded) data.
2087 2097 """
2088 2098 data = self._filter(self._decodefilterpats, filename, data)
2089 2099 if b'l' in flags:
2090 2100 self.wvfs.symlink(data, filename)
2091 2101 else:
2092 2102 self.wvfs.write(
2093 2103 filename, data, backgroundclose=backgroundclose, **kwargs
2094 2104 )
2095 2105 if b'x' in flags:
2096 2106 self.wvfs.setflags(filename, False, True)
2097 2107 else:
2098 2108 self.wvfs.setflags(filename, False, False)
2099 2109 return len(data)
2100 2110
2101 2111 def wwritedata(self, filename, data):
2102 2112 return self._filter(self._decodefilterpats, filename, data)
2103 2113
2104 2114 def currenttransaction(self):
2105 2115 """return the current transaction or None if non exists"""
2106 2116 if self._transref:
2107 2117 tr = self._transref()
2108 2118 else:
2109 2119 tr = None
2110 2120
2111 2121 if tr and tr.running():
2112 2122 return tr
2113 2123 return None
2114 2124
2115 2125 def transaction(self, desc, report=None):
2116 2126 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2117 2127 b'devel', b'check-locks'
2118 2128 ):
2119 2129 if self._currentlock(self._lockref) is None:
2120 2130 raise error.ProgrammingError(b'transaction requires locking')
2121 2131 tr = self.currenttransaction()
2122 2132 if tr is not None:
2123 2133 return tr.nest(name=desc)
2124 2134
2125 2135 # abort here if the journal already exists
2126 2136 if self.svfs.exists(b"journal"):
2127 2137 raise error.RepoError(
2128 2138 _(b"abandoned transaction found"),
2129 2139 hint=_(b"run 'hg recover' to clean up transaction"),
2130 2140 )
2131 2141
2132 2142 idbase = b"%.40f#%f" % (random.random(), time.time())
2133 2143 ha = hex(hashutil.sha1(idbase).digest())
2134 2144 txnid = b'TXN:' + ha
2135 2145 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2136 2146
2137 2147 self._writejournal(desc)
2138 2148 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2139 2149 if report:
2140 2150 rp = report
2141 2151 else:
2142 2152 rp = self.ui.warn
2143 2153 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2144 2154 # we must avoid cyclic reference between repo and transaction.
2145 2155 reporef = weakref.ref(self)
2146 2156 # Code to track tag movement
2147 2157 #
2148 2158 # Since tags are all handled as file content, it is actually quite hard
2149 2159 # to track these movements from a code perspective. So we fall back to
2150 2160 # tracking at the repository level. One could envision tracking changes
2151 2161 # to the '.hgtags' file through changegroup application, but that fails
2152 2162 # to cope with cases where a transaction exposes new heads without a
2153 2163 # changegroup being involved (eg: phase movement).
2154 2164 #
2155 2165 # For now, we gate the feature behind a flag since this likely comes
2156 2166 # with performance impacts. The current code runs more often than
2157 2167 # needed and does not use caches as much as it could. The current focus
2158 2168 # is on the behavior of the feature, so we disable it by default. The
2159 2169 # flag will be removed when we are happy with the performance impact.
2160 2170 #
2161 2171 # Once this feature is no longer experimental move the following
2162 2172 # documentation to the appropriate help section:
2163 2173 #
2164 2174 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2165 2175 # tags (new or changed or deleted tags). In addition the details of
2166 2176 # these changes are made available in a file at:
2167 2177 # ``REPOROOT/.hg/changes/tags.changes``.
2168 2178 # Make sure you check for HG_TAG_MOVED before reading that file as it
2169 2179 # might exist from a previous transaction even if no tags were touched
2170 2180 # in this one. Changes are recorded in a line-based format::
2171 2181 #
2172 2182 # <action> <hex-node> <tag-name>\n
2173 2183 #
2174 2184 # Actions are defined as follows:
2175 2185 # "-R": tag is removed,
2176 2186 # "+A": tag is added,
2177 2187 # "-M": tag is moved (old value),
2178 2188 # "+M": tag is moved (new value),
2179 2189 tracktags = lambda x: None
2180 2190 # experimental config: experimental.hook-track-tags
2181 2191 shouldtracktags = self.ui.configbool(
2182 2192 b'experimental', b'hook-track-tags'
2183 2193 )
2184 2194 if desc != b'strip' and shouldtracktags:
2185 2195 oldheads = self.changelog.headrevs()
2186 2196
2187 2197 def tracktags(tr2):
2188 2198 repo = reporef()
2189 2199 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2190 2200 newheads = repo.changelog.headrevs()
2191 2201 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2192 2202 # note: we compare lists here;
2193 2203 # as we do it only once, building sets would not be cheaper.
2194 2204 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2195 2205 if changes:
2196 2206 tr2.hookargs[b'tag_moved'] = b'1'
2197 2207 with repo.vfs(
2198 2208 b'changes/tags.changes', b'w', atomictemp=True
2199 2209 ) as changesfile:
2200 2210 # note: we do not register the file with the transaction
2201 2211 # because we need it to still exist when the transaction
2202 2212 # is closed (for txnclose hooks)
2203 2213 tagsmod.writediff(changesfile, changes)
2204 2214
2205 2215 def validate(tr2):
2206 2216 """will run pre-closing hooks"""
2207 2217 # XXX the transaction API is a bit lacking here so we take a hacky
2208 2218 # path for now
2209 2219 #
2210 2220 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2211 2221 # dict is copied before these run. In addition, we need the data
2212 2222 # available to in-memory hooks too.
2213 2223 #
2214 2224 # Moreover, we also need to make sure this runs before txnclose
2215 2225 # hooks and there is no "pending" mechanism that would execute
2216 2226 # logic only if hooks are about to run.
2217 2227 #
2218 2228 # Fixing this limitation of the transaction is also needed to track
2219 2229 # other families of changes (bookmarks, phases, obsolescence).
2220 2230 #
2221 2231 # This will have to be fixed before we remove the experimental
2222 2232 # gating.
2223 2233 tracktags(tr2)
2224 2234 repo = reporef()
2225 2235
2226 2236 singleheadopt = (b'experimental', b'single-head-per-branch')
2227 2237 singlehead = repo.ui.configbool(*singleheadopt)
2228 2238 if singlehead:
2229 2239 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2230 2240 accountclosed = singleheadsub.get(
2231 2241 b"account-closed-heads", False
2232 2242 )
2233 2243 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2234 2244 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2235 2245 for name, (old, new) in sorted(
2236 2246 tr.changes[b'bookmarks'].items()
2237 2247 ):
2238 2248 args = tr.hookargs.copy()
2239 2249 args.update(bookmarks.preparehookargs(name, old, new))
2240 2250 repo.hook(
2241 2251 b'pretxnclose-bookmark',
2242 2252 throw=True,
2243 2253 **pycompat.strkwargs(args)
2244 2254 )
2245 2255 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2246 2256 cl = repo.unfiltered().changelog
2247 2257 for revs, (old, new) in tr.changes[b'phases']:
2248 2258 for rev in revs:
2249 2259 args = tr.hookargs.copy()
2250 2260 node = hex(cl.node(rev))
2251 2261 args.update(phases.preparehookargs(node, old, new))
2252 2262 repo.hook(
2253 2263 b'pretxnclose-phase',
2254 2264 throw=True,
2255 2265 **pycompat.strkwargs(args)
2256 2266 )
2257 2267
2258 2268 repo.hook(
2259 2269 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2260 2270 )
2261 2271
2262 2272 def releasefn(tr, success):
2263 2273 repo = reporef()
2264 2274 if repo is None:
2265 2275 # If the repo has been GC'd (and this release function is being
2266 2276 # called from transaction.__del__), there's not much we can do,
2267 2277 # so just leave the unfinished transaction there and let the
2268 2278 # user run `hg recover`.
2269 2279 return
2270 2280 if success:
2271 2281 # this should be explicitly invoked here, because
2272 2282 # in-memory changes aren't written out at closing
2273 2283 # transaction, if tr.addfilegenerator (via
2274 2284 # dirstate.write or so) isn't invoked while
2275 2285 # transaction running
2276 2286 repo.dirstate.write(None)
2277 2287 else:
2278 2288 # discard all changes (including ones already written
2279 2289 # out) in this transaction
2280 2290 narrowspec.restorebackup(self, b'journal.narrowspec')
2281 2291 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2282 2292 repo.dirstate.restorebackup(None, b'journal.dirstate')
2283 2293
2284 2294 repo.invalidate(clearfilecache=True)
2285 2295
2286 2296 tr = transaction.transaction(
2287 2297 rp,
2288 2298 self.svfs,
2289 2299 vfsmap,
2290 2300 b"journal",
2291 2301 b"undo",
2292 2302 aftertrans(renames),
2293 2303 self.store.createmode,
2294 2304 validator=validate,
2295 2305 releasefn=releasefn,
2296 2306 checkambigfiles=_cachedfiles,
2297 2307 name=desc,
2298 2308 )
2299 2309 tr.changes[b'origrepolen'] = len(self)
2300 2310 tr.changes[b'obsmarkers'] = set()
2301 2311 tr.changes[b'phases'] = []
2302 2312 tr.changes[b'bookmarks'] = {}
2303 2313
2304 2314 tr.hookargs[b'txnid'] = txnid
2305 2315 tr.hookargs[b'txnname'] = desc
2306 2316 tr.hookargs[b'changes'] = tr.changes
2307 2317 # note: writing the fncache only during finalize means that the file is
2308 2318 # outdated when running hooks. As fncache is used for streaming clone,
2309 2319 # this is not expected to break anything that happens during the hooks.
2310 2320 tr.addfinalize(b'flush-fncache', self.store.write)
2311 2321
2312 2322 def txnclosehook(tr2):
2313 2323 """To be run if transaction is successful, will schedule a hook run"""
2314 2324 # Don't reference tr2 in hook() so we don't hold a reference.
2315 2325 # This reduces memory consumption when there are multiple
2316 2326 # transactions per lock. This can likely go away if issue5045
2317 2327 # fixes the function accumulation.
2318 2328 hookargs = tr2.hookargs
2319 2329
2320 2330 def hookfunc(unused_success):
2321 2331 repo = reporef()
2322 2332 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2323 2333 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2324 2334 for name, (old, new) in bmchanges:
2325 2335 args = tr.hookargs.copy()
2326 2336 args.update(bookmarks.preparehookargs(name, old, new))
2327 2337 repo.hook(
2328 2338 b'txnclose-bookmark',
2329 2339 throw=False,
2330 2340 **pycompat.strkwargs(args)
2331 2341 )
2332 2342
2333 2343 if hook.hashook(repo.ui, b'txnclose-phase'):
2334 2344 cl = repo.unfiltered().changelog
2335 2345 phasemv = sorted(
2336 2346 tr.changes[b'phases'], key=lambda r: r[0][0]
2337 2347 )
2338 2348 for revs, (old, new) in phasemv:
2339 2349 for rev in revs:
2340 2350 args = tr.hookargs.copy()
2341 2351 node = hex(cl.node(rev))
2342 2352 args.update(phases.preparehookargs(node, old, new))
2343 2353 repo.hook(
2344 2354 b'txnclose-phase',
2345 2355 throw=False,
2346 2356 **pycompat.strkwargs(args)
2347 2357 )
2348 2358
2349 2359 repo.hook(
2350 2360 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2351 2361 )
2352 2362
2353 2363 reporef()._afterlock(hookfunc)
2354 2364
2355 2365 tr.addfinalize(b'txnclose-hook', txnclosehook)
2356 2366 # Include a leading "-" to make it happen before the transaction summary
2357 2367 # reports registered via scmutil.registersummarycallback() whose names
2358 2368 # are 00-txnreport etc. That way, the caches will be warm when the
2359 2369 # callbacks run.
2360 2370 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2361 2371
2362 2372 def txnaborthook(tr2):
2363 2373 """To be run if transaction is aborted"""
2364 2374 reporef().hook(
2365 2375 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2366 2376 )
2367 2377
2368 2378 tr.addabort(b'txnabort-hook', txnaborthook)
2369 2379 # avoid eager cache invalidation. in-memory data should be identical
2370 2380 # to stored data if transaction has no error.
2371 2381 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2372 2382 self._transref = weakref.ref(tr)
2373 2383 scmutil.registersummarycallback(self, tr, desc)
2374 2384 return tr
2375 2385
2376 2386 def _journalfiles(self):
2377 2387 return (
2378 2388 (self.svfs, b'journal'),
2379 2389 (self.svfs, b'journal.narrowspec'),
2380 2390 (self.vfs, b'journal.narrowspec.dirstate'),
2381 2391 (self.vfs, b'journal.dirstate'),
2382 2392 (self.vfs, b'journal.branch'),
2383 2393 (self.vfs, b'journal.desc'),
2384 2394 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2385 2395 (self.svfs, b'journal.phaseroots'),
2386 2396 )
2387 2397
2388 2398 def undofiles(self):
2389 2399 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2390 2400
2391 2401 @unfilteredmethod
2392 2402 def _writejournal(self, desc):
2393 2403 self.dirstate.savebackup(None, b'journal.dirstate')
2394 2404 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2395 2405 narrowspec.savebackup(self, b'journal.narrowspec')
2396 2406 self.vfs.write(
2397 2407 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2398 2408 )
2399 2409 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2400 2410 bookmarksvfs = bookmarks.bookmarksvfs(self)
2401 2411 bookmarksvfs.write(
2402 2412 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2403 2413 )
2404 2414 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2405 2415
2406 2416 def recover(self):
2407 2417 with self.lock():
2408 2418 if self.svfs.exists(b"journal"):
2409 2419 self.ui.status(_(b"rolling back interrupted transaction\n"))
2410 2420 vfsmap = {
2411 2421 b'': self.svfs,
2412 2422 b'plain': self.vfs,
2413 2423 }
2414 2424 transaction.rollback(
2415 2425 self.svfs,
2416 2426 vfsmap,
2417 2427 b"journal",
2418 2428 self.ui.warn,
2419 2429 checkambigfiles=_cachedfiles,
2420 2430 )
2421 2431 self.invalidate()
2422 2432 return True
2423 2433 else:
2424 2434 self.ui.warn(_(b"no interrupted transaction available\n"))
2425 2435 return False
2426 2436
2427 2437 def rollback(self, dryrun=False, force=False):
2428 2438 wlock = lock = dsguard = None
2429 2439 try:
2430 2440 wlock = self.wlock()
2431 2441 lock = self.lock()
2432 2442 if self.svfs.exists(b"undo"):
2433 2443 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2434 2444
2435 2445 return self._rollback(dryrun, force, dsguard)
2436 2446 else:
2437 2447 self.ui.warn(_(b"no rollback information available\n"))
2438 2448 return 1
2439 2449 finally:
2440 2450 release(dsguard, lock, wlock)
2441 2451
2442 2452 @unfilteredmethod # Until we get smarter cache management
2443 2453 def _rollback(self, dryrun, force, dsguard):
2444 2454 ui = self.ui
2445 2455 try:
2446 2456 args = self.vfs.read(b'undo.desc').splitlines()
2447 2457 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2448 2458 if len(args) >= 3:
2449 2459 detail = args[2]
2450 2460 oldtip = oldlen - 1
2451 2461
2452 2462 if detail and ui.verbose:
2453 2463 msg = _(
2454 2464 b'repository tip rolled back to revision %d'
2455 2465 b' (undo %s: %s)\n'
2456 2466 ) % (oldtip, desc, detail)
2457 2467 else:
2458 2468 msg = _(
2459 2469 b'repository tip rolled back to revision %d (undo %s)\n'
2460 2470 ) % (oldtip, desc)
2461 2471 except IOError:
2462 2472 msg = _(b'rolling back unknown transaction\n')
2463 2473 desc = None
2464 2474
2465 2475 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2466 2476 raise error.Abort(
2467 2477 _(
2468 2478 b'rollback of last commit while not checked out '
2469 2479 b'may lose data'
2470 2480 ),
2471 2481 hint=_(b'use -f to force'),
2472 2482 )
2473 2483
2474 2484 ui.status(msg)
2475 2485 if dryrun:
2476 2486 return 0
2477 2487
2478 2488 parents = self.dirstate.parents()
2479 2489 self.destroying()
2480 2490 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2481 2491 transaction.rollback(
2482 2492 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2483 2493 )
2484 2494 bookmarksvfs = bookmarks.bookmarksvfs(self)
2485 2495 if bookmarksvfs.exists(b'undo.bookmarks'):
2486 2496 bookmarksvfs.rename(
2487 2497 b'undo.bookmarks', b'bookmarks', checkambig=True
2488 2498 )
2489 2499 if self.svfs.exists(b'undo.phaseroots'):
2490 2500 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2491 2501 self.invalidate()
2492 2502
2493 2503 has_node = self.changelog.index.has_node
2494 2504 parentgone = any(not has_node(p) for p in parents)
2495 2505 if parentgone:
2496 2506 # prevent dirstateguard from overwriting already restored one
2497 2507 dsguard.close()
2498 2508
2499 2509 narrowspec.restorebackup(self, b'undo.narrowspec')
2500 2510 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2501 2511 self.dirstate.restorebackup(None, b'undo.dirstate')
2502 2512 try:
2503 2513 branch = self.vfs.read(b'undo.branch')
2504 2514 self.dirstate.setbranch(encoding.tolocal(branch))
2505 2515 except IOError:
2506 2516 ui.warn(
2507 2517 _(
2508 2518 b'named branch could not be reset: '
2509 2519 b'current branch is still \'%s\'\n'
2510 2520 )
2511 2521 % self.dirstate.branch()
2512 2522 )
2513 2523
2514 2524 parents = tuple([p.rev() for p in self[None].parents()])
2515 2525 if len(parents) > 1:
2516 2526 ui.status(
2517 2527 _(
2518 2528 b'working directory now based on '
2519 2529 b'revisions %d and %d\n'
2520 2530 )
2521 2531 % parents
2522 2532 )
2523 2533 else:
2524 2534 ui.status(
2525 2535 _(b'working directory now based on revision %d\n') % parents
2526 2536 )
2527 2537 mergestatemod.mergestate.clean(self)
2528 2538
2529 2539 # TODO: if we know which new heads may result from this rollback, pass
2530 2540 # them to destroy(), which will prevent the branchhead cache from being
2531 2541 # invalidated.
2532 2542 self.destroyed()
2533 2543 return 0
2534 2544
2535 2545 def _buildcacheupdater(self, newtransaction):
2536 2546 """called during transaction to build the callback updating cache
2537 2547
2538 2548 Lives on the repository to help extension who might want to augment
2539 2549 this logic. For this purpose, the created transaction is passed to the
2540 2550 method.
2541 2551 """
2542 2552 # we must avoid cyclic reference between repo and transaction.
2543 2553 reporef = weakref.ref(self)
2544 2554
2545 2555 def updater(tr):
2546 2556 repo = reporef()
2547 2557 repo.updatecaches(tr)
2548 2558
2549 2559 return updater
2550 2560
2551 2561 @unfilteredmethod
2552 2562 def updatecaches(self, tr=None, full=False):
2553 2563 """warm appropriate caches
2554 2564
2555 2565 If this function is called after a transaction closed, the transaction
2556 2566 will be available in the 'tr' argument. This can be used to selectively
2557 2567 update caches relevant to the changes in that transaction.
2558 2568 
2559 2569 If 'full' is set, make sure all caches the function knows about have
2560 2570 up-to-date data, even the ones usually loaded more lazily.
2561 2571 """
2562 2572 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2563 2573 # During strip, many caches are invalid, but a
2564 2574 # later call to `destroyed` will refresh them.
2565 2575 return
2566 2576
2567 2577 if tr is None or tr.changes[b'origrepolen'] < len(self):
2568 2578 # accessing the 'served' branchmap should refresh all the others,
2569 2579 self.ui.debug(b'updating the branch cache\n')
2570 2580 self.filtered(b'served').branchmap()
2571 2581 self.filtered(b'served.hidden').branchmap()
2572 2582
2573 2583 if full:
2574 2584 unfi = self.unfiltered()
2575 2585
2576 2586 self.changelog.update_caches(transaction=tr)
2577 2587 self.manifestlog.update_caches(transaction=tr)
2578 2588
2579 2589 rbc = unfi.revbranchcache()
2580 2590 for r in unfi.changelog:
2581 2591 rbc.branchinfo(r)
2582 2592 rbc.write()
2583 2593
2584 2594 # ensure the working copy parents are in the manifestfulltextcache
2585 2595 for ctx in self[b'.'].parents():
2586 2596 ctx.manifest() # accessing the manifest is enough
2587 2597
2588 2598 # accessing fnode cache warms the cache
2589 2599 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2590 2600 # accessing tags warm the cache
2591 2601 self.tags()
2592 2602 self.filtered(b'served').tags()
2593 2603
2594 2604 # The `full` arg is documented as updating even the lazily-loaded
2595 2605 # caches immediately, so we're forcing a write to cause these caches
2596 2606 # to be warmed up even if they haven't explicitly been requested
2597 2607 # yet (if they've never been used by hg, they won't ever have been
2598 2608 # written, even if they're a subset of another kind of cache that
2599 2609 # *has* been used).
2600 2610 for filt in repoview.filtertable.keys():
2601 2611 filtered = self.filtered(filt)
2602 2612 filtered.branchmap().write(filtered)
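# Hypothetical direct call (not in the original source): a maintenance
# script could run `repo.updatecaches(full=True)` under the appropriate
# locks to precompute branchmaps, tags and rev-branch data for every
# filter level before serving traffic.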
2603 2613
2604 2614 def invalidatecaches(self):
2605 2615
2606 2616 if '_tagscache' in vars(self):
2607 2617 # can't use delattr on proxy
2608 2618 del self.__dict__['_tagscache']
2609 2619
2610 2620 self._branchcaches.clear()
2611 2621 self.invalidatevolatilesets()
2612 2622 self._sparsesignaturecache.clear()
2613 2623
2614 2624 def invalidatevolatilesets(self):
2615 2625 self.filteredrevcache.clear()
2616 2626 obsolete.clearobscaches(self)
2617 2627 self._quick_access_changeid_invalidate()
2618 2628
2619 2629 def invalidatedirstate(self):
2620 2630 """Invalidates the dirstate, causing the next call to dirstate
2621 2631 to check if it was modified since the last time it was read,
2622 2632 rereading it if it has.
2623 2633
2624 2634 This is different to dirstate.invalidate() that it doesn't always
2625 2635 rereads the dirstate. Use dirstate.invalidate() if you want to
2626 2636 explicitly read the dirstate again (i.e. restoring it to a previous
2627 2637 known good state)."""
2628 2638 if hasunfilteredcache(self, 'dirstate'):
2629 2639 for k in self.dirstate._filecache:
2630 2640 try:
2631 2641 delattr(self.dirstate, k)
2632 2642 except AttributeError:
2633 2643 pass
2634 2644 delattr(self.unfiltered(), 'dirstate')
2635 2645
2636 2646 def invalidate(self, clearfilecache=False):
2637 2647 """Invalidates both store and non-store parts other than dirstate
2638 2648
2639 2649 If a transaction is running, invalidation of store is omitted,
2640 2650 because discarding in-memory changes might cause inconsistency
2641 2651 (e.g. incomplete fncache causes unintentional failure, but
2642 2652 redundant one doesn't).
2643 2653 """
2644 2654 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2645 2655 for k in list(self._filecache.keys()):
2646 2656 # dirstate is invalidated separately in invalidatedirstate()
2647 2657 if k == b'dirstate':
2648 2658 continue
2649 2659 if (
2650 2660 k == b'changelog'
2651 2661 and self.currenttransaction()
2652 2662 and self.changelog._delayed
2653 2663 ):
2654 2664 # The changelog object may store unwritten revisions. We don't
2655 2665 # want to lose them.
2656 2666 # TODO: Solve the problem instead of working around it.
2657 2667 continue
2658 2668
2659 2669 if clearfilecache:
2660 2670 del self._filecache[k]
2661 2671 try:
2662 2672 delattr(unfiltered, k)
2663 2673 except AttributeError:
2664 2674 pass
2665 2675 self.invalidatecaches()
2666 2676 if not self.currenttransaction():
2667 2677 # TODO: Changing contents of store outside transaction
2668 2678 # causes inconsistency. We should make in-memory store
2669 2679 # changes detectable, and abort if changed.
2670 2680 self.store.invalidatecaches()
2671 2681
2672 2682 def invalidateall(self):
2673 2683 """Fully invalidates both store and non-store parts, causing the
2674 2684 subsequent operation to reread any outside changes."""
2675 2685 # extension should hook this to invalidate its caches
2676 2686 self.invalidate()
2677 2687 self.invalidatedirstate()
2678 2688
2679 2689 @unfilteredmethod
2680 2690 def _refreshfilecachestats(self, tr):
2681 2691 """Reload stats of cached files so that they are flagged as valid"""
2682 2692 for k, ce in self._filecache.items():
2683 2693 k = pycompat.sysstr(k)
2684 2694 if k == 'dirstate' or k not in self.__dict__:
2685 2695 continue
2686 2696 ce.refresh()
2687 2697
2688 2698 def _lock(
2689 2699 self,
2690 2700 vfs,
2691 2701 lockname,
2692 2702 wait,
2693 2703 releasefn,
2694 2704 acquirefn,
2695 2705 desc,
2696 2706 ):
2697 2707 timeout = 0
2698 2708 warntimeout = 0
2699 2709 if wait:
2700 2710 timeout = self.ui.configint(b"ui", b"timeout")
2701 2711 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2702 2712 # internal config: ui.signal-safe-lock
2703 2713 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2704 2714
2705 2715 l = lockmod.trylock(
2706 2716 self.ui,
2707 2717 vfs,
2708 2718 lockname,
2709 2719 timeout,
2710 2720 warntimeout,
2711 2721 releasefn=releasefn,
2712 2722 acquirefn=acquirefn,
2713 2723 desc=desc,
2714 2724 signalsafe=signalsafe,
2715 2725 )
2716 2726 return l
2717 2727
2718 2728 def _afterlock(self, callback):
2719 2729 """add a callback to be run when the repository is fully unlocked
2720 2730
2721 2731 The callback will be executed when the outermost lock is released
2722 2732 (with wlock being higher level than 'lock')."""
2723 2733 for ref in (self._wlockref, self._lockref):
2724 2734 l = ref and ref()
2725 2735 if l and l.held:
2726 2736 l.postrelease.append(callback)
2727 2737 break
2728 2738 else: # no lock has been found.
2729 2739 callback(True)
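# Sketch of a caller (not part of the original source); the callback
# receives a single `success` boolean once the last lock is released:
#
#   def _notify(success):
#       repo.ui.status(b'repository fully unlocked\n')
#   repo._afterlock(_notify)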
2730 2740
2731 2741 def lock(self, wait=True):
2732 2742 """Lock the repository store (.hg/store) and return a weak reference
2733 2743 to the lock. Use this before modifying the store (e.g. committing or
2734 2744 stripping). If you are opening a transaction, get a lock as well.
2735 2745
2736 2746 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2737 2747 'wlock' first to avoid a deadlock hazard."""
2738 2748 l = self._currentlock(self._lockref)
2739 2749 if l is not None:
2740 2750 l.lock()
2741 2751 return l
2742 2752
2743 2753 l = self._lock(
2744 2754 vfs=self.svfs,
2745 2755 lockname=b"lock",
2746 2756 wait=wait,
2747 2757 releasefn=None,
2748 2758 acquirefn=self.invalidate,
2749 2759 desc=_(b'repository %s') % self.origroot,
2750 2760 )
2751 2761 self._lockref = weakref.ref(l)
2752 2762 return l
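# Illustrative lock-ordering sketch (not from the original source),
# mirroring what commit() below does; the transaction name is
# hypothetical:
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction(b'my-change'):
#           ...  # modify the store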
2753 2763
2754 2764 def wlock(self, wait=True):
2755 2765 """Lock the non-store parts of the repository (everything under
2756 2766 .hg except .hg/store) and return a weak reference to the lock.
2757 2767
2758 2768 Use this before modifying files in .hg.
2759 2769
2760 2770 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2761 2771 'wlock' first to avoid a deadlock hazard."""
2762 2772 l = self._wlockref and self._wlockref()
2763 2773 if l is not None and l.held:
2764 2774 l.lock()
2765 2775 return l
2766 2776
2767 2777 # We do not need to check for non-waiting lock acquisition. Such
2768 2778 # acquisitions would not cause a deadlock, as they would just fail.
2769 2779 if wait and (
2770 2780 self.ui.configbool(b'devel', b'all-warnings')
2771 2781 or self.ui.configbool(b'devel', b'check-locks')
2772 2782 ):
2773 2783 if self._currentlock(self._lockref) is not None:
2774 2784 self.ui.develwarn(b'"wlock" acquired after "lock"')
2775 2785
2776 2786 def unlock():
2777 2787 if self.dirstate.pendingparentchange():
2778 2788 self.dirstate.invalidate()
2779 2789 else:
2780 2790 self.dirstate.write(None)
2781 2791
2782 2792 self._filecache[b'dirstate'].refresh()
2783 2793
2784 2794 l = self._lock(
2785 2795 self.vfs,
2786 2796 b"wlock",
2787 2797 wait,
2788 2798 unlock,
2789 2799 self.invalidatedirstate,
2790 2800 _(b'working directory of %s') % self.origroot,
2791 2801 )
2792 2802 self._wlockref = weakref.ref(l)
2793 2803 return l
2794 2804
2795 2805 def _currentlock(self, lockref):
2796 2806 """Returns the lock if it's held, or None if it's not."""
2797 2807 if lockref is None:
2798 2808 return None
2799 2809 l = lockref()
2800 2810 if l is None or not l.held:
2801 2811 return None
2802 2812 return l
2803 2813
2804 2814 def currentwlock(self):
2805 2815 """Returns the wlock if it's held, or None if it's not."""
2806 2816 return self._currentlock(self._wlockref)
2807 2817
2808 2818 def checkcommitpatterns(self, wctx, match, status, fail):
2809 2819 """check for commit arguments that aren't committable"""
2810 2820 if match.isexact() or match.prefix():
2811 2821 matched = set(status.modified + status.added + status.removed)
2812 2822
2813 2823 for f in match.files():
2814 2824 f = self.dirstate.normalize(f)
2815 2825 if f == b'.' or f in matched or f in wctx.substate:
2816 2826 continue
2817 2827 if f in status.deleted:
2818 2828 fail(f, _(b'file not found!'))
2819 2829 # Is it a directory that exists or used to exist?
2820 2830 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2821 2831 d = f + b'/'
2822 2832 for mf in matched:
2823 2833 if mf.startswith(d):
2824 2834 break
2825 2835 else:
2826 2836 fail(f, _(b"no match under directory!"))
2827 2837 elif f not in self.dirstate:
2828 2838 fail(f, _(b"file not tracked!"))
2829 2839
2830 2840 @unfilteredmethod
2831 2841 def commit(
2832 2842 self,
2833 2843 text=b"",
2834 2844 user=None,
2835 2845 date=None,
2836 2846 match=None,
2837 2847 force=False,
2838 2848 editor=None,
2839 2849 extra=None,
2840 2850 ):
2841 2851 """Add a new revision to current repository.
2842 2852
2843 2853 Revision information is gathered from the working directory,
2844 2854 match can be used to filter the committed files. If editor is
2845 2855 supplied, it is called to get a commit message.
2846 2856 """
2847 2857 if extra is None:
2848 2858 extra = {}
2849 2859
2850 2860 def fail(f, msg):
2851 2861 raise error.InputError(b'%s: %s' % (f, msg))
2852 2862
2853 2863 if not match:
2854 2864 match = matchmod.always()
2855 2865
2856 2866 if not force:
2857 2867 match.bad = fail
2858 2868
2859 2869 # lock() for recent changelog (see issue4368)
2860 2870 with self.wlock(), self.lock():
2861 2871 wctx = self[None]
2862 2872 merge = len(wctx.parents()) > 1
2863 2873
2864 2874 if not force and merge and not match.always():
2865 2875 raise error.Abort(
2866 2876 _(
2867 2877 b'cannot partially commit a merge '
2868 2878 b'(do not specify files or patterns)'
2869 2879 )
2870 2880 )
2871 2881
2872 2882 status = self.status(match=match, clean=force)
2873 2883 if force:
2874 2884 status.modified.extend(
2875 2885 status.clean
2876 2886 ) # mq may commit clean files
2877 2887
2878 2888 # check subrepos
2879 2889 subs, commitsubs, newstate = subrepoutil.precommit(
2880 2890 self.ui, wctx, status, match, force=force
2881 2891 )
2882 2892
2883 2893 # make sure all explicit patterns are matched
2884 2894 if not force:
2885 2895 self.checkcommitpatterns(wctx, match, status, fail)
2886 2896
2887 2897 cctx = context.workingcommitctx(
2888 2898 self, status, text, user, date, extra
2889 2899 )
2890 2900
2891 2901 ms = mergestatemod.mergestate.read(self)
2892 2902 mergeutil.checkunresolved(ms)
2893 2903
2894 2904 # internal config: ui.allowemptycommit
2895 2905 if cctx.isempty() and not self.ui.configbool(
2896 2906 b'ui', b'allowemptycommit'
2897 2907 ):
2898 2908 self.ui.debug(b'nothing to commit, clearing merge state\n')
2899 2909 ms.reset()
2900 2910 return None
2901 2911
2902 2912 if merge and cctx.deleted():
2903 2913 raise error.Abort(_(b"cannot commit merge with missing files"))
2904 2914
2905 2915 if editor:
2906 2916 cctx._text = editor(self, cctx, subs)
2907 2917 edited = text != cctx._text
2908 2918
2909 2919 # Save commit message in case this transaction gets rolled back
2910 2920 # (e.g. by a pretxncommit hook). Leave the content alone on
2911 2921 # the assumption that the user will use the same editor again.
2912 2922 msgfn = self.savecommitmessage(cctx._text)
2913 2923
2914 2924 # commit subs and write new state
2915 2925 if subs:
2916 2926 uipathfn = scmutil.getuipathfn(self)
2917 2927 for s in sorted(commitsubs):
2918 2928 sub = wctx.sub(s)
2919 2929 self.ui.status(
2920 2930 _(b'committing subrepository %s\n')
2921 2931 % uipathfn(subrepoutil.subrelpath(sub))
2922 2932 )
2923 2933 sr = sub.commit(cctx._text, user, date)
2924 2934 newstate[s] = (newstate[s][0], sr)
2925 2935 subrepoutil.writestate(self, newstate)
2926 2936
2927 2937 p1, p2 = self.dirstate.parents()
2928 2938 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2929 2939 try:
2930 2940 self.hook(
2931 2941 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2932 2942 )
2933 2943 with self.transaction(b'commit'):
2934 2944 ret = self.commitctx(cctx, True)
2935 2945 # update bookmarks, dirstate and mergestate
2936 2946 bookmarks.update(self, [p1, p2], ret)
2937 2947 cctx.markcommitted(ret)
2938 2948 ms.reset()
2939 2949 except: # re-raises
2940 2950 if edited:
2941 2951 self.ui.write(
2942 2952 _(b'note: commit message saved in %s\n') % msgfn
2943 2953 )
2944 2954 self.ui.write(
2945 2955 _(
2946 2956 b"note: use 'hg commit --logfile "
2947 2957 b".hg/last-message.txt --edit' to reuse it\n"
2948 2958 )
2949 2959 )
2950 2960 raise
2951 2961
2952 2962 def commithook(unused_success):
2953 2963 # hack for commands that use a temporary commit (eg: histedit):
2954 2964 # the temporary commit got stripped before the hook release
2955 2965 if self.changelog.hasnode(ret):
2956 2966 self.hook(
2957 2967 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2958 2968 )
2959 2969
2960 2970 self._afterlock(commithook)
2961 2971 return ret
2962 2972
2963 2973 @unfilteredmethod
2964 2974 def commitctx(self, ctx, error=False, origctx=None):
2965 2975 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2966 2976
2967 2977 @unfilteredmethod
2968 2978 def destroying(self):
2969 2979 """Inform the repository that nodes are about to be destroyed.
2970 2980 Intended for use by strip and rollback, so there's a common
2971 2981 place for anything that has to be done before destroying history.
2972 2982
2973 2983 This is mostly useful for saving state that is in memory and waiting
2974 2984 to be flushed when the current lock is released. Because a call to
2975 2985 destroyed is imminent, the repo will be invalidated causing those
2976 2986 changes to stay in memory (waiting for the next unlock), or vanish
2977 2987 completely.
2978 2988 """
2979 2989 # When using the same lock to commit and strip, the phasecache is left
2980 2990 # dirty after committing. Then when we strip, the repo is invalidated,
2981 2991 # causing those changes to disappear.
2982 2992 if '_phasecache' in vars(self):
2983 2993 self._phasecache.write()
2984 2994
2985 2995 @unfilteredmethod
2986 2996 def destroyed(self):
2987 2997 """Inform the repository that nodes have been destroyed.
2988 2998 Intended for use by strip and rollback, so there's a common
2989 2999 place for anything that has to be done after destroying history.
2990 3000 """
2991 3001 # When one tries to:
2992 3002 # 1) destroy nodes thus calling this method (e.g. strip)
2993 3003 # 2) use phasecache somewhere (e.g. commit)
2994 3004 #
2995 3005 # then 2) will fail because the phasecache contains nodes that were
2996 3006 # removed. We can either remove phasecache from the filecache,
2997 3007 # causing it to reload next time it is accessed, or simply filter
2998 3008 # the removed nodes now and write the updated cache.
2999 3009 self._phasecache.filterunknown(self)
3000 3010 self._phasecache.write()
3001 3011
3002 3012 # refresh all repository caches
3003 3013 self.updatecaches()
3004 3014
3005 3015 # Ensure the persistent tag cache is updated. Doing it now
3006 3016 # means that the tag cache only has to worry about destroyed
3007 3017 # heads immediately after a strip/rollback. That in turn
3008 3018 # guarantees that "cachetip == currenttip" (comparing both rev
3009 3019 # and node) always means no nodes have been added or destroyed.
3010 3020
3011 3021 # XXX this is suboptimal when qrefresh'ing: we strip the current
3012 3022 # head, refresh the tag cache, then immediately add a new head.
3013 3023 # But I think doing it this way is necessary for the "instant
3014 3024 # tag cache retrieval" case to work.
3015 3025 self.invalidate()
3016 3026
3017 3027 def status(
3018 3028 self,
3019 3029 node1=b'.',
3020 3030 node2=None,
3021 3031 match=None,
3022 3032 ignored=False,
3023 3033 clean=False,
3024 3034 unknown=False,
3025 3035 listsubrepos=False,
3026 3036 ):
3027 3037 '''a convenience method that calls node1.status(node2)'''
3028 3038 return self[node1].status(
3029 3039 node2, match, ignored, clean, unknown, listsubrepos
3030 3040 )
3031 3041
3032 3042 def addpostdsstatus(self, ps):
3033 3043 """Add a callback to run within the wlock, at the point at which status
3034 3044 fixups happen.
3035 3045
3036 3046 On status completion, callback(wctx, status) will be called with the
3037 3047 wlock held, unless the dirstate has changed from underneath or the wlock
3038 3048 couldn't be grabbed.
3039 3049
3040 3050 Callbacks should not capture and use a cached copy of the dirstate --
3041 3051 it might change in the meantime. Instead, they should access the
3042 3052 dirstate via wctx.repo().dirstate.
3043 3053
3044 3054 This list is emptied out after each status run -- extensions should
3045 3055 make sure they add to this list each time dirstate.status is called.
3046 3056 Extensions should also make sure they don't call this for statuses
3047 3057 that don't involve the dirstate.
3048 3058 """
3049 3059
3050 3060 # The list is located here for uniqueness reasons -- it is actually
3051 3061 # managed by the workingctx, but that isn't unique per-repo.
3052 3062 self._postdsstatus.append(ps)
3053 3063
3054 3064 def postdsstatus(self):
3055 3065 """Used by workingctx to get the list of post-dirstate-status hooks."""
3056 3066 return self._postdsstatus
3057 3067
3058 3068 def clearpostdsstatus(self):
3059 3069 """Used by workingctx to clear post-dirstate-status hooks."""
3060 3070 del self._postdsstatus[:]
3061 3071
3062 3072 def heads(self, start=None):
3063 3073 if start is None:
3064 3074 cl = self.changelog
3065 3075 headrevs = reversed(cl.headrevs())
3066 3076 return [cl.node(rev) for rev in headrevs]
3067 3077
3068 3078 heads = self.changelog.heads(start)
3069 3079 # sort the output in rev descending order
3070 3080 return sorted(heads, key=self.changelog.rev, reverse=True)
3071 3081
3072 3082 def branchheads(self, branch=None, start=None, closed=False):
3073 3083 """return a (possibly filtered) list of heads for the given branch
3074 3084
3075 3085 Heads are returned in topological order, from newest to oldest.
3076 3086 If branch is None, use the dirstate branch.
3077 3087 If start is not None, return only heads reachable from start.
3078 3088 If closed is True, return heads that are marked as closed as well.
3079 3089 """
3080 3090 if branch is None:
3081 3091 branch = self[None].branch()
3082 3092 branches = self.branchmap()
3083 3093 if not branches.hasbranch(branch):
3084 3094 return []
3085 3095 # the cache returns heads ordered lowest to highest
3086 3096 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3087 3097 if start is not None:
3088 3098 # filter out the heads that cannot be reached from startrev
3089 3099 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3090 3100 bheads = [h for h in bheads if h in fbheads]
3091 3101 return bheads
3092 3102
3093 3103 def branches(self, nodes):
3094 3104 if not nodes:
3095 3105 nodes = [self.changelog.tip()]
3096 3106 b = []
3097 3107 for n in nodes:
3098 3108 t = n
3099 3109 while True:
3100 3110 p = self.changelog.parents(n)
3101 3111 if p[1] != nullid or p[0] == nullid:
3102 3112 b.append((t, n, p[0], p[1]))
3103 3113 break
3104 3114 n = p[0]
3105 3115 return b
3106 3116
3107 3117 def between(self, pairs):
3108 3118 r = []
3109 3119
3110 3120 for top, bottom in pairs:
3111 3121 n, l, i = top, [], 0
3112 3122 f = 1
3113 3123
3114 3124 while n != bottom and n != nullid:
3115 3125 p = self.changelog.parents(n)[0]
3116 3126 if i == f:
3117 3127 l.append(n)
3118 3128 f = f * 2
3119 3129 n = p
3120 3130 i += 1
3121 3131
3122 3132 r.append(l)
3123 3133
3124 3134 return r
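
# Illustrative note (assumption, not part of the upstream source): the loop
# above samples the first-parent chain at exponentially growing distances,
# so between() returns, for each (top, bottom) pair, the nodes found 1, 2,
# 4, 8, ... first-parent steps below ``top`` (stopping at ``bottom``):
#
#     repo.between([(top, bottom)])
#     # -> [[node at distance 1, node at distance 2, node at distance 4, ...]]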
3125 3135
3126 3136 def checkpush(self, pushop):
3127 3137 """Extensions can override this function if additional checks have
3128 3138 to be performed before pushing, or call it if they override the push
3129 3139 command.
3130 3140 """
3131 3141
3132 3142 @unfilteredpropertycache
3133 3143 def prepushoutgoinghooks(self):
3134 3144 """Return util.hooks consisting of hooks called before pushing
3135 3145 changesets; each hook receives a pushop with repo, remote and outgoing attributes.
3136 3146 """
3137 3147 return util.hooks()
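
# Illustrative sketch (assumption, not part of the upstream source): an
# extension would typically register a callback from its reposetup(); the
# names below are hypothetical:
#
#     def _checkoutgoing(pushop):
#         if len(pushop.outgoing.missing) > 1000:
#             pushop.repo.ui.warn(b'pushing over 1000 changesets\n')
#
#     def reposetup(ui, repo):
#         repo.prepushoutgoinghooks.add(b'myext', _checkoutgoing)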
3138 3148
3139 3149 def pushkey(self, namespace, key, old, new):
3140 3150 try:
3141 3151 tr = self.currenttransaction()
3142 3152 hookargs = {}
3143 3153 if tr is not None:
3144 3154 hookargs.update(tr.hookargs)
3145 3155 hookargs = pycompat.strkwargs(hookargs)
3146 3156 hookargs['namespace'] = namespace
3147 3157 hookargs['key'] = key
3148 3158 hookargs['old'] = old
3149 3159 hookargs['new'] = new
3150 3160 self.hook(b'prepushkey', throw=True, **hookargs)
3151 3161 except error.HookAbort as exc:
3152 3162 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3153 3163 if exc.hint:
3154 3164 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3155 3165 return False
3156 3166 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3157 3167 ret = pushkey.push(self, namespace, key, old, new)
3158 3168
3159 3169 def runhook(unused_success):
3160 3170 self.hook(
3161 3171 b'pushkey',
3162 3172 namespace=namespace,
3163 3173 key=key,
3164 3174 old=old,
3165 3175 new=new,
3166 3176 ret=ret,
3167 3177 )
3168 3178
3169 3179 self._afterlock(runhook)
3170 3180 return ret
3171 3181
3172 3182 def listkeys(self, namespace):
3173 3183 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3174 3184 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3175 3185 values = pushkey.list(self, namespace)
3176 3186 self.hook(b'listkeys', namespace=namespace, values=values)
3177 3187 return values
3178 3188
3179 3189 def debugwireargs(self, one, two, three=None, four=None, five=None):
3180 3190 '''used to test argument passing over the wire'''
3181 3191 return b"%s %s %s %s %s" % (
3182 3192 one,
3183 3193 two,
3184 3194 pycompat.bytestr(three),
3185 3195 pycompat.bytestr(four),
3186 3196 pycompat.bytestr(five),
3187 3197 )
3188 3198
3189 3199 def savecommitmessage(self, text):
3190 3200 fp = self.vfs(b'last-message.txt', b'wb')
3191 3201 try:
3192 3202 fp.write(text)
3193 3203 finally:
3194 3204 fp.close()
3195 3205 return self.pathto(fp.name[len(self.root) + 1 :])
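
# Illustrative example (assumption, not part of the upstream source):
#
#     repo.savecommitmessage(b'WIP: fix flaky test\n')
#     # writes .hg/last-message.txt and returns a user-facing path to it,
#     # matching the "note: commit message saved in ..." hint emitted above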
3196 3206
3197 3207
3198 3208 # used to avoid circular references so destructors work
3199 3209 def aftertrans(files):
3200 3210 renamefiles = [tuple(t) for t in files]
3201 3211
3202 3212 def a():
3203 3213 for vfs, src, dest in renamefiles:
3204 3214 # if src and dest refer to the same file, vfs.rename is a no-op,
3205 3215 # leaving both src and dest on disk. delete dest first to make
3206 3216 # sure the rename cannot be such a no-op.
3207 3217 vfs.tryunlink(dest)
3208 3218 try:
3209 3219 vfs.rename(src, dest)
3210 3220 except OSError: # journal file does not yet exist
3211 3221 pass
3212 3222
3213 3223 return a
3214 3224
3215 3225
3216 3226 def undoname(fn):
3217 3227 base, name = os.path.split(fn)
3218 3228 assert name.startswith(b'journal')
3219 3229 return os.path.join(base, name.replace(b'journal', b'undo', 1))
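
# Illustrative examples (assumption, not part of the upstream source):
#
#     undoname(b'journal')                  -> b'undo'
#     undoname(b'store/journal.phaseroots') -> b'store/undo.phaseroots'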
3220 3230
3221 3231
3222 3232 def instance(ui, path, create, intents=None, createopts=None):
3223 3233 localpath = util.urllocalpath(path)
3224 3234 if create:
3225 3235 createrepository(ui, localpath, createopts=createopts)
3226 3236
3227 3237 return makelocalrepository(ui, localpath, intents=intents)
3228 3238
3229 3239
3230 3240 def islocal(path):
3231 3241 return True
3232 3242
3233 3243
3234 3244 def defaultcreateopts(ui, createopts=None):
3235 3245 """Populate the default creation options for a repository.
3236 3246
3237 3247 A dictionary of explicitly requested creation options can be passed
3238 3248 in. Missing keys will be populated.
3239 3249 """
3240 3250 createopts = dict(createopts or {})
3241 3251
3242 3252 if b'backend' not in createopts:
3243 3253 # experimental config: storage.new-repo-backend
3244 3254 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3245 3255
3246 3256 return createopts
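
# Illustrative example (assumption, not part of the upstream source): with a
# default configuration, only the backend key is filled in:
#
#     defaultcreateopts(ui, createopts={b'lfs': True})
#     # -> {b'lfs': True, b'backend': b'revlogv1'}  (assuming the stock
#     #    storage.new-repo-backend default)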
3247 3257
3248 3258
3249 3259 def newreporequirements(ui, createopts):
3250 3260 """Determine the set of requirements for a new local repository.
3251 3261
3252 3262 Extensions can wrap this function to specify custom requirements for
3253 3263 new repositories.
3254 3264 """
3255 3265 # If the repo is being created from a shared repository, we copy
3256 3266 # its requirements.
3257 3267 if b'sharedrepo' in createopts:
3258 3268 requirements = set(createopts[b'sharedrepo'].requirements)
3259 3269 if createopts.get(b'sharedrelative'):
3260 3270 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3261 3271 else:
3262 3272 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3263 3273
3264 3274 return requirements
3265 3275
3266 3276 if b'backend' not in createopts:
3267 3277 raise error.ProgrammingError(
3268 3278 b'backend key not present in createopts; '
3269 3279 b'was defaultcreateopts() called?'
3270 3280 )
3271 3281
3272 3282 if createopts[b'backend'] != b'revlogv1':
3273 3283 raise error.Abort(
3274 3284 _(
3275 3285 b'unable to determine repository requirements for '
3276 3286 b'storage backend: %s'
3277 3287 )
3278 3288 % createopts[b'backend']
3279 3289 )
3280 3290
3281 3291 requirements = {b'revlogv1'}
3282 3292 if ui.configbool(b'format', b'usestore'):
3283 3293 requirements.add(b'store')
3284 3294 if ui.configbool(b'format', b'usefncache'):
3285 3295 requirements.add(b'fncache')
3286 3296 if ui.configbool(b'format', b'dotencode'):
3287 3297 requirements.add(b'dotencode')
3288 3298
3289 3299 compengines = ui.configlist(b'format', b'revlog-compression')
3290 3300 for compengine in compengines:
3291 3301 if compengine in util.compengines:
3292 3302 break
3293 3303 else:
3294 3304 raise error.Abort(
3295 3305 _(
3296 3306 b'compression engines %s defined by '
3297 3307 b'format.revlog-compression not available'
3298 3308 )
3299 3309 % b', '.join(b'"%s"' % e for e in compengines),
3300 3310 hint=_(
3301 3311 b'run "hg debuginstall" to list available '
3302 3312 b'compression engines'
3303 3313 ),
3304 3314 )
3305 3315
3306 3316 # zlib is the historical default and doesn't need an explicit requirement.
3307 3317 if compengine == b'zstd':
3308 3318 requirements.add(b'revlog-compression-zstd')
3309 3319 elif compengine != b'zlib':
3310 3320 requirements.add(b'exp-compression-%s' % compengine)
3311 3321
3312 3322 if scmutil.gdinitconfig(ui):
3313 3323 requirements.add(b'generaldelta')
3314 3324 if ui.configbool(b'format', b'sparse-revlog'):
3315 3325 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3316 3326
3317 3327 # experimental config: format.exp-use-side-data
3318 3328 if ui.configbool(b'format', b'exp-use-side-data'):
3319 3329 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3320 3330 # experimental config: format.exp-use-copies-side-data-changeset
3321 3331 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3322 3332 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3323 3333 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3324 3334 if ui.configbool(b'experimental', b'treemanifest'):
3325 3335 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3326 3336
3327 3337 revlogv2 = ui.config(b'experimental', b'revlogv2')
3328 3338 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3329 3339 requirements.remove(b'revlogv1')
3330 3340 # generaldelta is implied by revlogv2.
3331 3341 requirements.discard(b'generaldelta')
3332 3342 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3333 3343 # experimental config: format.internal-phase
3334 3344 if ui.configbool(b'format', b'internal-phase'):
3335 3345 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3336 3346
3337 3347 if createopts.get(b'narrowfiles'):
3338 3348 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3339 3349
3340 3350 if createopts.get(b'lfs'):
3341 3351 requirements.add(b'lfs')
3342 3352
3343 3353 if ui.configbool(b'format', b'bookmarks-in-store'):
3344 3354 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3345 3355
3346 3356 if ui.configbool(b'format', b'use-persistent-nodemap'):
3347 3357 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3348 3358
3349 3359 # if share-safe is enabled, let's create the new repository with the new
3350 3360 # requirement
3351 3361 if ui.configbool(b'format', b'exp-share-safe'):
3352 3362 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3353 3363
3354 3364 return requirements
3355 3365
3356 3366
3357 3367 def checkrequirementscompat(ui, requirements):
3358 3368 """Checks compatibility of repository requirements enabled and disabled.
3359 3369
3360 3370 Returns a set of requirements which need to be dropped because dependent
3361 3371 requirements are not enabled. Also warns users about it."""
3362 3372
3363 3373 dropped = set()
3364 3374
3365 3375 if b'store' not in requirements:
3366 3376 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3367 3377 ui.warn(
3368 3378 _(
3369 3379 b'ignoring enabled \'format.bookmarks-in-store\' config '
3370 3380 b'because it is incompatible with disabled '
3371 3381 b'\'format.usestore\' config\n'
3372 3382 )
3373 3383 )
3374 3384 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3375 3385
3376 3386 if (
3377 3387 requirementsmod.SHARED_REQUIREMENT in requirements
3378 3388 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3379 3389 ):
3380 3390 raise error.Abort(
3381 3391 _(
3382 3392 b"cannot create shared repository as source was created"
3383 3393 b" with 'format.usestore' config disabled"
3384 3394 )
3385 3395 )
3386 3396
3387 3397 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3388 3398 ui.warn(
3389 3399 _(
3390 3400 b"ignoring enabled 'format.exp-share-safe' config because "
3391 3401 b"it is incompatible with disabled 'format.usestore'"
3392 3402 b" config\n"
3393 3403 )
3394 3404 )
3395 3405 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3396 3406
3397 3407 return dropped
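
# Illustrative sketch (grounded in createrepository() below): callers prune
# the proposed requirements with the returned set:
#
#     requirements = newreporequirements(ui, createopts=createopts)
#     requirements -= checkrequirementscompat(ui, requirements)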
3398 3408
3399 3409
3400 3410 def filterknowncreateopts(ui, createopts):
3401 3411 """Filters a dict of repo creation options against options that are known.
3402 3412
3403 3413 Receives a dict of repo creation options and returns a dict of those
3404 3414 options that we don't know how to handle.
3405 3415
3406 3416 This function is called as part of repository creation. If the
3407 3417 returned dict contains any items, repository creation will not
3408 3418 be allowed, as it means there was a request to create a repository
3409 3419 with options not recognized by loaded code.
3410 3420
3411 3421 Extensions can wrap this function to filter out creation options
3412 3422 they know how to handle.
3413 3423 """
3414 3424 known = {
3415 3425 b'backend',
3416 3426 b'lfs',
3417 3427 b'narrowfiles',
3418 3428 b'sharedrepo',
3419 3429 b'sharedrelative',
3420 3430 b'shareditems',
3421 3431 b'shallowfilestore',
3422 3432 }
3423 3433
3424 3434 return {k: v for k, v in createopts.items() if k not in known}
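
# Illustrative example (assumption, not part of the upstream source;
# b'frobnicate' is a made-up option):
#
#     filterknowncreateopts(ui, {b'lfs': True, b'frobnicate': True})
#     # -> {b'frobnicate': True}, so repository creation will be refused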
3425 3435
3426 3436
3427 3437 def createrepository(ui, path, createopts=None):
3428 3438 """Create a new repository in a vfs.
3429 3439
3430 3440 ``path`` path to the new repo's working directory.
3431 3441 ``createopts`` options for the new repository.
3432 3442
3433 3443 The following keys for ``createopts`` are recognized:
3434 3444
3435 3445 backend
3436 3446 The storage backend to use.
3437 3447 lfs
3438 3448 Repository will be created with ``lfs`` requirement. The lfs extension
3439 3449 will automatically be loaded when the repository is accessed.
3440 3450 narrowfiles
3441 3451 Set up repository to support narrow file storage.
3442 3452 sharedrepo
3443 3453 Repository object from which storage should be shared.
3444 3454 sharedrelative
3445 3455 Boolean indicating if the path to the shared repo should be
3446 3456 stored as relative. By default, the pointer to the "parent" repo
3447 3457 is stored as an absolute path.
3448 3458 shareditems
3449 3459 Set of items to share to the new repository (in addition to storage).
3450 3460 shallowfilestore
3451 3461 Indicates that storage for files should be shallow (not all ancestor
3452 3462 revisions are known).
3453 3463 """
3454 3464 createopts = defaultcreateopts(ui, createopts=createopts)
3455 3465
3456 3466 unknownopts = filterknowncreateopts(ui, createopts)
3457 3467
3458 3468 if not isinstance(unknownopts, dict):
3459 3469 raise error.ProgrammingError(
3460 3470 b'filterknowncreateopts() did not return a dict'
3461 3471 )
3462 3472
3463 3473 if unknownopts:
3464 3474 raise error.Abort(
3465 3475 _(
3466 3476 b'unable to create repository because of unknown '
3467 3477 b'creation option: %s'
3468 3478 )
3469 3479 % b', '.join(sorted(unknownopts)),
3470 3480 hint=_(b'is a required extension not loaded?'),
3471 3481 )
3472 3482
3473 3483 requirements = newreporequirements(ui, createopts=createopts)
3474 3484 requirements -= checkrequirementscompat(ui, requirements)
3475 3485
3476 3486 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3477 3487
3478 3488 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3479 3489 if hgvfs.exists():
3480 3490 raise error.RepoError(_(b'repository %s already exists') % path)
3481 3491
3482 3492 if b'sharedrepo' in createopts:
3483 3493 sharedpath = createopts[b'sharedrepo'].sharedpath
3484 3494
3485 3495 if createopts.get(b'sharedrelative'):
3486 3496 try:
3487 3497 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3488 3498 except (IOError, ValueError) as e:
3489 3499 # ValueError is raised on Windows if the drive letters differ
3490 3500 # on each path.
3491 3501 raise error.Abort(
3492 3502 _(b'cannot calculate relative path'),
3493 3503 hint=stringutil.forcebytestr(e),
3494 3504 )
3495 3505
3496 3506 if not wdirvfs.exists():
3497 3507 wdirvfs.makedirs()
3498 3508
3499 3509 hgvfs.makedir(notindexed=True)
3500 3510 if b'sharedrepo' not in createopts:
3501 3511 hgvfs.mkdir(b'cache')
3502 3512 hgvfs.mkdir(b'wcache')
3503 3513
3504 3514 if b'store' in requirements and b'sharedrepo' not in createopts:
3505 3515 hgvfs.mkdir(b'store')
3506 3516
3507 3517 # We create an invalid changelog outside the store so very old
3508 3518 # Mercurial versions (which didn't know about the requirements
3509 3519 # file) encounter an error on reading the changelog. This
3510 3520 # effectively locks out old clients and prevents them from
3511 3521 # mucking with a repo in an unknown format.
3512 3522 #
3513 3523 # The revlog header has version 2, which won't be recognized by
3514 3524 # such old clients.
3515 3525 hgvfs.append(
3516 3526 b'00changelog.i',
3517 3527 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3518 3528 b'layout',
3519 3529 )
3520 3530
3521 3531 # Filter the requirements into working copy and store ones
3522 3532 wcreq, storereq = scmutil.filterrequirements(requirements)
3523 3533 # write working copy ones
3524 3534 scmutil.writerequires(hgvfs, wcreq)
3525 3535 # If there are store requirements and the current repository
3526 3536 # is not a shared one, write stored requirements
3527 3537 # For a new shared repository, we don't need to write the store
3528 3538 # requirements as they are already present in the store's requires
3529 3539 if storereq and b'sharedrepo' not in createopts:
3530 3540 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3531 3541 scmutil.writerequires(storevfs, storereq)
3532 3542
3533 3543 # Write out a file telling readers where to find the shared store.
3534 3544 if b'sharedrepo' in createopts:
3535 3545 hgvfs.write(b'sharedpath', sharedpath)
3536 3546
3537 3547 if createopts.get(b'shareditems'):
3538 3548 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3539 3549 hgvfs.write(b'shared', shared)
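
# Illustrative sketch (assumption, not part of the upstream source): creating
# and then opening a fresh repository, assuming a ui instance and a writable
# path:
#
#     createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
#     repo = makelocalrepository(ui, b'/tmp/newrepo')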
3540 3550
3541 3551
3542 3552 def poisonrepository(repo):
3543 3553 """Poison a repository instance so it can no longer be used."""
3544 3554 # Perform any cleanup on the instance.
3545 3555 repo.close()
3546 3556
3547 3557 # Our strategy is to replace the type of the object with one that
3548 3558 # has all attribute lookups result in error.
3549 3559 #
3550 3560 # But we have to allow the close() method because some constructors
3551 3561 # of repos call close() on repo references.
3552 3562 class poisonedrepository(object):
3553 3563 def __getattribute__(self, item):
3554 3564 if item == 'close':
3555 3565 return object.__getattribute__(self, item)
3556 3566
3557 3567 raise error.ProgrammingError(
3558 3568 b'repo instances should not be used after unshare'
3559 3569 )
3560 3570
3561 3571 def close(self):
3562 3572 pass
3563 3573
3564 3574 # We may have a repoview, which intercepts __setattr__. So be sure
3565 3575 # we operate at the lowest level possible.
3566 3576 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1472 +1,1481 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 hg,
19 19 localrepo,
20 20 manifest,
21 21 metadata,
22 22 pycompat,
23 23 requirements,
24 24 revlog,
25 25 scmutil,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29
30 30 from .utils import compression
31 31
32 32 # list of requirements that request a clone of all revlog if added/removed
33 33 RECLONES_REQUIREMENTS = {
34 34 b'generaldelta',
35 35 requirements.SPARSEREVLOG_REQUIREMENT,
36 36 }
37 37
38 38
39 39 def requiredsourcerequirements(repo):
40 40 """Obtain requirements required to be present to upgrade a repo.
41 41
42 42 An upgrade will not be allowed if the repository doesn't have the
43 43 requirements returned by this function.
44 44 """
45 45 return {
46 46 # Introduced in Mercurial 0.9.2.
47 47 b'revlogv1',
48 48 # Introduced in Mercurial 0.9.2.
49 49 b'store',
50 50 }
51 51
52 52
53 53 def blocksourcerequirements(repo):
54 54 """Obtain requirements that will prevent an upgrade from occurring.
55 55
55 56 An upgrade cannot be performed if the source repository contains a
56 57 requirement in the returned set.
58 58 """
59 59 return {
60 60 # The upgrade code does not yet support these experimental features.
61 61 # This is an artificial limitation.
62 62 requirements.TREEMANIFEST_REQUIREMENT,
63 63 # This was a precursor to generaldelta and was never enabled by default.
64 64 # It should (hopefully) not exist in the wild.
65 65 b'parentdelta',
66 66 # Upgrade should operate on the actual store, not the shared link.
67 67 requirements.SHARED_REQUIREMENT,
68 68 }
69 69
70 70
71 71 def supportremovedrequirements(repo):
72 72 """Obtain requirements that can be removed during an upgrade.
73 73
74 74 If an upgrade were to create a repository that dropped a requirement,
75 75 the dropped requirement must appear in the returned set for the upgrade
76 76 to be allowed.
77 77 """
78 78 supported = {
79 79 requirements.SPARSEREVLOG_REQUIREMENT,
80 80 requirements.SIDEDATA_REQUIREMENT,
81 81 requirements.COPIESSDC_REQUIREMENT,
82 82 requirements.NODEMAP_REQUIREMENT,
83 requirements.SHARESAFE_REQUIREMENT,
83 84 }
84 85 for name in compression.compengines:
85 86 engine = compression.compengines[name]
86 87 if engine.available() and engine.revlogheader():
87 88 supported.add(b'exp-compression-%s' % name)
88 89 if engine.name() == b'zstd':
89 90 supported.add(b'revlog-compression-zstd')
90 91 return supported
91 92
92 93
93 94 def supporteddestrequirements(repo):
94 95 """Obtain requirements that upgrade supports in the destination.
95 96
96 97 If the result of the upgrade would create requirements not in this set,
97 98 the upgrade is disallowed.
98 99
99 100 Extensions should monkeypatch this to add their custom requirements.
100 101 """
101 102 supported = {
102 103 b'dotencode',
103 104 b'fncache',
104 105 b'generaldelta',
105 106 b'revlogv1',
106 107 b'store',
107 108 requirements.SPARSEREVLOG_REQUIREMENT,
108 109 requirements.SIDEDATA_REQUIREMENT,
109 110 requirements.COPIESSDC_REQUIREMENT,
110 111 requirements.NODEMAP_REQUIREMENT,
111 112 requirements.SHARESAFE_REQUIREMENT,
112 113 }
113 114 for name in compression.compengines:
114 115 engine = compression.compengines[name]
115 116 if engine.available() and engine.revlogheader():
116 117 supported.add(b'exp-compression-%s' % name)
117 118 if engine.name() == b'zstd':
118 119 supported.add(b'revlog-compression-zstd')
119 120 return supported
120 121
121 122
122 123 def allowednewrequirements(repo):
123 124 """Obtain requirements that can be added to a repository during upgrade.
124 125
125 126 This is used to disallow proposed requirements from being added when
126 127 they weren't present before.
127 128
128 129 We use a list of allowed requirement additions instead of a list of known
129 130 bad additions because the whitelist approach is safer and will prevent
130 131 future, unknown requirements from accidentally being added.
131 132 """
132 133 supported = {
133 134 b'dotencode',
134 135 b'fncache',
135 136 b'generaldelta',
136 137 requirements.SPARSEREVLOG_REQUIREMENT,
137 138 requirements.SIDEDATA_REQUIREMENT,
138 139 requirements.COPIESSDC_REQUIREMENT,
139 140 requirements.NODEMAP_REQUIREMENT,
140 141 requirements.SHARESAFE_REQUIREMENT,
141 142 }
142 143 for name in compression.compengines:
143 144 engine = compression.compengines[name]
144 145 if engine.available() and engine.revlogheader():
145 146 supported.add(b'exp-compression-%s' % name)
146 147 if engine.name() == b'zstd':
147 148 supported.add(b'revlog-compression-zstd')
148 149 return supported
149 150
150 151
151 152 def preservedrequirements(repo):
152 153 return set()
153 154
154 155
155 156 DEFICIENCY = b'deficiency'
156 157 OPTIMISATION = b'optimization'
157 158
158 159
159 160 class improvement(object):
160 161 """Represents an improvement that can be made as part of an upgrade.
161 162
162 163 The following attributes are defined on each instance:
163 164
164 165 name
165 166 Machine-readable string uniquely identifying this improvement. It
166 167 will be mapped to an action later in the upgrade process.
167 168
168 169 type
169 170 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
170 171 problem. An optimization is an action (sometimes optional) that
171 172 can be taken to further improve the state of the repository.
172 173
173 174 description
174 175 Message intended for humans explaining the improvement in more detail,
175 176 including the implications of it. For ``DEFICIENCY`` types, it should
176 177 be worded in the present tense. For ``OPTIMISATION`` types, it should
177 178 be worded in the future tense.
178 179
179 180 upgrademessage
180 181 Message intended for humans explaining what an upgrade addressing this
181 182 issue will do. Should be worded in the future tense.
182 183 """
183 184
184 185 def __init__(self, name, type, description, upgrademessage):
185 186 self.name = name
186 187 self.type = type
187 188 self.description = description
188 189 self.upgrademessage = upgrademessage
189 190
190 191 def __eq__(self, other):
191 192 if not isinstance(other, improvement):
192 193 # This is what python tells us to do
193 194 return NotImplemented
194 195 return self.name == other.name
195 196
196 197 def __ne__(self, other):
197 198 return not (self == other)
198 199
199 200 def __hash__(self):
200 201 return hash(self.name)
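
# Illustrative sketch (mirroring the register_optimization() calls later in
# this file): an optimization is declared by instantiating this class:
#
#     improvement(
#         name=b're-delta-parent',
#         type=OPTIMISATION,
#         description=_(b'...'),
#         upgrademessage=_(b'...'),
#     )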
201 202
202 203
203 204 allformatvariant = []
204 205
205 206
206 207 def registerformatvariant(cls):
207 208 allformatvariant.append(cls)
208 209 return cls
209 210
210 211
211 212 class formatvariant(improvement):
212 213 """an improvement subclass dedicated to repository format"""
213 214
214 215 type = DEFICIENCY
215 216 ### The following attributes should be defined for each class:
216 217
217 218 # machine-readable string uniquely identifying this improvement. it will be
218 219 # mapped to an action later in the upgrade process.
219 220 name = None
220 221
221 222 # message intended for humans explaining the improvement in more detail,
222 223 # including the implications of it. For ``DEFICIENCY`` types, it
223 224 # should be worded in the present tense.
224 225 description = None
225 226
226 227 # message intended for humans explaining what an upgrade addressing this
227 228 # issue will do. should be worded in the future tense.
228 229 upgrademessage = None
229 230
230 231 # value of current Mercurial default for new repository
231 232 default = None
232 233
233 234 def __init__(self):
234 235 raise NotImplementedError()
235 236
236 237 @staticmethod
237 238 def fromrepo(repo):
238 239 """current value of the variant in the repository"""
239 240 raise NotImplementedError()
240 241
241 242 @staticmethod
242 243 def fromconfig(repo):
243 244 """current value of the variant in the configuration"""
244 245 raise NotImplementedError()
245 246
246 247
247 248 class requirementformatvariant(formatvariant):
248 249 """formatvariant based on a 'requirement' name.
249 250
250 251 Many format variants are controlled by a 'requirement'. We define a small
251 252 subclass to factor the code.
252 253 """
253 254
254 255 # the requirement that controls this format variant
255 256 _requirement = None
256 257
257 258 @staticmethod
258 259 def _newreporequirements(ui):
259 260 return localrepo.newreporequirements(
260 261 ui, localrepo.defaultcreateopts(ui)
261 262 )
262 263
263 264 @classmethod
264 265 def fromrepo(cls, repo):
265 266 assert cls._requirement is not None
266 267 return cls._requirement in repo.requirements
267 268
268 269 @classmethod
269 270 def fromconfig(cls, repo):
270 271 assert cls._requirement is not None
271 272 return cls._requirement in cls._newreporequirements(repo.ui)
272 273
273 274
274 275 @registerformatvariant
275 276 class fncache(requirementformatvariant):
276 277 name = b'fncache'
277 278
278 279 _requirement = b'fncache'
279 280
280 281 default = True
281 282
282 283 description = _(
283 284 b'long and reserved filenames may not work correctly; '
284 285 b'repository performance is sub-optimal'
285 286 )
286 287
287 288 upgrademessage = _(
288 289 b'repository will be more resilient to storing '
289 290 b'certain paths and performance of certain '
290 291 b'operations should be improved'
291 292 )
292 293
293 294
294 295 @registerformatvariant
295 296 class dotencode(requirementformatvariant):
296 297 name = b'dotencode'
297 298
298 299 _requirement = b'dotencode'
299 300
300 301 default = True
301 302
302 303 description = _(
303 304 b'storage of filenames beginning with a period or '
304 305 b'space may not work correctly'
305 306 )
306 307
307 308 upgrademessage = _(
308 309 b'repository will be better able to store files '
309 310 b'beginning with a space or period'
310 311 )
311 312
312 313
313 314 @registerformatvariant
314 315 class generaldelta(requirementformatvariant):
315 316 name = b'generaldelta'
316 317
317 318 _requirement = b'generaldelta'
318 319
319 320 default = True
320 321
321 322 description = _(
322 323 b'deltas within internal storage are unable to '
323 324 b'choose optimal revisions; repository is larger and '
324 325 b'slower than it could be; interaction with other '
325 326 b'repositories may require extra network and CPU '
326 327 b'resources, making "hg push" and "hg pull" slower'
327 328 )
328 329
329 330 upgrademessage = _(
330 331 b'repository storage will be able to create '
331 332 b'optimal deltas; new repository data will be '
332 333 b'smaller and read times should decrease; '
333 334 b'interacting with other repositories using this '
334 335 b'storage model should require less network and '
335 336 b'CPU resources, making "hg push" and "hg pull" '
336 337 b'faster'
337 338 )
338 339
339 340
340 341 @registerformatvariant
341 342 class sharedsafe(requirementformatvariant):
342 343 name = b'exp-sharesafe'
343 344 _requirement = requirements.SHARESAFE_REQUIREMENT
344 345
345 346 default = False
346 347
347 348 description = _(
348 349 b'old shared repositories do not share source repository '
349 350 b'requirements and config. This leads to various problems '
350 351 b'when the source repository format is upgraded or some new '
351 352 b'extensions are enabled.'
352 353 )
353 354
354 355 upgrademessage = _(
355 356 b'Upgrades a repository to share-safe format so that future '
356 357 b'shares of this repository share its requirements and configs.'
357 358 )
358 359
359 360
360 361 @registerformatvariant
361 362 class sparserevlog(requirementformatvariant):
362 363 name = b'sparserevlog'
363 364
364 365 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
365 366
366 367 default = True
367 368
368 369 description = _(
369 370 b'in order to limit disk reading and memory usage on older '
370 371 b'versions, the span of a delta chain from its root to its '
371 372 b'end is limited, whatever the relevant data in this span. '
372 373 b'This can severely limit Mercurial\'s ability to build good '
373 374 b'chains of deltas, resulting in much more storage space being '
374 375 b'taken and limiting the reusability of on-disk deltas during '
375 376 b'exchange.'
376 377 )
377 378
378 379 upgrademessage = _(
379 380 b'Revlogs will support delta chains with more unused data '
380 381 b'between payloads. These gaps will be skipped at read '
381 382 b'time. This allows for better delta chains, yielding '
382 383 b'better compression and faster exchanges with the server.'
383 384 )
384 385
385 386
386 387 @registerformatvariant
387 388 class sidedata(requirementformatvariant):
388 389 name = b'sidedata'
389 390
390 391 _requirement = requirements.SIDEDATA_REQUIREMENT
391 392
392 393 default = False
393 394
394 395 description = _(
395 396 b'Allows storage of extra data alongside a revision, '
396 397 b'unlocking various caching options.'
397 398 )
398 399
399 400 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
400 401
401 402
402 403 @registerformatvariant
403 404 class persistentnodemap(requirementformatvariant):
404 405 name = b'persistent-nodemap'
405 406
406 407 _requirement = requirements.NODEMAP_REQUIREMENT
407 408
408 409 default = False
409 410
410 411 description = _(
411 412 b'persist the node -> rev mapping on disk to speed up lookups'
412 413 )
413 414
414 415 upgrademessage = _(b'Speed up revision lookups by node id.')
415 416
416 417
417 418 @registerformatvariant
418 419 class copiessdc(requirementformatvariant):
419 420 name = b'copies-sdc'
420 421
421 422 _requirement = requirements.COPIESSDC_REQUIREMENT
422 423
423 424 default = False
424 425
425 426 description = _(b'Stores copies information alongside changesets.')
426 427
427 428 upgrademessage = _(
428 429 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
429 430 )
430 431
431 432
432 433 @registerformatvariant
433 434 class removecldeltachain(formatvariant):
434 435 name = b'plain-cl-delta'
435 436
436 437 default = True
437 438
438 439 description = _(
439 440 b'changelog storage is using deltas instead of '
440 441 b'raw entries; changelog reading and any '
441 442 b'operation relying on changelog data are slower '
442 443 b'than they could be'
443 444 )
444 445
445 446 upgrademessage = _(
446 447 b'changelog storage will be reformatted to '
447 448 b'store raw entries; changelog reading will be '
448 449 b'faster; changelog size may be reduced'
449 450 )
450 451
451 452 @staticmethod
452 453 def fromrepo(repo):
453 454 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
454 455 # changelogs with deltas.
455 456 cl = repo.changelog
456 457 chainbase = cl.chainbase
457 458 return all(rev == chainbase(rev) for rev in cl)
458 459
459 460 @staticmethod
460 461 def fromconfig(repo):
461 462 return True
462 463
463 464
464 465 @registerformatvariant
465 466 class compressionengine(formatvariant):
466 467 name = b'compression'
467 468 default = b'zlib'
468 469
469 470 description = _(
470 471 b'Compression algorithm used to compress data. '
471 472 b'Some engines are faster than others'
472 473 )
473 474
474 475 upgrademessage = _(
475 476 b'revlog content will be recompressed with the new algorithm.'
476 477 )
477 478
478 479 @classmethod
479 480 def fromrepo(cls, repo):
480 481 # we allow multiple compression engine requirements to co-exist because,
481 482 # strictly speaking, revlogs seem to support mixed compression styles.
482 483 #
483 484 # The compression used for new entries will be "the last one"
484 485 compression = b'zlib'
485 486 for req in repo.requirements:
486 487 prefix = req.startswith
487 488 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
488 489 compression = req.split(b'-', 2)[2]
489 490 return compression
490 491
491 492 @classmethod
492 493 def fromconfig(cls, repo):
493 494 compengines = repo.ui.configlist(b'format', b'revlog-compression')
494 495 # return the first valid value as the selection code would do
495 496 for comp in compengines:
496 497 if comp in util.compengines:
497 498 return comp
498 499
499 500 # no valid compression found; let's display them all for clarity
500 501 return b','.join(compengines)
501 502
502 503
503 504 @registerformatvariant
504 505 class compressionlevel(formatvariant):
505 506 name = b'compression-level'
506 507 default = b'default'
507 508
508 509 description = _(b'compression level')
509 510
510 511 upgrademessage = _(b'revlog content will be recompressed')
511 512
512 513 @classmethod
513 514 def fromrepo(cls, repo):
514 515 comp = compressionengine.fromrepo(repo)
515 516 level = None
516 517 if comp == b'zlib':
517 518 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
518 519 elif comp == b'zstd':
519 520 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
520 521 if level is None:
521 522 return b'default'
522 523 return bytes(level)
523 524
524 525 @classmethod
525 526 def fromconfig(cls, repo):
526 527 comp = compressionengine.fromconfig(repo)
527 528 level = None
528 529 if comp == b'zlib':
529 530 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
530 531 elif comp == b'zstd':
531 532 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
532 533 if level is None:
533 534 return b'default'
534 535 return bytes(level)
535 536
536 537
537 538 def finddeficiencies(repo):
538 539 """returns a list of deficiencies that the repo suffers from"""
539 540 deficiencies = []
540 541
541 542 # We could detect lack of revlogv1 and store here, but they were added
542 543 # in 0.9.2 and we don't support upgrading repos without these
543 544 # requirements, so let's not bother.
544 545
545 546 for fv in allformatvariant:
546 547 if not fv.fromrepo(repo):
547 548 deficiencies.append(fv)
548 549
549 550 return deficiencies
550 551
551 552
552 553 # search without '-' to support older form on newer client.
553 554 #
554 555 # We don't enforce backward compatibility for debug commands, so this
555 556 # might eventually be dropped. However, having to use two different
556 557 # forms in scripts when comparing results is annoying enough to keep
557 558 # backward compatibility for a while.
558 559 legacy_opts_map = {
559 560 b'redeltaparent': b're-delta-parent',
560 561 b'redeltamultibase': b're-delta-multibase',
561 562 b'redeltaall': b're-delta-all',
562 563 b'redeltafulladd': b're-delta-fulladd',
563 564 }
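
# Illustrative sketch (assumption, not part of the upstream source): callers
# normalize user-supplied optimization names through this map, so both
# spellings select the same action:
#
#     optimize = {legacy_opts_map.get(o, o) for o in requested}
#     # b'redeltaparent' and b're-delta-parent' both map to b're-delta-parent'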
564 565
565 566 ALL_OPTIMISATIONS = []
566 567
567 568
568 569 def register_optimization(obj):
569 570 ALL_OPTIMISATIONS.append(obj)
570 571 return obj
571 572
572 573
573 574 register_optimization(
574 575 improvement(
575 576 name=b're-delta-parent',
576 577 type=OPTIMISATION,
577 578 description=_(
578 579 b'deltas within internal storage will be recalculated to '
579 580 b'choose an optimal base revision where this was not '
580 581 b'already done; the size of the repository may shrink and '
581 582 b'various operations may become faster; the first time '
582 583 b'this optimization is performed could slow down upgrade '
583 584 b'execution considerably; subsequent invocations should '
584 585 b'not run noticeably slower'
585 586 ),
586 587 upgrademessage=_(
587 588 b'deltas within internal storage will choose a new '
588 589 b'base revision if needed'
589 590 ),
590 591 )
591 592 )
592 593
593 594 register_optimization(
594 595 improvement(
595 596 name=b're-delta-multibase',
596 597 type=OPTIMISATION,
597 598 description=_(
598 599 b'deltas within internal storage will be recalculated '
599 600 b'against multiple base revision and the smallest '
600 601 b'difference will be used; the size of the repository may '
601 602 b'shrink significantly when there are many merges; this '
602 603 b'optimization will slow down execution in proportion to '
603 604 b'the number of merges in the repository and the amount '
604 605 b'of files in the repository; this slow down should not '
605 606 b'be significant unless there are tens of thousands of '
606 607 b'files and thousands of merges'
607 608 ),
608 609 upgrademessage=_(
609 610 b'deltas within internal storage will choose an '
610 611 b'optimal delta by computing deltas against multiple '
611 612 b'parents; may slow down execution time '
612 613 b'significantly'
613 614 ),
614 615 )
615 616 )
616 617
617 618 register_optimization(
618 619 improvement(
619 620 name=b're-delta-all',
620 621 type=OPTIMISATION,
621 622 description=_(
622 623 b'deltas within internal storage will always be '
623 624 b'recalculated without reusing prior deltas; this will '
624 625 b'likely make execution run several times slower; this '
625 626 b'optimization is typically not needed'
626 627 ),
627 628 upgrademessage=_(
628 629 b'deltas within internal storage will be fully '
629 630 b'recomputed; this will likely drastically slow down '
630 631 b'execution time'
631 632 ),
632 633 )
633 634 )
634 635
635 636 register_optimization(
636 637 improvement(
637 638 name=b're-delta-fulladd',
638 639 type=OPTIMISATION,
639 640 description=_(
640 641 b'every revision will be re-added as if it was new '
641 642 b'content. It will go through the full storage '
642 643 b'mechanism giving extensions a chance to process it '
643 644 b'(eg. lfs). This is similar to "re-delta-all" but even '
644 645 b'slower since more logic is involved.'
645 646 ),
646 647 upgrademessage=_(
647 648 b'each revision will be added as new content to the '
648 649 b'internal storage; this will likely drastically slow '
649 650 b'down execution time, but some extensions might need '
650 651 b'it'
651 652 ),
652 653 )
653 654 )
654 655
655 656
656 657 def findoptimizations(repo):
657 658 """Determine optimisation that could be used during upgrade"""
658 659 # These are unconditionally added. There is logic later that figures out
659 660 # which ones to apply.
660 661 return list(ALL_OPTIMISATIONS)
661 662
662 663
663 664 def determineactions(repo, deficiencies, sourcereqs, destreqs):
664 665 """Determine upgrade actions that will be performed.
665 666
666 667 Given a list of improvements as returned by ``finddeficiencies`` and
667 668 ``findoptimizations``, determine the list of upgrade actions that
668 669 will be performed.
669 670
670 671 The role of this function is to filter improvements if needed, apply
671 672 recommended optimizations from the improvements list that make sense,
672 673 etc.
673 674
674 675 Returns a list of action names.
675 676 """
676 677 newactions = []
677 678
678 679 for d in deficiencies:
679 680 name = d._requirement
680 681
681 682 # If the action is a requirement that doesn't show up in the
682 683 # destination requirements, prune the action.
683 684 if name is not None and name not in destreqs:
684 685 continue
685 686
686 687 newactions.append(d)
687 688
688 689 # FUTURE consider adding some optimizations here for certain transitions.
689 690 # e.g. adding generaldelta could schedule parent redeltas.
690 691
691 692 return newactions
692 693
693 694
694 695 def _revlogfrompath(repo, path):
695 696 """Obtain a revlog from a repo path.
696 697
697 698 An instance of the appropriate class is returned.
698 699 """
699 700 if path == b'00changelog.i':
700 701 return changelog.changelog(repo.svfs)
701 702 elif path.endswith(b'00manifest.i'):
702 703 mandir = path[: -len(b'00manifest.i')]
703 704 return manifest.manifestrevlog(repo.svfs, tree=mandir)
704 705 else:
705 706 # reverse of "/".join(("data", path + ".i"))
706 707 return filelog.filelog(repo.svfs, path[5:-2])
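
# Illustrative examples (assumption, not part of the upstream source):
#
#     _revlogfrompath(repo, b'00changelog.i')  -> the changelog
#     _revlogfrompath(repo, b'00manifest.i')   -> the root manifest revlog
#     _revlogfrompath(repo, b'data/foo.py.i')  -> the filelog for foo.py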
707 708
708 709
709 710 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
710 711 """copy all relevant files for `oldrl` into `destrepo` store
711 712
712 713 Files are copied "as is" without any transformation. The copy is performed
713 714 without extra checks. Callers are responsible for making sure the copied
714 715 content is compatible with the format of the destination repository.
715 716 """
716 717 oldrl = getattr(oldrl, '_revlog', oldrl)
717 718 newrl = _revlogfrompath(destrepo, unencodedname)
718 719 newrl = getattr(newrl, '_revlog', newrl)
719 720
720 721 oldvfs = oldrl.opener
721 722 newvfs = newrl.opener
722 723 oldindex = oldvfs.join(oldrl.indexfile)
723 724 newindex = newvfs.join(newrl.indexfile)
724 725 olddata = oldvfs.join(oldrl.datafile)
725 726 newdata = newvfs.join(newrl.datafile)
726 727
727 728 with newvfs(newrl.indexfile, b'w'):
728 729 pass # create all the directories
729 730
730 731 util.copyfile(oldindex, newindex)
731 732 copydata = oldrl.opener.exists(oldrl.datafile)
732 733 if copydata:
733 734 util.copyfile(olddata, newdata)
734 735
735 736 if not (
736 737 unencodedname.endswith(b'00changelog.i')
737 738 or unencodedname.endswith(b'00manifest.i')
738 739 ):
739 740 destrepo.svfs.fncache.add(unencodedname)
740 741 if copydata:
741 742 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
742 743
743 744
744 745 UPGRADE_CHANGELOG = object()
745 746 UPGRADE_MANIFEST = object()
746 747 UPGRADE_FILELOGS = object()
747 748
748 749 UPGRADE_ALL_REVLOGS = frozenset(
749 750 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
750 751 )
751 752
752 753
753 754 def getsidedatacompanion(srcrepo, dstrepo):
754 755 sidedatacompanion = None
755 756 removedreqs = srcrepo.requirements - dstrepo.requirements
756 757 addedreqs = dstrepo.requirements - srcrepo.requirements
757 758 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
758 759
759 760 def sidedatacompanion(rl, rev):
760 761 rl = getattr(rl, '_revlog', rl)
761 762 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
762 763 return True, (), {}, 0, 0
763 764 return False, (), {}, 0, 0
764 765
765 766 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
766 767 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
767 768 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
768 769 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
769 770 return sidedatacompanion
770 771
771 772
772 773 def matchrevlog(revlogfilter, entry):
773 774 """check if a revlog is selected for cloning.
774 775
775 776 In other words, does the revlog need any updates, or can it be
776 777 blindly copied?
777 778
778 779 The store entry is checked against the passed filter"""
779 780 if entry.endswith(b'00changelog.i'):
780 781 return UPGRADE_CHANGELOG in revlogfilter
781 782 elif entry.endswith(b'00manifest.i'):
782 783 return UPGRADE_MANIFEST in revlogfilter
783 784 return UPGRADE_FILELOGS in revlogfilter
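
# Illustrative examples (assumption, not part of the upstream source):
#
#     matchrevlog({UPGRADE_CHANGELOG}, b'00changelog.i')  -> True
#     matchrevlog({UPGRADE_CHANGELOG}, b'data/foo.py.i')  -> False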
784 785
785 786
786 787 def _clonerevlogs(
787 788 ui,
788 789 srcrepo,
789 790 dstrepo,
790 791 tr,
791 792 deltareuse,
792 793 forcedeltabothparents,
793 794 revlogs=UPGRADE_ALL_REVLOGS,
794 795 ):
795 796 """Copy revlogs between 2 repos."""
796 797 revcount = 0
797 798 srcsize = 0
798 799 srcrawsize = 0
799 800 dstsize = 0
800 801 fcount = 0
801 802 frevcount = 0
802 803 fsrcsize = 0
803 804 frawsize = 0
804 805 fdstsize = 0
805 806 mcount = 0
806 807 mrevcount = 0
807 808 msrcsize = 0
808 809 mrawsize = 0
809 810 mdstsize = 0
810 811 crevcount = 0
811 812 csrcsize = 0
812 813 crawsize = 0
813 814 cdstsize = 0
814 815
815 816 alldatafiles = list(srcrepo.store.walk())
816 817
817 818 # Perform a pass to collect metadata. This validates we can open all
818 819 # source files and allows a unified progress bar to be displayed.
819 820 for unencoded, encoded, size in alldatafiles:
820 821 if unencoded.endswith(b'.d'):
821 822 continue
822 823
823 824 rl = _revlogfrompath(srcrepo, unencoded)
824 825
825 826 info = rl.storageinfo(
826 827 exclusivefiles=True,
827 828 revisionscount=True,
828 829 trackedsize=True,
829 830 storedsize=True,
830 831 )
831 832
832 833 revcount += info[b'revisionscount'] or 0
833 834 datasize = info[b'storedsize'] or 0
834 835 rawsize = info[b'trackedsize'] or 0
835 836
836 837 srcsize += datasize
837 838 srcrawsize += rawsize
838 839
839 840 # This is for the separate progress bars.
840 841 if isinstance(rl, changelog.changelog):
841 842 crevcount += len(rl)
842 843 csrcsize += datasize
843 844 crawsize += rawsize
844 845 elif isinstance(rl, manifest.manifestrevlog):
845 846 mcount += 1
846 847 mrevcount += len(rl)
847 848 msrcsize += datasize
848 849 mrawsize += rawsize
849 850 elif isinstance(rl, filelog.filelog):
850 851 fcount += 1
851 852 frevcount += len(rl)
852 853 fsrcsize += datasize
853 854 frawsize += rawsize
854 855 else:
855 856 raise error.ProgrammingError(b'unknown revlog type')
856 857
857 858 if not revcount:
858 859 return
859 860
860 861 ui.status(
861 862 _(
862 863 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
863 864 b'%d in changelog)\n'
864 865 )
865 866 % (revcount, frevcount, mrevcount, crevcount)
866 867 )
867 868 ui.status(
868 869 _(b'migrating %s in store; %s tracked data\n')
869 870 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
870 871 )
871 872
872 873 # Used to keep track of progress.
873 874 progress = None
874 875
875 876 def oncopiedrevision(rl, rev, node):
876 877 progress.increment()
877 878
878 879 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
879 880
880 881 # Do the actual copying.
881 882 # FUTURE this operation can be farmed off to worker processes.
882 883 seen = set()
883 884 for unencoded, encoded, size in alldatafiles:
884 885 if unencoded.endswith(b'.d'):
885 886 continue
886 887
887 888 oldrl = _revlogfrompath(srcrepo, unencoded)
888 889
889 890 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
890 891 ui.status(
891 892 _(
892 893 b'finished migrating %d manifest revisions across %d '
893 894 b'manifests; change in size: %s\n'
894 895 )
895 896 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
896 897 )
897 898
898 899 ui.status(
899 900 _(
900 901 b'migrating changelog containing %d revisions '
901 902 b'(%s in store; %s tracked data)\n'
902 903 )
903 904 % (
904 905 crevcount,
905 906 util.bytecount(csrcsize),
906 907 util.bytecount(crawsize),
907 908 )
908 909 )
909 910 seen.add(b'c')
910 911 progress = srcrepo.ui.makeprogress(
911 912 _(b'changelog revisions'), total=crevcount
912 913 )
913 914 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
914 915 ui.status(
915 916 _(
916 917 b'finished migrating %d filelog revisions across %d '
917 918 b'filelogs; change in size: %s\n'
918 919 )
919 920 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
920 921 )
921 922
922 923 ui.status(
923 924 _(
924 925 b'migrating %d manifests containing %d revisions '
925 926 b'(%s in store; %s tracked data)\n'
926 927 )
927 928 % (
928 929 mcount,
929 930 mrevcount,
930 931 util.bytecount(msrcsize),
931 932 util.bytecount(mrawsize),
932 933 )
933 934 )
934 935 seen.add(b'm')
935 936 if progress:
936 937 progress.complete()
937 938 progress = srcrepo.ui.makeprogress(
938 939 _(b'manifest revisions'), total=mrevcount
939 940 )
940 941 elif b'f' not in seen:
941 942 ui.status(
942 943 _(
943 944 b'migrating %d filelogs containing %d revisions '
944 945 b'(%s in store; %s tracked data)\n'
945 946 )
946 947 % (
947 948 fcount,
948 949 frevcount,
949 950 util.bytecount(fsrcsize),
950 951 util.bytecount(frawsize),
951 952 )
952 953 )
953 954 seen.add(b'f')
954 955 if progress:
955 956 progress.complete()
956 957 progress = srcrepo.ui.makeprogress(
957 958 _(b'file revisions'), total=frevcount
958 959 )
959 960
960 961 if matchrevlog(revlogs, unencoded):
961 962 ui.note(
962 963 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
963 964 )
964 965 newrl = _revlogfrompath(dstrepo, unencoded)
965 966 oldrl.clone(
966 967 tr,
967 968 newrl,
968 969 addrevisioncb=oncopiedrevision,
969 970 deltareuse=deltareuse,
970 971 forcedeltabothparents=forcedeltabothparents,
971 972 sidedatacompanion=sidedatacompanion,
972 973 )
973 974 else:
974 975 msg = _(b'blindly copying %s containing %i revisions\n')
975 976 ui.note(msg % (unencoded, len(oldrl)))
976 977 _copyrevlog(tr, dstrepo, oldrl, unencoded)
977 978
978 979 newrl = _revlogfrompath(dstrepo, unencoded)
979 980
980 981 info = newrl.storageinfo(storedsize=True)
981 982 datasize = info[b'storedsize'] or 0
982 983
983 984 dstsize += datasize
984 985
985 986 if isinstance(newrl, changelog.changelog):
986 987 cdstsize += datasize
987 988 elif isinstance(newrl, manifest.manifestrevlog):
988 989 mdstsize += datasize
989 990 else:
990 991 fdstsize += datasize
991 992
992 993 progress.complete()
993 994
994 995 ui.status(
995 996 _(
996 997 b'finished migrating %d changelog revisions; change in size: '
997 998 b'%s\n'
998 999 )
999 1000 % (crevcount, util.bytecount(cdstsize - csrcsize))
1000 1001 )
1001 1002
1002 1003 ui.status(
1003 1004 _(
1004 1005 b'finished migrating %d total revisions; total change in store '
1005 1006 b'size: %s\n'
1006 1007 )
1007 1008 % (revcount, util.bytecount(dstsize - srcsize))
1008 1009 )
1009 1010
1010 1011
1011 1012 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
1012 1013 """Determine whether to copy a store file during upgrade.
1013 1014
1014 1015 This function is called when migrating store files from ``srcrepo`` to
1015 1016 ``dstrepo`` as part of upgrading a repository.
1016 1017
1017 1018 Args:
1018 1019 srcrepo: repo we are copying from
1019 1020 dstrepo: repo we are copying to
1020 1021 requirements: set of requirements for ``dstrepo``
1021 1022 path: store file being examined
1022 1023 mode: the ``ST_MODE`` file type of ``path``
1023 1024 st: ``stat`` data structure for ``path``
1024 1025
1025 1026 Function should return ``True`` if the file is to be copied.
1026 1027 """
1027 1028 # Skip revlogs.
1028 1029 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
1029 1030 return False
1030 1031 # Skip transaction related files.
1031 1032 if path.startswith(b'undo'):
1032 1033 return False
1033 1034 # Only copy regular files.
1034 1035 if mode != stat.S_IFREG:
1035 1036 return False
1036 1037 # Skip other skipped files.
1037 1038 if path in (b'lock', b'fncache'):
1038 1039 return False
1039 1040
1040 1041 return True
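
# Illustrative examples (assumption, not part of the upstream source), with
# ``mode`` being stat.S_IFREG and ``st`` a matching stat result:
#
#     _filterstorefile(src, dst, reqs, b'phaseroots', mode, st)       -> True
#     _filterstorefile(src, dst, reqs, b'00changelog.i', mode, st)    -> False (revlog)
#     _filterstorefile(src, dst, reqs, b'undo.backupfiles', mode, st) -> False (undo file)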
1041 1042
1042 1043
1043 1044 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1044 1045 """Hook point for extensions to perform additional actions during upgrade.
1045 1046
1046 1047 This function is called after revlogs and store files have been copied but
1047 1048 before the new store is swapped into the original location.
1048 1049 """
1049 1050
1050 1051
1051 1052 def _upgraderepo(
1052 1053 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1053 1054 ):
1054 1055 """Do the low-level work of upgrading a repository.
1055 1056
1056 1057 The upgrade is effectively performed as a copy between a source
1057 1058 repository and a temporary destination repository.
1058 1059
1059 1060 The source repository is unmodified for as long as possible so the
1060 1061 upgrade can abort at any time without causing loss of service for
1061 1062 readers and without corrupting the source repository.
1062 1063 """
1063 1064 assert srcrepo.currentwlock()
1064 1065 assert dstrepo.currentwlock()
1065 1066
1066 1067 ui.status(
1067 1068 _(
1068 1069 b'(it is safe to interrupt this process any time before '
1069 1070 b'data migration completes)\n'
1070 1071 )
1071 1072 )
1072 1073
1073 1074 if b're-delta-all' in actions:
1074 1075 deltareuse = revlog.revlog.DELTAREUSENEVER
1075 1076 elif b're-delta-parent' in actions:
1076 1077 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1077 1078 elif b're-delta-multibase' in actions:
1078 1079 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1079 1080 elif b're-delta-fulladd' in actions:
1080 1081 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1081 1082 else:
1082 1083 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1083 1084
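    # Roughly, per the delta-reuse policies documented on revlog.clone():
    # DELTAREUSENEVER recomputes every delta, DELTAREUSESAMEREVS reuses a
    # delta only when the destination would pick the same delta base,
    # DELTAREUSEFULLADD re-adds full texts through the normal insertion
    # path, and DELTAREUSEALWAYS reuses source deltas whenever possible.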
1084 1085 with dstrepo.transaction(b'upgrade') as tr:
1085 1086 _clonerevlogs(
1086 1087 ui,
1087 1088 srcrepo,
1088 1089 dstrepo,
1089 1090 tr,
1090 1091 deltareuse,
1091 1092 b're-delta-multibase' in actions,
1092 1093 revlogs=revlogs,
1093 1094 )
1094 1095
1095 1096 # Now copy other files in the store directory.
1096 1097 # The sorted() makes execution deterministic.
1097 1098 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1098 1099 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1099 1100 continue
1100 1101
1101 1102 srcrepo.ui.status(_(b'copying %s\n') % p)
1102 1103 src = srcrepo.store.rawvfs.join(p)
1103 1104 dst = dstrepo.store.rawvfs.join(p)
1104 1105 util.copyfile(src, dst, copystat=True)
1105 1106
1106 1107 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1107 1108
1108 1109 ui.status(_(b'data fully migrated to temporary repository\n'))
1109 1110
1110 1111 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1111 1112 backupvfs = vfsmod.vfs(backuppath)
1112 1113
1113 1114 # Make a backup of requires file first, as it is the first to be modified.
1114 1115 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1115 1116
1116 1117 # We install an arbitrary requirement that clients must not support
1117 1118 # as a mechanism to lock out new clients during the data swap. This is
1118 1119 # better than allowing a client to continue while the repository is in
1119 1120 # an inconsistent state.
1120 1121 ui.status(
1121 1122 _(
1122 1123 b'marking source repository as being upgraded; clients will be '
1123 1124 b'unable to read from repository\n'
1124 1125 )
1125 1126 )
1126 1127 scmutil.writereporequirements(
1127 1128 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1128 1129 )
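    # From here until the final requirements are written below, a client
    # opening the repository should abort with its usual "repository
    # requires features unknown to this Mercurial" error, naming the
    # 'upgradeinprogress' requirement.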
1129 1130
1130 1131 ui.status(_(b'starting in-place swap of repository data\n'))
1131 1132 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1132 1133
1133 1134 # Now swap in the new store directory. Doing it as a rename should make
1134 1135 # the operation nearly instantaneous and atomic (at least in well-behaved
1135 1136 # environments).
1136 1137 ui.status(_(b'replacing store...\n'))
1137 1138 tstart = util.timer()
1138 1139 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1139 1140 util.rename(dstrepo.spath, srcrepo.spath)
1140 1141 elapsed = util.timer() - tstart
1141 1142 ui.status(
1142 1143 _(
1143 1144 b'store replacement complete; repository was inconsistent for '
1144 1145 b'%0.1fs\n'
1145 1146 )
1146 1147 % elapsed
1147 1148 )
1148 1149
1149 1150 # We first write the requirements file. Any new requirements will lock
1150 1151 # out legacy clients.
1151 1152 ui.status(
1152 1153 _(
1153 1154 b'finalizing requirements file and making repository readable '
1154 1155 b'again\n'
1155 1156 )
1156 1157 )
1157 1158 scmutil.writereporequirements(srcrepo, requirements)
1158 1159
1159 1160 # The lock file from the old store won't be removed because nothing has a
1160 1161 # reference to its new location. So clean it up manually. Alternatively, we
1161 1162 # could update srcrepo.svfs and other variables to point to the new
1162 1163 # location. This is simpler.
1163 1164 backupvfs.unlink(b'store/lock')
1164 1165
1165 1166 return backuppath
1166 1167
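# On success the returned backup directory (upgradebackup.*) holds the old
# 'requires' file and the old 'store' tree, minus the stale 'store/lock'
# removed above; the caller decides whether to keep or delete it.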
1167 1168
1168 1169 def upgraderepo(
1169 1170 ui,
1170 1171 repo,
1171 1172 run=False,
1172 1173 optimize=None,
1173 1174 backup=True,
1174 1175 manifest=None,
1175 1176 changelog=None,
1176 1177 filelogs=None,
1177 1178 ):
1178 1179 """Upgrade a repository in place."""
1179 1180 if optimize is None:
1180 1181 optimize = []
1181 1182 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1182 1183 repo = repo.unfiltered()
1183 1184
1184 1185 revlogs = set(UPGRADE_ALL_REVLOGS)
1185 1186 specentries = (
1186 1187 (UPGRADE_CHANGELOG, changelog),
1187 1188 (UPGRADE_MANIFEST, manifest),
1188 1189 (UPGRADE_FILELOGS, filelogs),
1189 1190 )
1190 1191 specified = [(y, x) for (y, x) in specentries if x is not None]
1191 1192 if specified:
1192 1193 # the user requested that only a subset of revlogs be recloned
1193 1194 if any(x for y, x in specified):
1194 1195 revlogs = set()
1195 1196 for upgrade, enabled in specified:
1196 1197 if enabled:
1197 1198 revlogs.add(upgrade)
1198 1199 else:
1199 1200 # none are enabled
1200 1201 for upgrade, __ in specified:
1201 1202 revlogs.discard(upgrade)
1202 1203
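    # For example (flag names as exposed by debugupgraderepo, to the best of
    # our understanding): passing only --changelog narrows `revlogs` to
    # {UPGRADE_CHANGELOG}, while passing only --no-manifest keeps
    # UPGRADE_ALL_REVLOGS minus UPGRADE_MANIFEST.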
1203 1204 # Ensure the repository can be upgraded.
1204 1205 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1205 1206 if missingreqs:
1206 1207 raise error.Abort(
1207 1208 _(b'cannot upgrade repository; requirement missing: %s')
1208 1209 % _(b', ').join(sorted(missingreqs))
1209 1210 )
1210 1211
1211 1212 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1212 1213 if blockedreqs:
1213 1214 raise error.Abort(
1214 1215 _(
1215 1216 b'cannot upgrade repository; unsupported source '
1216 1217 b'requirement: %s'
1217 1218 )
1218 1219 % _(b', ').join(sorted(blockedreqs))
1219 1220 )
1220 1221
1221 1222 # FUTURE there is potentially a need to control the wanted requirements via
1222 1223 # command arguments or via an extension hook point.
1223 1224 newreqs = localrepo.newreporequirements(
1224 1225 repo.ui, localrepo.defaultcreateopts(repo.ui)
1225 1226 )
1226 1227 newreqs.update(preservedrequirements(repo))
1227 1228
1228 1229 noremovereqs = (
1229 1230 repo.requirements - newreqs - supportremovedrequirements(repo)
1230 1231 )
1231 1232 if noremovereqs:
1232 1233 raise error.Abort(
1233 1234 _(
1234 1235 b'cannot upgrade repository; requirement would be '
1235 1236 b'removed: %s'
1236 1237 )
1237 1238 % _(b', ').join(sorted(noremovereqs))
1238 1239 )
1239 1240
1240 1241 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1241 1242 if noaddreqs:
1242 1243 raise error.Abort(
1243 1244 _(
1244 1245 b'cannot upgrade repository; do not support adding '
1245 1246 b'requirement: %s'
1246 1247 )
1247 1248 % _(b', ').join(sorted(noaddreqs))
1248 1249 )
1249 1250
1250 1251 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1251 1252 if unsupportedreqs:
1252 1253 raise error.Abort(
1253 1254 _(
1254 1255 b'cannot upgrade repository; do not support '
1255 1256 b'destination requirement: %s'
1256 1257 )
1257 1258 % _(b', ').join(sorted(unsupportedreqs))
1258 1259 )
1259 1260
1260 1261 # Find and validate all improvements that can be made.
1261 1262 alloptimizations = findoptimizations(repo)
1262 1263
1263 1264 # Apply and validate arguments.
1264 1265 optimizations = []
1265 1266 for o in alloptimizations:
1266 1267 if o.name in optimize:
1267 1268 optimizations.append(o)
1268 1269 optimize.discard(o.name)
1269 1270
1270 1271 if optimize: # anything left is unknown
1271 1272 raise error.Abort(
1272 1273 _(b'unknown optimization action requested: %s')
1273 1274 % b', '.join(sorted(optimize)),
1274 1275 hint=_(b'run without arguments to see valid optimizations'),
1275 1276 )
1276 1277
1277 1278 deficiencies = finddeficiencies(repo)
1278 1279 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1279 1280 actions.extend(
1280 1281 o
1281 1282 for o in sorted(optimizations)
1282 1283 # determineactions could have already added the optimisation
1283 1284 if o not in actions
1284 1285 )
1285 1286
1286 1287 removedreqs = repo.requirements - newreqs
1287 1288 addedreqs = newreqs - repo.requirements
1288 1289
1289 1290 if revlogs != UPGRADE_ALL_REVLOGS:
1290 1291 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1291 1292 if incompatible:
1292 1293 msg = _(
1293 1294 b'ignoring revlogs selection flags, format requirements '
1294 1295 b'change: %s\n'
1295 1296 )
1296 1297 ui.warn(msg % b', '.join(sorted(incompatible)))
1297 1298 revlogs = UPGRADE_ALL_REVLOGS
1298 1299
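    # e.g. if the upgrade adds or removes a requirement tracked in
    # RECLONES_REQUIREMENTS (assumed here to include entries such as
    # 'generaldelta' and 'sparserevlog'), every revlog must be recloned and
    # any revlog selection flags are overridden with the warning above.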
1299 1300 def write_labeled(l, label):
1300 1301 first = True
1301 1302 for r in sorted(l):
1302 1303 if not first:
1303 1304 ui.write(b', ')
1304 1305 ui.write(r, label=label)
1305 1306 first = False
1306 1307
1307 1308 def printrequirements():
1308 1309 ui.write(_(b'requirements\n'))
1309 1310 ui.write(_(b' preserved: '))
1310 1311 write_labeled(
1311 1312 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1312 1313 )
1313 1314 ui.write((b'\n'))
1314 1315 removed = repo.requirements - newreqs
1315 1316 if removed:
1316 1317 ui.write(_(b' removed: '))
1317 1318 write_labeled(removed, "upgrade-repo.requirement.removed")
1318 1319 ui.write((b'\n'))
1319 1320 added = newreqs - repo.requirements
1320 1321 if added:
1321 1322 ui.write(_(b' added: '))
1322 1323 write_labeled(added, "upgrade-repo.requirement.added")
1323 1324 ui.write((b'\n'))
1324 1325 ui.write(b'\n')
1325 1326
1326 1327 def printoptimisations():
1327 1328 optimisations = [a for a in actions if a.type == OPTIMISATION]
1328 1329 optimisations.sort(key=lambda a: a.name)
1329 1330 if optimisations:
1330 1331 ui.write(_(b'optimisations: '))
1331 1332 write_labeled(
1332 1333 [a.name for a in optimisations],
1333 1334 "upgrade-repo.optimisation.performed",
1334 1335 )
1335 1336 ui.write(b'\n\n')
1336 1337
1337 1338 def printupgradeactions():
1338 1339 for a in actions:
1339 1340 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1340 1341
1341 1342 if not run:
1342 1343 fromconfig = []
1343 1344 onlydefault = []
1344 1345
1345 1346 for d in deficiencies:
1346 1347 if d.fromconfig(repo):
1347 1348 fromconfig.append(d)
1348 1349 elif d.default:
1349 1350 onlydefault.append(d)
1350 1351
1351 1352 if fromconfig or onlydefault:
1352 1353
1353 1354 if fromconfig:
1354 1355 ui.status(
1355 1356 _(
1356 1357 b'repository lacks features recommended by '
1357 1358 b'current config options:\n\n'
1358 1359 )
1359 1360 )
1360 1361 for i in fromconfig:
1361 1362 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1362 1363
1363 1364 if onlydefault:
1364 1365 ui.status(
1365 1366 _(
1366 1367 b'repository lacks features used by the default '
1367 1368 b'config options:\n\n'
1368 1369 )
1369 1370 )
1370 1371 for i in onlydefault:
1371 1372 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1372 1373
1373 1374 ui.status(b'\n')
1374 1375 else:
1375 1376 ui.status(
1376 1377 _(
1377 1378 b'(no feature deficiencies found in existing '
1378 1379 b'repository)\n'
1379 1380 )
1380 1381 )
1381 1382
1382 1383 ui.status(
1383 1384 _(
1384 1385 b'performing an upgrade with "--run" will make the following '
1385 1386 b'changes:\n\n'
1386 1387 )
1387 1388 )
1388 1389
1389 1390 printrequirements()
1390 1391 printoptimisations()
1391 1392 printupgradeactions()
1392 1393
1393 1394 unusedoptimize = [i for i in alloptimizations if i not in actions]
1394 1395
1395 1396 if unusedoptimize:
1396 1397 ui.status(
1397 1398 _(
1398 1399 b'additional optimizations are available by specifying '
1399 1400 b'"--optimize <name>":\n\n'
1400 1401 )
1401 1402 )
1402 1403 for i in unusedoptimize:
1403 1404 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
1404 1405 return
1405 1406
1406 1407 # Else we're in the run=true case.
1407 1408 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1408 1409 printrequirements()
1409 1410 printoptimisations()
1410 1411 printupgradeactions()
1411 1412
1412 1413 upgradeactions = [a.name for a in actions]
1413 1414
1414 1415 ui.status(_(b'beginning upgrade...\n'))
1415 1416 with repo.wlock(), repo.lock():
1416 1417 ui.status(_(b'repository locked and read-only\n'))
1417 1418 # Our strategy for upgrading the repository is to create a new,
1418 1419 # temporary repository, write data to it, then do a swap of the
1419 1420 # data. There are lighter-weight ways to do this, but it is easier
1420 1421 # to create a new repo object than to instantiate all the components
1421 1422 # (like the store) separately.
1422 1423 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1423 1424 backuppath = None
1424 1425 try:
1425 1426 ui.status(
1426 1427 _(
1427 1428 b'creating temporary repository to stage migrated '
1428 1429 b'data: %s\n'
1429 1430 )
1430 1431 % tmppath
1431 1432 )
1432 1433
1433 1434 # clone ui without using ui.copy because repo.ui is protected
1434 1435 repoui = repo.ui.__class__(repo.ui)
1435 1436 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1436 1437
1437 1438 with dstrepo.wlock(), dstrepo.lock():
1438 1439 backuppath = _upgraderepo(
1439 1440 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1440 1441 )
1441 1442 if not (backup or backuppath is None):
1442 1443 ui.status(
1443 1444 _(b'removing old repository content%s\n') % backuppath
1444 1445 )
1445 1446 repo.vfs.rmtree(backuppath, forcibly=True)
1446 1447 backuppath = None
1447 1448
1448 1449 finally:
1449 1450 ui.status(_(b'removing temporary repository %s\n') % tmppath)
1450 1451 repo.vfs.rmtree(tmppath, forcibly=True)
1451 1452
1452 1453 if backuppath and not ui.quiet:
1453 1454 ui.warn(
1454 1455 _(b'copy of old repository backed up at %s\n') % backuppath
1455 1456 )
1456 1457 ui.warn(
1457 1458 _(
1458 1459 b'the old repository will not be deleted; remove '
1459 1460 b'it to free up disk space once the upgraded '
1460 1461 b'repository is verified\n'
1461 1462 )
1462 1463 )
1463 1464
1464 1465 if sharedsafe.name in addedreqs:
1465 1466 ui.warn(
1466 1467 _(
1467 1468 b'repository upgraded to share safe mode, existing'
1468 1469 b' shares will still work in old non-safe mode. '
1469 1470 b'Re-share existing shares to use them in safe mode.'
1470 1471 b' New shares will be created in safe mode.\n'
1471 1472 )
1472 1473 )
1474 if sharedsafe.name in removedreqs:
1475 ui.warn(
1476 _(
1477 b'repository downgraded to not use share safe mode, '
1478 b'existing shares will not work and need to'
1479 b' be reshared.\n'
1480 )
1481 )
@@ -1,380 +1,455 b''
1 1 setup
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > share =
6 6 > [format]
7 7 > exp-share-safe = True
8 8 > EOF
9 9
10 10 prepare source repo
11 11
12 12 $ hg init source
13 13 $ cd source
14 14 $ cat .hg/requires
15 15 exp-sharesafe
16 16 $ cat .hg/store/requires
17 17 dotencode
18 18 fncache
19 19 generaldelta
20 20 revlogv1
21 21 sparserevlog
22 22 store
23 23 $ hg debugrequirements
24 24 dotencode
25 25 exp-sharesafe
26 26 fncache
27 27 generaldelta
28 28 revlogv1
29 29 sparserevlog
30 30 store
31 31
32 32 $ echo a > a
33 33 $ hg ci -Aqm "added a"
34 34 $ echo b > b
35 35 $ hg ci -Aqm "added b"
36 36
37 37 $ HGEDITOR=cat hg config --shared
38 38 abort: repository is not shared; can't use --shared
39 39 [10]
40 40 $ cd ..
41 41
42 42 Create a shared repo and check the requirements are shared and read correctly
43 43 $ hg share source shared1
44 44 updating working directory
45 45 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 46 $ cd shared1
47 47 $ cat .hg/requires
48 48 exp-sharesafe
49 49 shared
50 50
51 51 $ hg debugrequirements -R ../source
52 52 dotencode
53 53 exp-sharesafe
54 54 fncache
55 55 generaldelta
56 56 revlogv1
57 57 sparserevlog
58 58 store
59 59
60 60 $ hg debugrequirements
61 61 dotencode
62 62 exp-sharesafe
63 63 fncache
64 64 generaldelta
65 65 revlogv1
66 66 shared
67 67 sparserevlog
68 68 store
69 69
70 70 $ echo c > c
71 71 $ hg ci -Aqm "added c"
72 72
73 73 Check that config of the source repository is also loaded
74 74
75 75 $ hg showconfig ui.curses
76 76 [1]
77 77
78 78 $ echo "[ui]" >> ../source/.hg/hgrc
79 79 $ echo "curses=true" >> ../source/.hg/hgrc
80 80
81 81 $ hg showconfig ui.curses
82 82 true
83 83
84 84 Test that extensions of source repository are also loaded
85 85
86 86 $ hg debugextensions
87 87 share
88 88 $ hg extdiff -p echo
89 89 hg: unknown command 'extdiff'
90 90 'extdiff' is provided by the following extension:
91 91
92 92 extdiff command to allow external programs to compare revisions
93 93
94 94 (use 'hg help extensions' for information on enabling extensions)
95 95 [255]
96 96
97 97 $ echo "[extensions]" >> ../source/.hg/hgrc
98 98 $ echo "extdiff=" >> ../source/.hg/hgrc
99 99
100 100 $ hg debugextensions -R ../source
101 101 extdiff
102 102 share
103 103 $ hg extdiff -R ../source -p echo
104 104
105 105 BROKEN: the command below will not work if the config of the shared source is
106 106 not loaded on dispatch, even though debugextensions reports that the extension
107 107 is loaded
108 108 $ hg debugextensions
109 109 extdiff
110 110 share
111 111
112 112 $ hg extdiff -p echo
113 113
114 114 However, local .hg/hgrc should override the config set by share source
115 115
116 116 $ echo "[ui]" >> .hg/hgrc
117 117 $ echo "curses=false" >> .hg/hgrc
118 118
119 119 $ hg showconfig ui.curses
120 120 false
121 121
122 122 $ HGEDITOR=cat hg config --shared
123 123 [ui]
124 124 curses=true
125 125 [extensions]
126 126 extdiff=
127 127
128 128 $ HGEDITOR=cat hg config --local
129 129 [ui]
130 130 curses=false
131 131
132 132 Testing that hooks set in source repository also runs in shared repo
133 133
134 134 $ cd ../source
135 135 $ cat <<EOF >> .hg/hgrc
136 136 > [extensions]
137 137 > hooklib=
138 138 > [hooks]
139 139 > pretxnchangegroup.reject_merge_commits = \
140 140 > python:hgext.hooklib.reject_merge_commits.hook
141 141 > EOF
142 142
143 143 $ cd ..
144 144 $ hg clone source cloned
145 145 updating to branch default
146 146 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 147 $ cd cloned
148 148 $ hg up 0
149 149 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
150 150 $ echo bar > bar
151 151 $ hg ci -Aqm "added bar"
152 152 $ hg merge
153 153 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 154 (branch merge, don't forget to commit)
155 155 $ hg ci -m "merge commit"
156 156
157 157 $ hg push ../source
158 158 pushing to ../source
159 159 searching for changes
160 160 adding changesets
161 161 adding manifests
162 162 adding file changes
163 163 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
164 164 transaction abort!
165 165 rollback completed
166 166 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
167 167 [255]
168 168
169 169 $ hg push ../shared1
170 170 pushing to ../shared1
171 171 searching for changes
172 172 adding changesets
173 173 adding manifests
174 174 adding file changes
175 175 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
176 176 transaction abort!
177 177 rollback completed
178 178 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
179 179 [255]
180 180
181 181 Test that if the share source config is untrusted, we don't read it
182 182
183 183 $ cd ../shared1
184 184
185 185 $ cat << EOF > $TESTTMP/untrusted.py
186 186 > from mercurial import scmutil, util
187 187 > def uisetup(ui):
188 188 > class untrustedui(ui.__class__):
189 189 > def _trusted(self, fp, f):
190 190 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
191 191 > return False
192 192 > return super(untrustedui, self)._trusted(fp, f)
193 193 > ui.__class__ = untrustedui
194 194 > EOF
195 195
196 196 $ hg showconfig hooks
197 197 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
198 198
199 199 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
200 200 [1]
201 201
202 202 Update the source repository format and check that shared repo works
203 203
204 204 $ cd ../source
205 205
206 206 Disable zstd-related tests because zstd is not present in the pure version
207 207 #if zstd
208 208 $ echo "[format]" >> .hg/hgrc
209 209 $ echo "revlog-compression=zstd" >> .hg/hgrc
210 210
211 211 $ hg debugupgraderepo --run -q
212 212 upgrade will perform the following actions:
213 213
214 214 requirements
215 215 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
216 216 added: revlog-compression-zstd
217 217
218 218 $ hg log -r .
219 219 changeset: 1:5f6d8a4bf34a
220 220 user: test
221 221 date: Thu Jan 01 00:00:00 1970 +0000
222 222 summary: added b
223 223
224 224 #endif
225 225 $ echo "[format]" >> .hg/hgrc
226 226 $ echo "use-persistent-nodemap=True" >> .hg/hgrc
227 227
228 228 $ hg debugupgraderepo --run -q -R ../shared1
229 229 abort: cannot upgrade repository; unsupported source requirement: shared
230 230 [255]
231 231
232 232 $ hg debugupgraderepo --run -q
233 233 upgrade will perform the following actions:
234 234
235 235 requirements
236 236 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
237 237 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
238 238 added: persistent-nodemap
239 239
240 240 $ hg log -r .
241 241 changeset: 1:5f6d8a4bf34a
242 242 user: test
243 243 date: Thu Jan 01 00:00:00 1970 +0000
244 244 summary: added b
245 245
246 246
247 247 The shared repo should still work
248 248 $ cd ../shared1
249 249 $ hg log -r .
250 250 changeset: 2:155349b645be
251 251 tag: tip
252 252 user: test
253 253 date: Thu Jan 01 00:00:00 1970 +0000
254 254 summary: added c
255 255
256 256
257 257 Testing that the non-shared rc file is loaded for the source repo and not the shared one
258 258
259 259 $ cd ../source
260 260 $ touch .hg/hgrc-not-shared
261 261 $ echo "[ui]" >> .hg/hgrc-not-shared
262 262 $ echo "traceback=true" >> .hg/hgrc-not-shared
263 263
264 264 $ hg showconfig ui.traceback
265 265 true
266 266
267 267 $ HGEDITOR=cat hg config --non-shared
268 268 [ui]
269 269 traceback=true
270 270
271 271 $ cd ../shared1
272 272 $ hg showconfig ui.traceback
273 273 [1]
274 274
275 275 Unsharing works
276 276
277 277 $ hg unshare
278 278
279 279 Test that the source config is added to the shared one after unshare, and that the
280 280 config of the current repo still takes precedence over the config from the source
281 281 $ cd ../cloned
282 282 $ hg push ../shared1
283 283 pushing to ../shared1
284 284 searching for changes
285 285 adding changesets
286 286 adding manifests
287 287 adding file changes
288 288 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
289 289 transaction abort!
290 290 rollback completed
291 291 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
292 292 [255]
293 293 $ hg showconfig ui.curses -R ../shared1
294 294 false
295 295
296 296 $ cd ../
297 297
298 298 Test that upgrading using debugupgraderepo works
299 299 =================================================
300 300
301 301 $ hg init non-share-safe --config format.exp-share-safe=false
302 302 $ cd non-share-safe
303 303 $ hg debugrequirements
304 304 dotencode
305 305 fncache
306 306 generaldelta
307 307 revlogv1
308 308 sparserevlog
309 309 store
310 310 $ echo foo > foo
311 311 $ hg ci -Aqm 'added foo'
312 312 $ echo bar > bar
313 313 $ hg ci -Aqm 'added bar'
314 314
315 315 Create a share before upgrading
316 316
317 317 $ cd ..
318 318 $ hg share non-share-safe nss-share
319 319 updating working directory
320 320 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 321 $ hg debugrequirements -R nss-share
322 322 dotencode
323 323 fncache
324 324 generaldelta
325 325 revlogv1
326 326 shared
327 327 sparserevlog
328 328 store
329 329 $ cd non-share-safe
330 330
331 331 Upgrade
332 332
333 333 $ hg debugupgraderepo -q
334 334 requirements
335 335 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
336 336 added: exp-sharesafe
337 337
338 338 $ hg debugupgraderepo --run -q
339 339 upgrade will perform the following actions:
340 340
341 341 requirements
342 342 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
343 343 added: exp-sharesafe
344 344
345 345 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode. New shares will be created in safe mode.
346 346
347 347 $ hg debugrequirements
348 348 dotencode
349 349 exp-sharesafe
350 350 fncache
351 351 generaldelta
352 352 revlogv1
353 353 sparserevlog
354 354 store
355 355
356 356 $ cat .hg/requires
357 357 exp-sharesafe
358 358
359 359 $ cat .hg/store/requires
360 360 dotencode
361 361 fncache
362 362 generaldelta
363 363 revlogv1
364 364 sparserevlog
365 365 store
366 366
367 367 $ hg log -GT "{node}: {desc}\n"
368 368 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
369 369 |
370 370 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
371 371
372 372
373 373 Make sure existing shares still work
374 374
375 375 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
376 376 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
377 377 |
378 378 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
379 379
380
381
382 Create a safe share from the upgraded one
383
384 $ cd ..
385 $ hg share non-share-safe ss-share
386 updating working directory
387 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
388 $ cd ss-share
389 $ hg log -GT "{node}: {desc}\n"
390 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
391 |
392 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
393
394 $ cd ../non-share-safe
395
396 Test that downgrading works too
397
398 $ cat >> $HGRCPATH <<EOF
399 > [extensions]
400 > share =
401 > [format]
402 > exp-share-safe = False
403 > EOF
404
405 $ hg debugupgraderepo -q
406 requirements
407 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
408 removed: exp-sharesafe
409
410 $ hg debugupgraderepo -q --run
411 upgrade will perform the following actions:
412
413 requirements
414 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
415 removed: exp-sharesafe
416
417 repository downgraded to not use share safe mode, existing shares will not work and need to be reshared.
418
419 $ hg debugrequirements
420 dotencode
421 fncache
422 generaldelta
423 revlogv1
424 sparserevlog
425 store
426
427 $ cat .hg/requires
428 dotencode
429 fncache
430 generaldelta
431 revlogv1
432 sparserevlog
433 store
434
435 $ test -f .hg/store/requires
436 [1]
437
438 $ hg log -GT "{node}: {desc}\n"
439 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
440 |
441 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
442
443
444 Make sure existing shares still work
445
446 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
447 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
448 |
449 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
450
380 451 $ hg unshare -R ../nss-share
452
453 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
454 abort: share source does not support exp-sharesafe requirement
455 [255]