localrepo: add git extension to autoextensions list...
Augie Fackler
r44966:21893ff3 default
@@ -1,3799 +1,3800 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 rcutil,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 hashutil,
78 78 procutil,
79 79 stringutil,
80 80 )
81 81
82 82 from .revlogutils import constants as revlogconst
83 83
84 84 release = lockmod.release
85 85 urlerr = util.urlerr
86 86 urlreq = util.urlreq
87 87
88 88 # set of (path, vfs-location) tuples. vfs-location is:
89 89 # - 'plain' for vfs relative paths
90 90 # - '' for svfs relative paths
91 91 _cachedfiles = set()
92 92
93 93
94 94 class _basefilecache(scmutil.filecache):
95 95     """All filecache usage on repo is done for logic that should be unfiltered
96 96 """
97 97
98 98 def __get__(self, repo, type=None):
99 99 if repo is None:
100 100 return self
101 101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 102 unfi = repo.unfiltered()
103 103 try:
104 104 return unfi.__dict__[self.sname]
105 105 except KeyError:
106 106 pass
107 107 return super(_basefilecache, self).__get__(unfi, type)
108 108
109 109 def set(self, repo, value):
110 110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 111
112 112
113 113 class repofilecache(_basefilecache):
114 114 """filecache for files in .hg but outside of .hg/store"""
115 115
116 116 def __init__(self, *paths):
117 117 super(repofilecache, self).__init__(*paths)
118 118 for path in paths:
119 119 _cachedfiles.add((path, b'plain'))
120 120
121 121 def join(self, obj, fname):
122 122 return obj.vfs.join(fname)
123 123
124 124
125 125 class storecache(_basefilecache):
126 126 """filecache for files in the store"""
127 127
128 128 def __init__(self, *paths):
129 129 super(storecache, self).__init__(*paths)
130 130 for path in paths:
131 131 _cachedfiles.add((path, b''))
132 132
133 133 def join(self, obj, fname):
134 134 return obj.sjoin(fname)
135 135
136 136
137 137 class mixedrepostorecache(_basefilecache):
138 138     """filecache for a mix of files in .hg/store and outside"""
139 139
140 140 def __init__(self, *pathsandlocations):
141 141 # scmutil.filecache only uses the path for passing back into our
142 142 # join(), so we can safely pass a list of paths and locations
143 143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 144 _cachedfiles.update(pathsandlocations)
145 145
146 146 def join(self, obj, fnameandlocation):
147 147 fname, location = fnameandlocation
148 148 if location == b'plain':
149 149 return obj.vfs.join(fname)
150 150 else:
151 151 if location != b'':
152 152 raise error.ProgrammingError(
153 153 b'unexpected location: %s' % location
154 154 )
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 def isfilecached(repo, name):
159 159     """check if a repo has already cached the "name" filecache-ed property
160 160
161 161 This returns (cachedobj-or-None, iscached) tuple.
162 162 """
163 163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 164 if not cacheentry:
165 165 return None, False
166 166 return cacheentry.obj, True
167 167
168 168
169 169 class unfilteredpropertycache(util.propertycache):
170 170     """propertycache that applies to the unfiltered repo only"""
171 171
172 172 def __get__(self, repo, type=None):
173 173 unfi = repo.unfiltered()
174 174 if unfi is repo:
175 175 return super(unfilteredpropertycache, self).__get__(unfi)
176 176 return getattr(unfi, self.name)
177 177
178 178
179 179 class filteredpropertycache(util.propertycache):
180 180     """propertycache that must take filtering into account"""
181 181
182 182 def cachevalue(self, obj, value):
183 183 object.__setattr__(obj, self.name, value)
184 184
185 185
186 186 def hasunfilteredcache(repo, name):
187 187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 188 return name in vars(repo.unfiltered())
189 189
190 190
191 191 def unfilteredmethod(orig):
192 192     """decorate a method that always needs to be run on the unfiltered version"""
193 193
194 194 def wrapper(repo, *args, **kwargs):
195 195 return orig(repo.unfiltered(), *args, **kwargs)
196 196
197 197 return wrapper
198 198
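# Editor's note -- an illustrative sketch, not part of the original module:
# the decorator above is meant for repository methods that must always see
# the unfiltered repo; ``examplemethod`` is a hypothetical name.
#
#     class somerepo(localrepository):
#         @unfilteredmethod
#         def examplemethod(self):
#             # ``self`` here is always the unfiltered repo
#             return len(self.changelog)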
199 199
200 200 moderncaps = {
201 201 b'lookup',
202 202 b'branchmap',
203 203 b'pushkey',
204 204 b'known',
205 205 b'getbundle',
206 206 b'unbundle',
207 207 }
208 208 legacycaps = moderncaps.union({b'changegroupsubset'})
209 209
210 210
211 211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 212 class localcommandexecutor(object):
213 213 def __init__(self, peer):
214 214 self._peer = peer
215 215 self._sent = False
216 216 self._closed = False
217 217
218 218 def __enter__(self):
219 219 return self
220 220
221 221 def __exit__(self, exctype, excvalue, exctb):
222 222 self.close()
223 223
224 224 def callcommand(self, command, args):
225 225 if self._sent:
226 226 raise error.ProgrammingError(
227 227 b'callcommand() cannot be used after sendcommands()'
228 228 )
229 229
230 230 if self._closed:
231 231 raise error.ProgrammingError(
232 232 b'callcommand() cannot be used after close()'
233 233 )
234 234
235 235 # We don't need to support anything fancy. Just call the named
236 236 # method on the peer and return a resolved future.
237 237 fn = getattr(self._peer, pycompat.sysstr(command))
238 238
239 239 f = pycompat.futures.Future()
240 240
241 241 try:
242 242 result = fn(**pycompat.strkwargs(args))
243 243 except Exception:
244 244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 245 else:
246 246 f.set_result(result)
247 247
248 248 return f
249 249
250 250 def sendcommands(self):
251 251 self._sent = True
252 252
253 253 def close(self):
254 254 self._closed = True
255 255
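# Editor's note -- a minimal usage sketch for the executor above, assuming
# ``peer`` is a peer object such as the result of repo.peer(); this follows
# the generic commandexecutor calling convention:
#
#     with peer.commandexecutor() as executor:
#         f = executor.callcommand(b'lookup', {b'key': b'tip'})
#         executor.sendcommands()
#     node = f.result()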
256 256
257 257 @interfaceutil.implementer(repository.ipeercommands)
258 258 class localpeer(repository.peer):
259 259 '''peer for a local repo; reflects only the most recent API'''
260 260
261 261 def __init__(self, repo, caps=None):
262 262 super(localpeer, self).__init__()
263 263
264 264 if caps is None:
265 265 caps = moderncaps.copy()
266 266 self._repo = repo.filtered(b'served')
267 267 self.ui = repo.ui
268 268 self._caps = repo._restrictcapabilities(caps)
269 269
270 270 # Begin of _basepeer interface.
271 271
272 272 def url(self):
273 273 return self._repo.url()
274 274
275 275 def local(self):
276 276 return self._repo
277 277
278 278 def peer(self):
279 279 return self
280 280
281 281 def canpush(self):
282 282 return True
283 283
284 284 def close(self):
285 285 self._repo.close()
286 286
287 287 # End of _basepeer interface.
288 288
289 289 # Begin of _basewirecommands interface.
290 290
291 291 def branchmap(self):
292 292 return self._repo.branchmap()
293 293
294 294 def capabilities(self):
295 295 return self._caps
296 296
297 297 def clonebundles(self):
298 298 return self._repo.tryread(b'clonebundles.manifest')
299 299
300 300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 301 """Used to test argument passing over the wire"""
302 302 return b"%s %s %s %s %s" % (
303 303 one,
304 304 two,
305 305 pycompat.bytestr(three),
306 306 pycompat.bytestr(four),
307 307 pycompat.bytestr(five),
308 308 )
309 309
310 310 def getbundle(
311 311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 312 ):
313 313 chunks = exchange.getbundlechunks(
314 314 self._repo,
315 315 source,
316 316 heads=heads,
317 317 common=common,
318 318 bundlecaps=bundlecaps,
319 319 **kwargs
320 320 )[1]
321 321 cb = util.chunkbuffer(chunks)
322 322
323 323 if exchange.bundle2requested(bundlecaps):
324 324 # When requesting a bundle2, getbundle returns a stream to make the
325 325 # wire level function happier. We need to build a proper object
326 326 # from it in local peer.
327 327 return bundle2.getunbundler(self.ui, cb)
328 328 else:
329 329 return changegroup.getunbundler(b'01', cb, None)
330 330
331 331 def heads(self):
332 332 return self._repo.heads()
333 333
334 334 def known(self, nodes):
335 335 return self._repo.known(nodes)
336 336
337 337 def listkeys(self, namespace):
338 338 return self._repo.listkeys(namespace)
339 339
340 340 def lookup(self, key):
341 341 return self._repo.lookup(key)
342 342
343 343 def pushkey(self, namespace, key, old, new):
344 344 return self._repo.pushkey(namespace, key, old, new)
345 345
346 346 def stream_out(self):
347 347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 348
349 349 def unbundle(self, bundle, heads, url):
350 350 """apply a bundle on a repo
351 351
352 352 This function handles the repo locking itself."""
353 353 try:
354 354 try:
355 355 bundle = exchange.readbundle(self.ui, bundle, None)
356 356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 357 if util.safehasattr(ret, b'getchunks'):
358 358 # This is a bundle20 object, turn it into an unbundler.
359 359 # This little dance should be dropped eventually when the
360 360 # API is finally improved.
361 361 stream = util.chunkbuffer(ret.getchunks())
362 362 ret = bundle2.getunbundler(self.ui, stream)
363 363 return ret
364 364 except Exception as exc:
365 365 # If the exception contains output salvaged from a bundle2
366 366 # reply, we need to make sure it is printed before continuing
367 367 # to fail. So we build a bundle2 with such output and consume
368 368 # it directly.
369 369 #
370 370 # This is not very elegant but allows a "simple" solution for
371 371 # issue4594
372 372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 373 if output:
374 374 bundler = bundle2.bundle20(self._repo.ui)
375 375 for out in output:
376 376 bundler.addpart(out)
377 377 stream = util.chunkbuffer(bundler.getchunks())
378 378 b = bundle2.getunbundler(self.ui, stream)
379 379 bundle2.processbundle(self._repo, b)
380 380 raise
381 381 except error.PushRaced as exc:
382 382 raise error.ResponseError(
383 383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 384 )
385 385
386 386 # End of _basewirecommands interface.
387 387
388 388 # Begin of peer interface.
389 389
390 390 def commandexecutor(self):
391 391 return localcommandexecutor(self)
392 392
393 393 # End of peer interface.
394 394
395 395
396 396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 397 class locallegacypeer(localpeer):
398 398 '''peer extension which implements legacy methods too; used for tests with
399 399 restricted capabilities'''
400 400
401 401 def __init__(self, repo):
402 402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 403
404 404 # Begin of baselegacywirecommands interface.
405 405
406 406 def between(self, pairs):
407 407 return self._repo.between(pairs)
408 408
409 409 def branches(self, nodes):
410 410 return self._repo.branches(nodes)
411 411
412 412 def changegroup(self, nodes, source):
413 413 outgoing = discovery.outgoing(
414 414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 415 )
416 416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 417
418 418 def changegroupsubset(self, bases, heads, source):
419 419 outgoing = discovery.outgoing(
420 420 self._repo, missingroots=bases, missingheads=heads
421 421 )
422 422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 423
424 424 # End of baselegacywirecommands interface.
425 425
426 426
427 427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 428 # clients.
429 429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 430
431 431 # A repository with the sparserevlog feature will have delta chains that
432 432 # can spread over a larger span. Sparse reading cuts these large spans into
433 433 # pieces, so that each piece isn't too big.
434 434 # Without the sparserevlog capability, reading from the repository could use
435 435 # huge amounts of memory, because the whole span would be read at once,
436 436 # including all the intermediate revisions that aren't pertinent for the chain.
437 437 # This is why once a repository has enabled sparse-read, it becomes required.
438 438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 439
440 440 # A repository with the sidedataflag requirement will allow storing extra
441 441 # information for revisions without altering their original hashes.
442 442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 443
444 444 # A repository with the copies-sidedata-changeset requirement will store
445 445 # copies related information in changeset's sidedata.
446 446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 447
448 448 # Functions receiving (ui, features) that extensions can register to impact
449 449 # the ability to load repositories with custom requirements. Only
450 450 # functions defined in loaded extensions are called.
451 451 #
452 452 # The function receives a set of requirement strings that the repository
453 453 # is capable of opening. Functions will typically add elements to the
454 454 # set to reflect that the extension knows how to handle those requirements.
455 455 featuresetupfuncs = set()
456 456
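# Editor's note -- a hedged sketch of how an extension typically registers
# an entry here (compare the lfs and narrow extensions); the requirement
# name b'exp-myfeature' is hypothetical:
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-myfeature'}
#
#     def extsetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)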
457 457
458 458 def makelocalrepository(baseui, path, intents=None):
459 459 """Create a local repository object.
460 460
461 461 Given arguments needed to construct a local repository, this function
462 462 performs various early repository loading functionality (such as
463 463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 464 the repository can be opened, derives a type suitable for representing
465 465 that repository, and returns an instance of it.
466 466
467 467 The returned object conforms to the ``repository.completelocalrepository``
468 468 interface.
469 469
470 470 The repository type is derived by calling a series of factory functions
471 471 for each aspect/interface of the final repository. These are defined by
472 472 ``REPO_INTERFACES``.
473 473
474 474 Each factory function is called to produce a type implementing a specific
475 475 interface. The cumulative list of returned types will be combined into a
476 476 new type and that type will be instantiated to represent the local
477 477 repository.
478 478
479 479 The factory functions each receive various state that may be consulted
480 480 as part of deriving a type.
481 481
482 482 Extensions should wrap these factory functions to customize repository type
483 483 creation. Note that an extension's wrapped function may be called even if
484 484 that extension is not loaded for the repo being constructed. Extensions
485 485 should check if their ``__name__`` appears in the
486 486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 487 not.
488 488 """
489 489 ui = baseui.copy()
490 490 # Prevent copying repo configuration.
491 491 ui.copy = baseui.copy
492 492
493 493 # Working directory VFS rooted at repository root.
494 494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495 495
496 496 # Main VFS for .hg/ directory.
497 497 hgpath = wdirvfs.join(b'.hg')
498 498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499 499
500 500 # The .hg/ path should exist and should be a directory. All other
501 501 # cases are errors.
502 502 if not hgvfs.isdir():
503 503 try:
504 504 hgvfs.stat()
505 505 except OSError as e:
506 506 if e.errno != errno.ENOENT:
507 507 raise
508 508
509 509 raise error.RepoError(_(b'repository %s not found') % path)
510 510
511 511 # .hg/requires file contains a newline-delimited list of
512 512 # features/capabilities the opener (us) must have in order to use
513 513 # the repository. This file was introduced in Mercurial 0.9.2,
514 514 # which means very old repositories may not have one. We assume
515 515 # a missing file translates to no requirements.
516 516 try:
517 517 requirements = set(hgvfs.read(b'requires').splitlines())
518 518 except IOError as e:
519 519 if e.errno != errno.ENOENT:
520 520 raise
521 521 requirements = set()
522 522
523 523 # The .hg/hgrc file may load extensions or contain config options
524 524 # that influence repository construction. Attempt to load it and
525 525 # process any new extensions that it may have pulled in.
526 526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 528 extensions.loadall(ui)
529 529 extensions.populateui(ui)
530 530
531 531 # Set of module names of extensions loaded for this repository.
532 532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533 533
534 534 supportedrequirements = gathersupportedrequirements(ui)
535 535
536 536 # We first validate the requirements are known.
537 537 ensurerequirementsrecognized(requirements, supportedrequirements)
538 538
539 539 # Then we validate that the known set is reasonable to use together.
540 540 ensurerequirementscompatible(ui, requirements)
541 541
542 542 # TODO there are unhandled edge cases related to opening repositories with
543 543 # shared storage. If storage is shared, we should also test for requirements
544 544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 545 # that repo, as that repo may load extensions needed to open it. This is a
546 546 # bit complicated because we don't want the other hgrc to overwrite settings
547 547 # in this hgrc.
548 548 #
549 549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 550 # file when sharing repos. But if a requirement is added after the share is
551 551 # performed, thereby introducing a new requirement for the opener, we will
552 552 # not see that and could encounter a run-time error interacting with
553 553 # that shared store since it has an unknown-to-us requirement.
554 554
555 555 # At this point, we know we should be capable of opening the repository.
556 556 # Now get on with doing that.
557 557
558 558 features = set()
559 559
560 560 # The "store" part of the repository holds versioned data. How it is
561 561 # accessed is determined by various requirements. The ``shared`` or
562 562 # ``relshared`` requirements indicate the store lives in the path contained
563 563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 565 if b'shared' in requirements or b'relshared' in requirements:
566 566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 567 if b'relshared' in requirements:
568 568 sharedpath = hgvfs.join(sharedpath)
569 569
570 570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571 571
572 572 if not sharedvfs.exists():
573 573 raise error.RepoError(
574 574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 575 % sharedvfs.base
576 576 )
577 577
578 578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579 579
580 580 storebasepath = sharedvfs.base
581 581 cachepath = sharedvfs.join(b'cache')
582 582 else:
583 583 storebasepath = hgvfs.base
584 584 cachepath = hgvfs.join(b'cache')
585 585 wcachepath = hgvfs.join(b'wcache')
586 586
587 587 # The store has changed over time and the exact layout is dictated by
588 588 # requirements. The store interface abstracts differences across all
589 589 # of them.
590 590 store = makestore(
591 591 requirements,
592 592 storebasepath,
593 593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 594 )
595 595 hgvfs.createmode = store.createmode
596 596
597 597 storevfs = store.vfs
598 598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599 599
600 600 # The cache vfs is used to manage cache files.
601 601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 602 cachevfs.createmode = store.createmode
603 603 # The cache vfs is used to manage cache files related to the working copy
604 604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 605 wcachevfs.createmode = store.createmode
606 606
607 607 # Now resolve the type for the repository object. We do this by repeatedly
608 608 # calling a factory function to produce types for specific aspects of the
609 609 # repo's operation. The aggregate returned types are used as base classes
610 610 # for a dynamically-derived type, which will represent our new repository.
611 611
612 612 bases = []
613 613 extrastate = {}
614 614
615 615 for iface, fn in REPO_INTERFACES:
616 616 # We pass all potentially useful state to give extensions tons of
617 617 # flexibility.
618 618 typ = fn()(
619 619 ui=ui,
620 620 intents=intents,
621 621 requirements=requirements,
622 622 features=features,
623 623 wdirvfs=wdirvfs,
624 624 hgvfs=hgvfs,
625 625 store=store,
626 626 storevfs=storevfs,
627 627 storeoptions=storevfs.options,
628 628 cachevfs=cachevfs,
629 629 wcachevfs=wcachevfs,
630 630 extensionmodulenames=extensionmodulenames,
631 631 extrastate=extrastate,
632 632 baseclasses=bases,
633 633 )
634 634
635 635 if not isinstance(typ, type):
636 636 raise error.ProgrammingError(
637 637 b'unable to construct type for %s' % iface
638 638 )
639 639
640 640 bases.append(typ)
641 641
642 642 # type() allows you to use characters in type names that wouldn't be
643 643 # recognized as Python symbols in source code. We abuse that to add
644 644 # rich information about our constructed repo.
645 645 name = pycompat.sysstr(
646 646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 647 )
648 648
649 649 cls = type(name, tuple(bases), {})
650 650
651 651 return cls(
652 652 baseui=baseui,
653 653 ui=ui,
654 654 origroot=path,
655 655 wdirvfs=wdirvfs,
656 656 hgvfs=hgvfs,
657 657 requirements=requirements,
658 658 supportedrequirements=supportedrequirements,
659 659 sharedpath=storebasepath,
660 660 store=store,
661 661 cachevfs=cachevfs,
662 662 wcachevfs=wcachevfs,
663 663 features=features,
664 664 intents=intents,
665 665 )
666 666
667 667
668 668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 669 """Load hgrc files/content into a ui instance.
670 670
671 671 This is called during repository opening to load any additional
672 672 config files or settings relevant to the current repository.
673 673
674 674 Returns a bool indicating whether any additional configs were loaded.
675 675
676 676 Extensions should monkeypatch this function to modify how per-repo
677 677 configs are loaded. For example, an extension may wish to pull in
678 678 configs from alternate files or sources.
679 679 """
680 680 if not rcutil.use_repo_hgrc():
681 681 return False
682 682 try:
683 683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 684 return True
685 685 except IOError:
686 686 return False
687 687
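# Editor's note -- a minimal sketch of the monkeypatching described in the
# docstring above, from a hypothetical extension using
# extensions.wrapfunction():
#
#     def _wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         # read additional per-repo config from another source here
#         return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _wrappedloadhgrc)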
688 688
689 689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 690 """Perform additional actions after .hg/hgrc is loaded.
691 691
692 692 This function is called during repository loading immediately after
693 693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694 694
695 695 The function can be used to validate configs, automatically add
696 696 options (including extensions) based on requirements, etc.
697 697 """
698 698
699 699 # Map of requirements to list of extensions to load automatically when
700 700 # the requirement is present.
701 701 autoextensions = {
702 b'git': [b'git'],
702 703 b'largefiles': [b'largefiles'],
703 704 b'lfs': [b'lfs'],
704 705 }
705 706
706 707 for requirement, names in sorted(autoextensions.items()):
707 708 if requirement not in requirements:
708 709 continue
709 710
710 711 for name in names:
711 712 if not ui.hasconfig(b'extensions', name):
712 713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
713 714
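# Editor's note: with the mapping above, opening a repository whose
# .hg/requires file lists ``git`` now autoloads the ``git`` extension,
# equivalent to the user configuring:
#
#     [extensions]
#     git =
#
# Because of the ui.hasconfig() guard above, any explicit user setting for
# an extension (including disabling it with ``git = !``) still takes
# precedence over the autoload.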
714 715
715 716 def gathersupportedrequirements(ui):
716 717 """Determine the complete set of recognized requirements."""
717 718 # Start with all requirements supported by this file.
718 719 supported = set(localrepository._basesupported)
719 720
720 721 # Execute ``featuresetupfuncs`` entries if they belong to an extension
721 722 # relevant to this ui instance.
722 723 modules = {m.__name__ for n, m in extensions.extensions(ui)}
723 724
724 725 for fn in featuresetupfuncs:
725 726 if fn.__module__ in modules:
726 727 fn(ui, supported)
727 728
728 729 # Add derived requirements from registered compression engines.
729 730 for name in util.compengines:
730 731 engine = util.compengines[name]
731 732 if engine.available() and engine.revlogheader():
732 733 supported.add(b'exp-compression-%s' % name)
733 734 if engine.name() == b'zstd':
734 735 supported.add(b'revlog-compression-zstd')
735 736
736 737 return supported
737 738
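# Editor's note -- illustrative only: on a build where the zstd engine is
# available, the loop above adds e.g. b'exp-compression-zstd' and
# b'revlog-compression-zstd' to the supported set.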
738 739
739 740 def ensurerequirementsrecognized(requirements, supported):
740 741 """Validate that a set of local requirements is recognized.
741 742
742 743 Receives a set of requirements. Raises an ``error.RepoError`` if there
743 744 exists any requirement in that set that currently loaded code doesn't
744 745 recognize.
745 746
746 747     Returns nothing on success.
747 748 """
748 749 missing = set()
749 750
750 751 for requirement in requirements:
751 752 if requirement in supported:
752 753 continue
753 754
754 755 if not requirement or not requirement[0:1].isalnum():
755 756 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
756 757
757 758 missing.add(requirement)
758 759
759 760 if missing:
760 761 raise error.RequirementError(
761 762 _(b'repository requires features unknown to this Mercurial: %s')
762 763 % b' '.join(sorted(missing)),
763 764 hint=_(
764 765 b'see https://mercurial-scm.org/wiki/MissingRequirement '
765 766 b'for more information'
766 767 ),
767 768 )
768 769
769 770
770 771 def ensurerequirementscompatible(ui, requirements):
771 772 """Validates that a set of recognized requirements is mutually compatible.
772 773
773 774 Some requirements may not be compatible with others or require
774 775 config options that aren't enabled. This function is called during
775 776 repository opening to ensure that the set of requirements needed
776 777 to open a repository is sane and compatible with config options.
777 778
778 779 Extensions can monkeypatch this function to perform additional
779 780 checking.
780 781
781 782 ``error.RepoError`` should be raised on failure.
782 783 """
783 784 if b'exp-sparse' in requirements and not sparse.enabled:
784 785 raise error.RepoError(
785 786 _(
786 787 b'repository is using sparse feature but '
787 788 b'sparse is not enabled; enable the '
788 789                 b'"sparse" extension to access'
789 790 )
790 791 )
791 792
792 793
793 794 def makestore(requirements, path, vfstype):
794 795 """Construct a storage object for a repository."""
795 796 if b'store' in requirements:
796 797 if b'fncache' in requirements:
797 798 return storemod.fncachestore(
798 799 path, vfstype, b'dotencode' in requirements
799 800 )
800 801
801 802 return storemod.encodedstore(path, vfstype)
802 803
803 804 return storemod.basicstore(path, vfstype)
804 805
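# Editor's note -- the dispatch above, summarized (illustrative):
#
#     {b'store', b'fncache', b'dotencode'} -> fncachestore (with dotencode)
#     {b'store', b'fncache'}               -> fncachestore (no dotencode)
#     {b'store'}                           -> encodedstore
#     no b'store' requirement              -> basicstore (very old repos)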
805 806
806 807 def resolvestorevfsoptions(ui, requirements, features):
807 808 """Resolve the options to pass to the store vfs opener.
808 809
809 810 The returned dict is used to influence behavior of the storage layer.
810 811 """
811 812 options = {}
812 813
813 814 if b'treemanifest' in requirements:
814 815 options[b'treemanifest'] = True
815 816
816 817 # experimental config: format.manifestcachesize
817 818 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
818 819 if manifestcachesize is not None:
819 820 options[b'manifestcachesize'] = manifestcachesize
820 821
821 822 # In the absence of another requirement superseding a revlog-related
822 823 # requirement, we have to assume the repo is using revlog version 0.
823 824 # This revlog format is super old and we don't bother trying to parse
824 825 # opener options for it because those options wouldn't do anything
825 826 # meaningful on such old repos.
826 827 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
827 828 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
828 829 else: # explicitly mark repo as using revlogv0
829 830 options[b'revlogv0'] = True
830 831
831 832 if COPIESSDC_REQUIREMENT in requirements:
832 833 options[b'copies-storage'] = b'changeset-sidedata'
833 834 else:
834 835 writecopiesto = ui.config(b'experimental', b'copies.write-to')
835 836 copiesextramode = (b'changeset-only', b'compatibility')
836 837 if writecopiesto in copiesextramode:
837 838 options[b'copies-storage'] = b'extra'
838 839
839 840 return options
840 841
841 842
842 843 def resolverevlogstorevfsoptions(ui, requirements, features):
843 844 """Resolve opener options specific to revlogs."""
844 845
845 846 options = {}
846 847 options[b'flagprocessors'] = {}
847 848
848 849 if b'revlogv1' in requirements:
849 850 options[b'revlogv1'] = True
850 851 if REVLOGV2_REQUIREMENT in requirements:
851 852 options[b'revlogv2'] = True
852 853
853 854 if b'generaldelta' in requirements:
854 855 options[b'generaldelta'] = True
855 856
856 857 # experimental config: format.chunkcachesize
857 858 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
858 859 if chunkcachesize is not None:
859 860 options[b'chunkcachesize'] = chunkcachesize
860 861
861 862 deltabothparents = ui.configbool(
862 863 b'storage', b'revlog.optimize-delta-parent-choice'
863 864 )
864 865 options[b'deltabothparents'] = deltabothparents
865 866
866 867 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
867 868 lazydeltabase = False
868 869 if lazydelta:
869 870 lazydeltabase = ui.configbool(
870 871 b'storage', b'revlog.reuse-external-delta-parent'
871 872 )
872 873 if lazydeltabase is None:
873 874 lazydeltabase = not scmutil.gddeltaconfig(ui)
874 875 options[b'lazydelta'] = lazydelta
875 876 options[b'lazydeltabase'] = lazydeltabase
876 877
877 878 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
878 879 if 0 <= chainspan:
879 880 options[b'maxdeltachainspan'] = chainspan
880 881
881 882 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
882 883 if mmapindexthreshold is not None:
883 884 options[b'mmapindexthreshold'] = mmapindexthreshold
884 885
885 886 withsparseread = ui.configbool(b'experimental', b'sparse-read')
886 887 srdensitythres = float(
887 888 ui.config(b'experimental', b'sparse-read.density-threshold')
888 889 )
889 890 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
890 891 options[b'with-sparse-read'] = withsparseread
891 892 options[b'sparse-read-density-threshold'] = srdensitythres
892 893 options[b'sparse-read-min-gap-size'] = srmingapsize
893 894
894 895 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
895 896 options[b'sparse-revlog'] = sparserevlog
896 897 if sparserevlog:
897 898 options[b'generaldelta'] = True
898 899
899 900 sidedata = SIDEDATA_REQUIREMENT in requirements
900 901 options[b'side-data'] = sidedata
901 902
902 903 maxchainlen = None
903 904 if sparserevlog:
904 905 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
905 906 # experimental config: format.maxchainlen
906 907 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
907 908 if maxchainlen is not None:
908 909 options[b'maxchainlen'] = maxchainlen
909 910
910 911 for r in requirements:
911 912         # we allow multiple compression engine requirements to co-exist because
912 913         # strictly speaking, revlog seems to support mixed compression styles.
913 914 #
914 915 # The compression used for new entries will be "the last one"
915 916 prefix = r.startswith
916 917 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
917 918 options[b'compengine'] = r.split(b'-', 2)[2]
918 919
919 920 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
920 921 if options[b'zlib.level'] is not None:
921 922 if not (0 <= options[b'zlib.level'] <= 9):
922 923 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
923 924 raise error.Abort(msg % options[b'zlib.level'])
924 925 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
925 926 if options[b'zstd.level'] is not None:
926 927 if not (0 <= options[b'zstd.level'] <= 22):
927 928 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
928 929 raise error.Abort(msg % options[b'zstd.level'])
929 930
930 931 if repository.NARROW_REQUIREMENT in requirements:
931 932 options[b'enableellipsis'] = True
932 933
933 934 if ui.configbool(b'experimental', b'rust.index'):
934 935 options[b'rust.index'] = True
935 936 if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
936 937 options[b'exp-persistent-nodemap'] = True
937 938 if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
938 939 options[b'exp-persistent-nodemap.mmap'] = True
939 940 if ui.configbool(b'devel', b'persistent-nodemap'):
940 941 options[b'devel-force-nodemap'] = True
941 942
942 943 return options
943 944
944 945
945 946 def makemain(**kwargs):
946 947 """Produce a type conforming to ``ilocalrepositorymain``."""
947 948 return localrepository
948 949
949 950
950 951 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
951 952 class revlogfilestorage(object):
952 953 """File storage when using revlogs."""
953 954
954 955 def file(self, path):
955 956 if path[0] == b'/':
956 957 path = path[1:]
957 958
958 959 return filelog.filelog(self.svfs, path)
959 960
960 961
961 962 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
962 963 class revlognarrowfilestorage(object):
963 964 """File storage when using revlogs and narrow files."""
964 965
965 966 def file(self, path):
966 967 if path[0] == b'/':
967 968 path = path[1:]
968 969
969 970 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
970 971
971 972
972 973 def makefilestorage(requirements, features, **kwargs):
973 974 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
974 975 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
975 976 features.add(repository.REPO_FEATURE_STREAM_CLONE)
976 977
977 978 if repository.NARROW_REQUIREMENT in requirements:
978 979 return revlognarrowfilestorage
979 980 else:
980 981 return revlogfilestorage
981 982
982 983
983 984 # List of repository interfaces and factory functions for them. Each
984 985 # will be called in order during ``makelocalrepository()`` to iteratively
985 986 # derive the final type for a local repository instance. We capture the
986 987 # function as a lambda so we don't hold a reference and the module-level
987 988 # functions can be wrapped.
988 989 REPO_INTERFACES = [
989 990 (repository.ilocalrepositorymain, lambda: makemain),
990 991 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
991 992 ]
992 993
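# Editor's note -- a hedged sketch of the factory wrapping described in
# makelocalrepository()'s docstring, from a hypothetical extension:
#
#     def wrappedmakemain(orig, **kwargs):
#         cls = orig(**kwargs)
#         if __name__ not in kwargs['extensionmodulenames']:
#             return cls  # this extension is not enabled for this repo
#
#         class extendedrepo(cls):
#             pass
#
#         return extendedrepo
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makemain', wrappedmakemain)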
993 994
994 995 @interfaceutil.implementer(repository.ilocalrepositorymain)
995 996 class localrepository(object):
996 997 """Main class for representing local repositories.
997 998
998 999 All local repositories are instances of this class.
999 1000
1000 1001 Constructed on its own, instances of this class are not usable as
1001 1002 repository objects. To obtain a usable repository object, call
1002 1003 ``hg.repository()``, ``localrepo.instance()``, or
1003 1004 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1004 1005 ``instance()`` adds support for creating new repositories.
1005 1006 ``hg.repository()`` adds more extension integration, including calling
1006 1007 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1007 1008 used.
1008 1009 """
1009 1010
1010 1011 # obsolete experimental requirements:
1011 1012 # - manifestv2: An experimental new manifest format that allowed
1012 1013 # for stem compression of long paths. Experiment ended up not
1013 1014 # being successful (repository sizes went up due to worse delta
1014 1015 # chains), and the code was deleted in 4.6.
1015 1016 supportedformats = {
1016 1017 b'revlogv1',
1017 1018 b'generaldelta',
1018 1019 b'treemanifest',
1019 1020 COPIESSDC_REQUIREMENT,
1020 1021 REVLOGV2_REQUIREMENT,
1021 1022 SIDEDATA_REQUIREMENT,
1022 1023 SPARSEREVLOG_REQUIREMENT,
1023 1024 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1024 1025 }
1025 1026 _basesupported = supportedformats | {
1026 1027 b'store',
1027 1028 b'fncache',
1028 1029 b'shared',
1029 1030 b'relshared',
1030 1031 b'dotencode',
1031 1032 b'exp-sparse',
1032 1033 b'internal-phase',
1033 1034 }
1034 1035
1035 1036     # list of prefixes for files which can be written without 'wlock'
1036 1037 # Extensions should extend this list when needed
1037 1038 _wlockfreeprefix = {
1038 1039         # We might consider requiring 'wlock' for the next
1039 1040         # two, but pretty much all the existing code assumes
1040 1041 # wlock is not needed so we keep them excluded for
1041 1042 # now.
1042 1043 b'hgrc',
1043 1044 b'requires',
1044 1045         # XXX cache is a complicated business; someone
1045 1046 # should investigate this in depth at some point
1046 1047 b'cache/',
1047 1048         # XXX shouldn't dirstate be covered by the wlock?
1048 1049 b'dirstate',
1049 1050 # XXX bisect was still a bit too messy at the time
1050 1051 # this changeset was introduced. Someone should fix
1051 1052         # the remaining bit and drop this line
1052 1053 b'bisect.state',
1053 1054 }
1054 1055
1055 1056 def __init__(
1056 1057 self,
1057 1058 baseui,
1058 1059 ui,
1059 1060 origroot,
1060 1061 wdirvfs,
1061 1062 hgvfs,
1062 1063 requirements,
1063 1064 supportedrequirements,
1064 1065 sharedpath,
1065 1066 store,
1066 1067 cachevfs,
1067 1068 wcachevfs,
1068 1069 features,
1069 1070 intents=None,
1070 1071 ):
1071 1072 """Create a new local repository instance.
1072 1073
1073 1074 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1074 1075 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1075 1076 object.
1076 1077
1077 1078 Arguments:
1078 1079
1079 1080 baseui
1080 1081 ``ui.ui`` instance that ``ui`` argument was based off of.
1081 1082
1082 1083 ui
1083 1084 ``ui.ui`` instance for use by the repository.
1084 1085
1085 1086 origroot
1086 1087 ``bytes`` path to working directory root of this repository.
1087 1088
1088 1089 wdirvfs
1089 1090 ``vfs.vfs`` rooted at the working directory.
1090 1091
1091 1092 hgvfs
1092 1093 ``vfs.vfs`` rooted at .hg/
1093 1094
1094 1095 requirements
1095 1096 ``set`` of bytestrings representing repository opening requirements.
1096 1097
1097 1098 supportedrequirements
1098 1099 ``set`` of bytestrings representing repository requirements that we
1099 1100            know how to open. May be a superset of ``requirements``.
1100 1101
1101 1102 sharedpath
1102 1103            ``bytes`` defining the path to the storage base directory. Points to a
1103 1104 ``.hg/`` directory somewhere.
1104 1105
1105 1106 store
1106 1107 ``store.basicstore`` (or derived) instance providing access to
1107 1108 versioned storage.
1108 1109
1109 1110 cachevfs
1110 1111 ``vfs.vfs`` used for cache files.
1111 1112
1112 1113 wcachevfs
1113 1114 ``vfs.vfs`` used for cache files related to the working copy.
1114 1115
1115 1116 features
1116 1117 ``set`` of bytestrings defining features/capabilities of this
1117 1118 instance.
1118 1119
1119 1120 intents
1120 1121 ``set`` of system strings indicating what this repo will be used
1121 1122 for.
1122 1123 """
1123 1124 self.baseui = baseui
1124 1125 self.ui = ui
1125 1126 self.origroot = origroot
1126 1127 # vfs rooted at working directory.
1127 1128 self.wvfs = wdirvfs
1128 1129 self.root = wdirvfs.base
1129 1130 # vfs rooted at .hg/. Used to access most non-store paths.
1130 1131 self.vfs = hgvfs
1131 1132 self.path = hgvfs.base
1132 1133 self.requirements = requirements
1133 1134 self.supported = supportedrequirements
1134 1135 self.sharedpath = sharedpath
1135 1136 self.store = store
1136 1137 self.cachevfs = cachevfs
1137 1138 self.wcachevfs = wcachevfs
1138 1139 self.features = features
1139 1140
1140 1141 self.filtername = None
1141 1142
1142 1143 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1143 1144 b'devel', b'check-locks'
1144 1145 ):
1145 1146 self.vfs.audit = self._getvfsward(self.vfs.audit)
1146 1147         # A list of callbacks to shape the phase if no data were found.
1147 1148         # Callbacks are in the form: func(repo, roots) --> processed root.
1148 1149         # This list is to be filled by extensions during repo setup.
1149 1150 self._phasedefaults = []
1150 1151
1151 1152 color.setup(self.ui)
1152 1153
1153 1154 self.spath = self.store.path
1154 1155 self.svfs = self.store.vfs
1155 1156 self.sjoin = self.store.join
1156 1157 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1157 1158 b'devel', b'check-locks'
1158 1159 ):
1159 1160 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1160 1161 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1161 1162 else: # standard vfs
1162 1163 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1163 1164
1164 1165 self._dirstatevalidatewarned = False
1165 1166
1166 1167 self._branchcaches = branchmap.BranchMapCache()
1167 1168 self._revbranchcache = None
1168 1169 self._filterpats = {}
1169 1170 self._datafilters = {}
1170 1171 self._transref = self._lockref = self._wlockref = None
1171 1172
1172 1173 # A cache for various files under .hg/ that tracks file changes,
1173 1174 # (used by the filecache decorator)
1174 1175 #
1175 1176 # Maps a property name to its util.filecacheentry
1176 1177 self._filecache = {}
1177 1178
1178 1179         # hold sets of revisions to be filtered
1179 1180 # should be cleared when something might have changed the filter value:
1180 1181 # - new changesets,
1181 1182 # - phase change,
1182 1183 # - new obsolescence marker,
1183 1184 # - working directory parent change,
1184 1185 # - bookmark changes
1185 1186 self.filteredrevcache = {}
1186 1187
1187 1188 # post-dirstate-status hooks
1188 1189 self._postdsstatus = []
1189 1190
1190 1191 # generic mapping between names and nodes
1191 1192 self.names = namespaces.namespaces()
1192 1193
1193 1194 # Key to signature value.
1194 1195 self._sparsesignaturecache = {}
1195 1196 # Signature to cached matcher instance.
1196 1197 self._sparsematchercache = {}
1197 1198
1198 1199 self._extrafilterid = repoview.extrafilter(ui)
1199 1200
1200 1201 self.filecopiesmode = None
1201 1202 if COPIESSDC_REQUIREMENT in self.requirements:
1202 1203 self.filecopiesmode = b'changeset-sidedata'
1203 1204
1204 1205 def _getvfsward(self, origfunc):
1205 1206 """build a ward for self.vfs"""
1206 1207 rref = weakref.ref(self)
1207 1208
1208 1209 def checkvfs(path, mode=None):
1209 1210 ret = origfunc(path, mode=mode)
1210 1211 repo = rref()
1211 1212 if (
1212 1213 repo is None
1213 1214 or not util.safehasattr(repo, b'_wlockref')
1214 1215 or not util.safehasattr(repo, b'_lockref')
1215 1216 ):
1216 1217 return
1217 1218 if mode in (None, b'r', b'rb'):
1218 1219 return
1219 1220 if path.startswith(repo.path):
1220 1221 # truncate name relative to the repository (.hg)
1221 1222 path = path[len(repo.path) + 1 :]
1222 1223 if path.startswith(b'cache/'):
1223 1224 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1224 1225 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1225 1226 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1226 1227 # journal is covered by 'lock'
1227 1228 if repo._currentlock(repo._lockref) is None:
1228 1229 repo.ui.develwarn(
1229 1230 b'write with no lock: "%s"' % path,
1230 1231 stacklevel=3,
1231 1232 config=b'check-locks',
1232 1233 )
1233 1234 elif repo._currentlock(repo._wlockref) is None:
1234 1235 # rest of vfs files are covered by 'wlock'
1235 1236 #
1236 1237 # exclude special files
1237 1238 for prefix in self._wlockfreeprefix:
1238 1239 if path.startswith(prefix):
1239 1240 return
1240 1241 repo.ui.develwarn(
1241 1242 b'write with no wlock: "%s"' % path,
1242 1243 stacklevel=3,
1243 1244 config=b'check-locks',
1244 1245 )
1245 1246 return ret
1246 1247
1247 1248 return checkvfs
1248 1249
1249 1250 def _getsvfsward(self, origfunc):
1250 1251 """build a ward for self.svfs"""
1251 1252 rref = weakref.ref(self)
1252 1253
1253 1254 def checksvfs(path, mode=None):
1254 1255 ret = origfunc(path, mode=mode)
1255 1256 repo = rref()
1256 1257 if repo is None or not util.safehasattr(repo, b'_lockref'):
1257 1258 return
1258 1259 if mode in (None, b'r', b'rb'):
1259 1260 return
1260 1261 if path.startswith(repo.sharedpath):
1261 1262 # truncate name relative to the repository (.hg)
1262 1263 path = path[len(repo.sharedpath) + 1 :]
1263 1264 if repo._currentlock(repo._lockref) is None:
1264 1265 repo.ui.develwarn(
1265 1266 b'write with no lock: "%s"' % path, stacklevel=4
1266 1267 )
1267 1268 return ret
1268 1269
1269 1270 return checksvfs
1270 1271
1271 1272 def close(self):
1272 1273 self._writecaches()
1273 1274
1274 1275 def _writecaches(self):
1275 1276 if self._revbranchcache:
1276 1277 self._revbranchcache.write()
1277 1278
1278 1279 def _restrictcapabilities(self, caps):
1279 1280 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1280 1281 caps = set(caps)
1281 1282 capsblob = bundle2.encodecaps(
1282 1283 bundle2.getrepocaps(self, role=b'client')
1283 1284 )
1284 1285 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1285 1286 return caps
1286 1287
1287 1288 def _writerequirements(self):
1288 1289 scmutil.writerequires(self.vfs, self.requirements)
1289 1290
1290 1291 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1291 1292 # self -> auditor -> self._checknested -> self
1292 1293
1293 1294 @property
1294 1295 def auditor(self):
1295 1296 # This is only used by context.workingctx.match in order to
1296 1297 # detect files in subrepos.
1297 1298 return pathutil.pathauditor(self.root, callback=self._checknested)
1298 1299
1299 1300 @property
1300 1301 def nofsauditor(self):
1301 1302 # This is only used by context.basectx.match in order to detect
1302 1303 # files in subrepos.
1303 1304 return pathutil.pathauditor(
1304 1305 self.root, callback=self._checknested, realfs=False, cached=True
1305 1306 )
1306 1307
1307 1308 def _checknested(self, path):
1308 1309 """Determine if path is a legal nested repository."""
1309 1310 if not path.startswith(self.root):
1310 1311 return False
1311 1312 subpath = path[len(self.root) + 1 :]
1312 1313 normsubpath = util.pconvert(subpath)
1313 1314
1314 1315 # XXX: Checking against the current working copy is wrong in
1315 1316 # the sense that it can reject things like
1316 1317 #
1317 1318 # $ hg cat -r 10 sub/x.txt
1318 1319 #
1319 1320 # if sub/ is no longer a subrepository in the working copy
1320 1321 # parent revision.
1321 1322 #
1322 1323 # However, it can of course also allow things that would have
1323 1324 # been rejected before, such as the above cat command if sub/
1324 1325 # is a subrepository now, but was a normal directory before.
1325 1326 # The old path auditor would have rejected by mistake since it
1326 1327 # panics when it sees sub/.hg/.
1327 1328 #
1328 1329 # All in all, checking against the working copy seems sensible
1329 1330 # since we want to prevent access to nested repositories on
1330 1331 # the filesystem *now*.
1331 1332 ctx = self[None]
1332 1333 parts = util.splitpath(subpath)
1333 1334 while parts:
1334 1335 prefix = b'/'.join(parts)
1335 1336 if prefix in ctx.substate:
1336 1337 if prefix == normsubpath:
1337 1338 return True
1338 1339 else:
1339 1340 sub = ctx.sub(prefix)
1340 1341 return sub.checknested(subpath[len(prefix) + 1 :])
1341 1342 else:
1342 1343 parts.pop()
1343 1344 return False
1344 1345
1345 1346 def peer(self):
1346 1347 return localpeer(self) # not cached to avoid reference cycle
1347 1348
1348 1349 def unfiltered(self):
1349 1350 """Return unfiltered version of the repository
1350 1351
1351 1352 Intended to be overwritten by filtered repo."""
1352 1353 return self
1353 1354
1354 1355 def filtered(self, name, visibilityexceptions=None):
1355 1356 """Return a filtered version of a repository
1356 1357
1357 1358 The `name` parameter is the identifier of the requested view. This
1358 1359 will return a repoview object set "exactly" to the specified view.
1359 1360
1360 1361 This function does not apply recursive filtering to a repository. For
1361 1362 example calling `repo.filtered("served")` will return a repoview using
1362 1363 the "served" view, regardless of the initial view used by `repo`.
1363 1364
1364 1365         In other words, there is always only one level of `repoview` "filtering".
1365 1366 """
1366 1367 if self._extrafilterid is not None and b'%' not in name:
1367 1368 name = name + b'%' + self._extrafilterid
1368 1369
1369 1370 cls = repoview.newtype(self.unfiltered().__class__)
1370 1371 return cls(self, name, visibilityexceptions)
1371 1372
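    # Editor's note -- illustrative only: the view names accepted by
    # filtered() above are those registered in repoview.filtertable, e.g.:
    #
    #     served = repo.filtered(b'served')
    #     visible = repo.filtered(b'visible')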
1372 1373 @mixedrepostorecache(
1373 1374 (b'bookmarks', b'plain'),
1374 1375 (b'bookmarks.current', b'plain'),
1375 1376 (b'bookmarks', b''),
1376 1377 (b'00changelog.i', b''),
1377 1378 )
1378 1379 def _bookmarks(self):
1379 1380 # Since the multiple files involved in the transaction cannot be
1380 1381 # written atomically (with current repository format), there is a race
1381 1382 # condition here.
1382 1383 #
1383 1384 # 1) changelog content A is read
1384 1385 # 2) outside transaction update changelog to content B
1385 1386 # 3) outside transaction update bookmark file referring to content B
1386 1387 # 4) bookmarks file content is read and filtered against changelog-A
1387 1388 #
1388 1389 # When this happens, bookmarks against nodes missing from A are dropped.
1389 1390 #
1390 1391         # Having this happening during read is not great, but it becomes worse
1391 1392         # when this happens during write because the bookmarks to the "unknown"
1392 1393 # nodes will be dropped for good. However, writes happen within locks.
1393 1394         # This locking makes it possible to have a race-free consistent read.
1394 1395         # For this purpose, data read from disk before locking are
1395 1396         # "invalidated" right after the locks are taken. These invalidations are
1396 1397         # "light"; the `filecache` mechanism keeps the data in memory and will
1397 1398         # reuse it if the underlying files did not change. Not parsing the
1398 1399         # same data multiple times helps performance.
1399 1400 #
1400 1401         # Unfortunately in the case described above, the files tracked by the
1401 1402 # bookmarks file cache might not have changed, but the in-memory
1402 1403 # content is still "wrong" because we used an older changelog content
1403 1404 # to process the on-disk data. So after locking, the changelog would be
1404 1405 # refreshed but `_bookmarks` would be preserved.
1405 1406         # Adding `00changelog.i` to the list of tracked files is not
1406 1407 # enough, because at the time we build the content for `_bookmarks` in
1407 1408 # (4), the changelog file has already diverged from the content used
1408 1409 # for loading `changelog` in (1)
1409 1410 #
1410 1411 # To prevent the issue, we force the changelog to be explicitly
1411 1412 # reloaded while computing `_bookmarks`. The data race can still happen
1412 1413 # without the lock (with a narrower window), but it would no longer go
1413 1414 # undetected during the lock time refresh.
1414 1415 #
1415 1416         # The new schedule is as follows:
1416 1417 #
1417 1418 # 1) filecache logic detect that `_bookmarks` needs to be computed
1418 1419 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1419 1420 # 3) We force `changelog` filecache to be tested
1420 1421 # 4) cachestat for `changelog` are captured (for changelog)
1421 1422 # 5) `_bookmarks` is computed and cached
1422 1423 #
1423 1424         # The step in (3) ensures we have a changelog at least as recent as the
1424 1425 # cache stat computed in (1). As a result at locking time:
1425 1426         # * if the changelog did not change since (1) -> we can reuse the data
1426 1427 # * otherwise -> the bookmarks get refreshed.
1427 1428 self._refreshchangelog()
1428 1429 return bookmarks.bmstore(self)
1429 1430
1430 1431 def _refreshchangelog(self):
1431 1432         """make sure the in-memory changelog matches the on-disk one"""
1432 1433 if 'changelog' in vars(self) and self.currenttransaction() is None:
1433 1434 del self.changelog
1434 1435
1435 1436 @property
1436 1437 def _activebookmark(self):
1437 1438 return self._bookmarks.active
1438 1439
1439 1440     # _phasesets depend on changelog. What we need is to call
1440 1441     # _phasecache.invalidate() if '00changelog.i' was changed, but it
1441 1442     # can't be easily expressed in the filecache mechanism.
1442 1443 @storecache(b'phaseroots', b'00changelog.i')
1443 1444 def _phasecache(self):
1444 1445 return phases.phasecache(self, self._phasedefaults)
1445 1446
1446 1447 @storecache(b'obsstore')
1447 1448 def obsstore(self):
1448 1449 return obsolete.makestore(self.ui, self)
1449 1450
1450 1451 @storecache(b'00changelog.i')
1451 1452 def changelog(self):
1452 1453 return self.store.changelog(txnutil.mayhavepending(self.root))
1453 1454
1454 1455 @storecache(b'00manifest.i')
1455 1456 def manifestlog(self):
1456 1457 return self.store.manifestlog(self, self._storenarrowmatch)
1457 1458
1458 1459 @repofilecache(b'dirstate')
1459 1460 def dirstate(self):
1460 1461 return self._makedirstate()
1461 1462
1462 1463 def _makedirstate(self):
1463 1464 """Extension point for wrapping the dirstate per-repo."""
1464 1465 sparsematchfn = lambda: sparse.matcher(self)
1465 1466
1466 1467 return dirstate.dirstate(
1467 1468 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1468 1469 )
1469 1470
1470 1471 def _dirstatevalidate(self, node):
1471 1472 try:
1472 1473 self.changelog.rev(node)
1473 1474 return node
1474 1475 except error.LookupError:
1475 1476 if not self._dirstatevalidatewarned:
1476 1477 self._dirstatevalidatewarned = True
1477 1478 self.ui.warn(
1478 1479 _(b"warning: ignoring unknown working parent %s!\n")
1479 1480 % short(node)
1480 1481 )
1481 1482 return nullid
1482 1483
1483 1484 @storecache(narrowspec.FILENAME)
1484 1485 def narrowpats(self):
1485 1486 """matcher patterns for this repository's narrowspec
1486 1487
1487 1488 A tuple of (includes, excludes).
1488 1489 """
1489 1490 return narrowspec.load(self)
1490 1491
1491 1492 @storecache(narrowspec.FILENAME)
1492 1493 def _storenarrowmatch(self):
1493 1494 if repository.NARROW_REQUIREMENT not in self.requirements:
1494 1495 return matchmod.always()
1495 1496 include, exclude = self.narrowpats
1496 1497 return narrowspec.match(self.root, include=include, exclude=exclude)
1497 1498
1498 1499 @storecache(narrowspec.FILENAME)
1499 1500 def _narrowmatch(self):
1500 1501 if repository.NARROW_REQUIREMENT not in self.requirements:
1501 1502 return matchmod.always()
1502 1503 narrowspec.checkworkingcopynarrowspec(self)
1503 1504 include, exclude = self.narrowpats
1504 1505 return narrowspec.match(self.root, include=include, exclude=exclude)
1505 1506
1506 1507 def narrowmatch(self, match=None, includeexact=False):
1507 1508         """matcher corresponding to the repo's narrowspec
1508 1509
1509 1510 If `match` is given, then that will be intersected with the narrow
1510 1511 matcher.
1511 1512
1512 1513 If `includeexact` is True, then any exact matches from `match` will
1513 1514 be included even if they're outside the narrowspec.
1514 1515 """
1515 1516 if match:
1516 1517 if includeexact and not self._narrowmatch.always():
1517 1518 # do not exclude explicitly-specified paths so that they can
1518 1519 # be warned later on
1519 1520 em = matchmod.exact(match.files())
1520 1521 nm = matchmod.unionmatcher([self._narrowmatch, em])
1521 1522 return matchmod.intersectmatchers(match, nm)
1522 1523 return matchmod.intersectmatchers(match, self._narrowmatch)
1523 1524 return self._narrowmatch
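
# Editor's sketch (not part of the original source; hypothetical caller):
# narrowing an arbitrary matcher through the narrowspec, per the
# docstring above:
#
#     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#     nm = repo.narrowmatch(m, includeexact=True)
#     nm(b'src/foo.py')  # True only if both the pattern and the
#                        # narrowspec (or an exact match) accept it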
1524 1525
1525 1526 def setnarrowpats(self, newincludes, newexcludes):
1526 1527 narrowspec.save(self, newincludes, newexcludes)
1527 1528 self.invalidate(clearfilecache=True)
1528 1529
1529 1530 @unfilteredpropertycache
1530 1531 def _quick_access_changeid_null(self):
1531 1532 return {
1532 1533 b'null': (nullrev, nullid),
1533 1534 nullrev: (nullrev, nullid),
1534 1535 nullid: (nullrev, nullid),
1535 1536 }
1536 1537
1537 1538 @unfilteredpropertycache
1538 1539 def _quick_access_changeid_wc(self):
1539 1540 # also fast path access to the working copy parents
1540 1541 # however, only do it for filters that ensure the wc is visible.
1541 1542 quick = {}
1542 1543 cl = self.unfiltered().changelog
1543 1544 for node in self.dirstate.parents():
1544 1545 if node == nullid:
1545 1546 continue
1546 1547 rev = cl.index.get_rev(node)
1547 1548 if rev is None:
1548 1549 # unknown working copy parent case:
1549 1550 #
1550 1551 # skip the fast path and let higher-level code deal with it
1551 1552 continue
1552 1553 pair = (rev, node)
1553 1554 quick[rev] = pair
1554 1555 quick[node] = pair
1555 1556 # also add the parents of the parents
1556 1557 for r in cl.parentrevs(rev):
1557 1558 if r == nullrev:
1558 1559 continue
1559 1560 n = cl.node(r)
1560 1561 pair = (r, n)
1561 1562 quick[r] = pair
1562 1563 quick[n] = pair
1563 1564 p1node = self.dirstate.p1()
1564 1565 if p1node != nullid:
1565 1566 quick[b'.'] = quick[p1node]
1566 1567 return quick
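
# Illustration (hypothetical revisions): for a working copy parented on
# rev 5 (node n5), itself a child of rev 4 (node n4), the mapping built
# above would contain:
#
#     {5: (5, n5), n5: (5, n5),
#      4: (4, n4), n4: (4, n4),
#      b'.': (5, n5)}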
1567 1568
1568 1569 @unfilteredmethod
1569 1570 def _quick_access_changeid_invalidate(self):
1570 1571 if '_quick_access_changeid_wc' in vars(self):
1571 1572 del self.__dict__['_quick_access_changeid_wc']
1572 1573
1573 1574 @property
1574 1575 def _quick_access_changeid(self):
1575 1576 """an helper dictionnary for __getitem__ calls
1576 1577
1577 1578 This contains a list of symbol we can recognise right away without
1578 1579 further processing.
1579 1580 """
1580 1581 mapping = self._quick_access_changeid_null
1581 1582 if self.filtername in repoview.filter_has_wc:
1582 1583 mapping = mapping.copy()
1583 1584 mapping.update(self._quick_access_changeid_wc)
1584 1585 return mapping
1585 1586
1586 1587 def __getitem__(self, changeid):
1587 1588 # dealing with special cases
1588 1589 if changeid is None:
1589 1590 return context.workingctx(self)
1590 1591 if isinstance(changeid, context.basectx):
1591 1592 return changeid
1592 1593
1593 1594 # dealing with multiple revisions
1594 1595 if isinstance(changeid, slice):
1595 1596 # wdirrev isn't contiguous so the slice shouldn't include it
1596 1597 return [
1597 1598 self[i]
1598 1599 for i in pycompat.xrange(*changeid.indices(len(self)))
1599 1600 if i not in self.changelog.filteredrevs
1600 1601 ]
1601 1602
1602 1603 # dealing with some special values
1603 1604 quick_access = self._quick_access_changeid.get(changeid)
1604 1605 if quick_access is not None:
1605 1606 rev, node = quick_access
1606 1607 return context.changectx(self, rev, node, maybe_filtered=False)
1607 1608 if changeid == b'tip':
1608 1609 node = self.changelog.tip()
1609 1610 rev = self.changelog.rev(node)
1610 1611 return context.changectx(self, rev, node)
1611 1612
1612 1613 # dealing with arbitrary values
1613 1614 try:
1614 1615 if isinstance(changeid, int):
1615 1616 node = self.changelog.node(changeid)
1616 1617 rev = changeid
1617 1618 elif changeid == b'.':
1618 1619 # this is a hack to delay/avoid loading obsmarkers
1619 1620 # when we know that '.' won't be hidden
1620 1621 node = self.dirstate.p1()
1621 1622 rev = self.unfiltered().changelog.rev(node)
1622 1623 elif len(changeid) == 20:
1623 1624 try:
1624 1625 node = changeid
1625 1626 rev = self.changelog.rev(changeid)
1626 1627 except error.FilteredLookupError:
1627 1628 changeid = hex(changeid) # for the error message
1628 1629 raise
1629 1630 except LookupError:
1630 1631 # check if it might have come from damaged dirstate
1631 1632 #
1632 1633 # XXX we could avoid the unfiltered if we had a recognizable
1633 1634 # exception for filtered changeset access
1634 1635 if (
1635 1636 self.local()
1636 1637 and changeid in self.unfiltered().dirstate.parents()
1637 1638 ):
1638 1639 msg = _(b"working directory has unknown parent '%s'!")
1639 1640 raise error.Abort(msg % short(changeid))
1640 1641 changeid = hex(changeid) # for the error message
1641 1642 raise
1642 1643
1643 1644 elif len(changeid) == 40:
1644 1645 node = bin(changeid)
1645 1646 rev = self.changelog.rev(node)
1646 1647 else:
1647 1648 raise error.ProgrammingError(
1648 1649 b"unsupported changeid '%s' of type %s"
1649 1650 % (changeid, pycompat.bytestr(type(changeid)))
1650 1651 )
1651 1652
1652 1653 return context.changectx(self, rev, node)
1653 1654
1654 1655 except (error.FilteredIndexError, error.FilteredLookupError):
1655 1656 raise error.FilteredRepoLookupError(
1656 1657 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1657 1658 )
1658 1659 except (IndexError, LookupError):
1659 1660 raise error.RepoLookupError(
1660 1661 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1661 1662 )
1662 1663 except error.WdirUnsupported:
1663 1664 return context.workingctx(self)
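
# Summary sketch (hypothetical values): lookups accepted by __getitem__
# above include:
#
#     repo[None]     # workingctx
#     repo[0]        # integer revision
#     repo[b'.']     # first working-copy parent
#     repo[b'tip']   # symbolic name
#     repo[node20]   # 20-byte binary node
#     repo[hexnode]  # 40-byte hex node
#     repo[0:3]      # slice -> list of changectx, skipping filtered revs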
1664 1665
1665 1666 def __contains__(self, changeid):
1666 1667 """True if the given changeid exists
1667 1668
1668 1669 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1669 1670 specified.
1670 1671 """
1671 1672 try:
1672 1673 self[changeid]
1673 1674 return True
1674 1675 except error.RepoLookupError:
1675 1676 return False
1676 1677
1677 1678 def __nonzero__(self):
1678 1679 return True
1679 1680
1680 1681 __bool__ = __nonzero__
1681 1682
1682 1683 def __len__(self):
1683 1684 # no need to pay the cost of repoview.changelog
1684 1685 unfi = self.unfiltered()
1685 1686 return len(unfi.changelog)
1686 1687
1687 1688 def __iter__(self):
1688 1689 return iter(self.changelog)
1689 1690
1690 1691 def revs(self, expr, *args):
1691 1692 '''Find revisions matching a revset.
1692 1693
1693 1694 The revset is specified as a string ``expr`` that may contain
1694 1695 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1695 1696
1696 1697 Revset aliases from the configuration are not expanded. To expand
1697 1698 user aliases, consider calling ``scmutil.revrange()`` or
1698 1699 ``repo.anyrevs([expr], user=True)``.
1699 1700
1700 1701 Returns a smartset.abstractsmartset, which is a list-like interface
1701 1702 that contains integer revisions.
1702 1703 '''
1703 1704 tree = revsetlang.spectree(expr, *args)
1704 1705 return revset.makematcher(tree)(self)
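
# Usage sketch (hypothetical revset): %-formatting escapes the
# arguments per revsetlang.formatspec, avoiding manual quoting:
#
#     for r in repo.revs(b'ancestors(%d) and user(%s)', 42, b'alice'):
#         ...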
1705 1706
1706 1707 def set(self, expr, *args):
1707 1708 '''Find revisions matching a revset and emit changectx instances.
1708 1709
1709 1710 This is a convenience wrapper around ``revs()`` that iterates the
1710 1711 result and is a generator of changectx instances.
1711 1712
1712 1713 Revset aliases from the configuration are not expanded. To expand
1713 1714 user aliases, consider calling ``scmutil.revrange()``.
1714 1715 '''
1715 1716 for r in self.revs(expr, *args):
1716 1717 yield self[r]
1717 1718
1718 1719 def anyrevs(self, specs, user=False, localalias=None):
1719 1720 '''Find revisions matching one of the given revsets.
1720 1721
1721 1722 Revset aliases from the configuration are not expanded by default. To
1722 1723 expand user aliases, specify ``user=True``. To provide some local
1723 1724 definitions overriding user aliases, set ``localalias`` to
1724 1725 ``{name: definitionstring}``.
1725 1726 '''
1726 1727 if specs == [b'null']:
1727 1728 return revset.baseset([nullrev])
1728 1729 if specs == [b'.']:
1729 1730 quick_data = self._quick_access_changeid.get(b'.')
1730 1731 if quick_data is not None:
1731 1732 return revset.baseset([quick_data[0]])
1732 1733 if user:
1733 1734 m = revset.matchany(
1734 1735 self.ui,
1735 1736 specs,
1736 1737 lookup=revset.lookupfn(self),
1737 1738 localalias=localalias,
1738 1739 )
1739 1740 else:
1740 1741 m = revset.matchany(None, specs, localalias=localalias)
1741 1742 return m(self)
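
# Usage sketch (hypothetical alias name): expanding user aliases with a
# local override:
#
#     revs = repo.anyrevs(
#         [b'releases'],
#         user=True,
#         localalias={b'releases': b'tag() and public()'},
#     )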
1742 1743
1743 1744 def url(self):
1744 1745 return b'file:' + self.root
1745 1746
1746 1747 def hook(self, name, throw=False, **args):
1747 1748 """Call a hook, passing this repo instance.
1748 1749
1749 1750 This a convenience method to aid invoking hooks. Extensions likely
1750 1751 won't call this unless they have registered a custom hook or are
1751 1752 replacing code that is expected to call a hook.
1752 1753 """
1753 1754 return hook.hook(self.ui, self, name, throw, **args)
1754 1755
1755 1756 @filteredpropertycache
1756 1757 def _tagscache(self):
1757 1758 '''Returns a tagscache object that contains various tag-related
1758 1759 caches.'''
1759 1760
1760 1761 # This simplifies its cache management by having one decorated
1761 1762 # function (this one) and the rest simply fetch things from it.
1762 1763 class tagscache(object):
1763 1764 def __init__(self):
1764 1765 # These two define the set of tags for this repository. tags
1765 1766 # maps tag name to node; tagtypes maps tag name to 'global' or
1766 1767 # 'local'. (Global tags are defined by .hgtags across all
1767 1768 # heads, and local tags are defined in .hg/localtags.)
1768 1769 # They constitute the in-memory cache of tags.
1769 1770 self.tags = self.tagtypes = None
1770 1771
1771 1772 self.nodetagscache = self.tagslist = None
1772 1773
1773 1774 cache = tagscache()
1774 1775 cache.tags, cache.tagtypes = self._findtags()
1775 1776
1776 1777 return cache
1777 1778
1778 1779 def tags(self):
1779 1780 '''return a mapping of tag to node'''
1780 1781 t = {}
1781 1782 if self.changelog.filteredrevs:
1782 1783 tags, tt = self._findtags()
1783 1784 else:
1784 1785 tags = self._tagscache.tags
1785 1786 rev = self.changelog.rev
1786 1787 for k, v in pycompat.iteritems(tags):
1787 1788 try:
1788 1789 # ignore tags to unknown nodes
1789 1790 rev(v)
1790 1791 t[k] = v
1791 1792 except (error.LookupError, ValueError):
1792 1793 pass
1793 1794 return t
1794 1795
1795 1796 def _findtags(self):
1796 1797 '''Do the hard work of finding tags. Return a pair of dicts
1797 1798 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1798 1799 maps tag name to a string like \'global\' or \'local\'.
1799 1800 Subclasses or extensions are free to add their own tags, but
1800 1801 should be aware that the returned dicts will be retained for the
1801 1802 duration of the localrepo object.'''
1802 1803
1803 1804 # XXX what tagtype should subclasses/extensions use? Currently
1804 1805 # mq and bookmarks add tags, but do not set the tagtype at all.
1805 1806 # Should each extension invent its own tag type? Should there
1806 1807 # be one tagtype for all such "virtual" tags? Or is the status
1807 1808 # quo fine?
1808 1809
1809 1810 # map tag name to (node, hist)
1810 1811 alltags = tagsmod.findglobaltags(self.ui, self)
1811 1812 # map tag name to tag type
1812 1813 tagtypes = {tag: b'global' for tag in alltags}
1813 1814
1814 1815 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1815 1816
1816 1817 # Build the return dicts. Have to re-encode tag names because
1817 1818 # the tags module always uses UTF-8 (in order not to lose info
1818 1819 # writing to the cache), but the rest of Mercurial wants them in
1819 1820 # local encoding.
1820 1821 tags = {}
1821 1822 for (name, (node, hist)) in pycompat.iteritems(alltags):
1822 1823 if node != nullid:
1823 1824 tags[encoding.tolocal(name)] = node
1824 1825 tags[b'tip'] = self.changelog.tip()
1825 1826 tagtypes = {
1826 1827 encoding.tolocal(name): value
1827 1828 for (name, value) in pycompat.iteritems(tagtypes)
1828 1829 }
1829 1830 return (tags, tagtypes)
1830 1831
1831 1832 def tagtype(self, tagname):
1832 1833 '''
1833 1834 return the type of the given tag. result can be:
1834 1835
1835 1836 'local' : a local tag
1836 1837 'global' : a global tag
1837 1838 None : tag does not exist
1838 1839 '''
1839 1840
1840 1841 return self._tagscache.tagtypes.get(tagname)
1841 1842
1842 1843 def tagslist(self):
1843 1844 '''return a list of tags ordered by revision'''
1844 1845 if not self._tagscache.tagslist:
1845 1846 l = []
1846 1847 for t, n in pycompat.iteritems(self.tags()):
1847 1848 l.append((self.changelog.rev(n), t, n))
1848 1849 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1849 1850
1850 1851 return self._tagscache.tagslist
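
# Shape sketch (hypothetical nodes n1, n2): tags() maps name -> node,
# while tagslist() returns (name, node) pairs ordered by revision:
#
#     repo.tags()      # {b'v1.0': n1, b'tip': n2, ...}
#     repo.tagslist()  # [(b'v1.0', n1), (b'tip', n2)]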
1851 1852
1852 1853 def nodetags(self, node):
1853 1854 '''return the tags associated with a node'''
1854 1855 if not self._tagscache.nodetagscache:
1855 1856 nodetagscache = {}
1856 1857 for t, n in pycompat.iteritems(self._tagscache.tags):
1857 1858 nodetagscache.setdefault(n, []).append(t)
1858 1859 for tags in pycompat.itervalues(nodetagscache):
1859 1860 tags.sort()
1860 1861 self._tagscache.nodetagscache = nodetagscache
1861 1862 return self._tagscache.nodetagscache.get(node, [])
1862 1863
1863 1864 def nodebookmarks(self, node):
1864 1865 """return the list of bookmarks pointing to the specified node"""
1865 1866 return self._bookmarks.names(node)
1866 1867
1867 1868 def branchmap(self):
1868 1869 '''returns a dictionary {branch: [branchheads]} with branchheads
1869 1870 ordered by increasing revision number'''
1870 1871 return self._branchcaches[self]
1871 1872
1872 1873 @unfilteredmethod
1873 1874 def revbranchcache(self):
1874 1875 if not self._revbranchcache:
1875 1876 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1876 1877 return self._revbranchcache
1877 1878
1878 1879 def branchtip(self, branch, ignoremissing=False):
1879 1880 '''return the tip node for a given branch
1880 1881
1881 1882 If ignoremissing is True, then this method will not raise an error.
1882 1883 This is helpful for callers that only expect None for a missing branch
1883 1884 (e.g. namespace).
1884 1885
1885 1886 '''
1886 1887 try:
1887 1888 return self.branchmap().branchtip(branch)
1888 1889 except KeyError:
1889 1890 if not ignoremissing:
1890 1891 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1891 1892 else:
1892 1893 pass
1893 1894
1894 1895 def lookup(self, key):
1895 1896 node = scmutil.revsymbol(self, key).node()
1896 1897 if node is None:
1897 1898 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1898 1899 return node
1899 1900
1900 1901 def lookupbranch(self, key):
1901 1902 if self.branchmap().hasbranch(key):
1902 1903 return key
1903 1904
1904 1905 return scmutil.revsymbol(self, key).branch()
1905 1906
1906 1907 def known(self, nodes):
1907 1908 cl = self.changelog
1908 1909 get_rev = cl.index.get_rev
1909 1910 filtered = cl.filteredrevs
1910 1911 result = []
1911 1912 for n in nodes:
1912 1913 r = get_rev(n)
1913 1914 resp = not (r is None or r in filtered)
1914 1915 result.append(resp)
1915 1916 return result
1916 1917
1917 1918 def local(self):
1918 1919 return self
1919 1920
1920 1921 def publishing(self):
1921 1922 # it's safe (and desirable) to trust the publish flag unconditionally
1922 1923 # so that we don't finalize changes shared between users via ssh or nfs
1923 1924 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1924 1925
1925 1926 def cancopy(self):
1926 1927 # so statichttprepo's override of local() works
1927 1928 if not self.local():
1928 1929 return False
1929 1930 if not self.publishing():
1930 1931 return True
1931 1932 # if publishing we can't copy if there is filtered content
1932 1933 return not self.filtered(b'visible').changelog.filteredrevs
1933 1934
1934 1935 def shared(self):
1935 1936 '''the type of shared repository (None if not shared)'''
1936 1937 if self.sharedpath != self.path:
1937 1938 return b'store'
1938 1939 return None
1939 1940
1940 1941 def wjoin(self, f, *insidef):
1941 1942 return self.vfs.reljoin(self.root, f, *insidef)
1942 1943
1943 1944 def setparents(self, p1, p2=nullid):
1944 1945 self[None].setparents(p1, p2)
1945 1946 self._quick_access_changeid_invalidate()
1946 1947
1947 1948 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1948 1949 """changeid must be a changeset revision, if specified.
1949 1950 fileid can be a file revision or node."""
1950 1951 return context.filectx(
1951 1952 self, path, changeid, fileid, changectx=changectx
1952 1953 )
1953 1954
1954 1955 def getcwd(self):
1955 1956 return self.dirstate.getcwd()
1956 1957
1957 1958 def pathto(self, f, cwd=None):
1958 1959 return self.dirstate.pathto(f, cwd)
1959 1960
1960 1961 def _loadfilter(self, filter):
1961 1962 if filter not in self._filterpats:
1962 1963 l = []
1963 1964 for pat, cmd in self.ui.configitems(filter):
1964 1965 if cmd == b'!':
1965 1966 continue
1966 1967 mf = matchmod.match(self.root, b'', [pat])
1967 1968 fn = None
1968 1969 params = cmd
1969 1970 for name, filterfn in pycompat.iteritems(self._datafilters):
1970 1971 if cmd.startswith(name):
1971 1972 fn = filterfn
1972 1973 params = cmd[len(name) :].lstrip()
1973 1974 break
1974 1975 if not fn:
1975 1976 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1976 1977 fn.__name__ = 'commandfilter'
1977 1978 # Wrap old filters not supporting keyword arguments
1978 1979 if not pycompat.getargspec(fn)[2]:
1979 1980 oldfn = fn
1980 1981 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1981 1982 fn.__name__ = 'compat-' + oldfn.__name__
1982 1983 l.append((mf, fn, params))
1983 1984 self._filterpats[filter] = l
1984 1985 return self._filterpats[filter]
1985 1986
1986 1987 def _filter(self, filterpats, filename, data):
1987 1988 for mf, fn, cmd in filterpats:
1988 1989 if mf(filename):
1989 1990 self.ui.debug(
1990 1991 b"filtering %s through %s\n"
1991 1992 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1992 1993 )
1993 1994 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1994 1995 break
1995 1996
1996 1997 return data
1997 1998
1998 1999 @unfilteredpropertycache
1999 2000 def _encodefilterpats(self):
2000 2001 return self._loadfilter(b'encode')
2001 2002
2002 2003 @unfilteredpropertycache
2003 2004 def _decodefilterpats(self):
2004 2005 return self._loadfilter(b'decode')
2005 2006
2006 2007 def adddatafilter(self, name, filter):
2007 2008 self._datafilters[name] = filter
2008 2009
2009 2010 def wread(self, filename):
2010 2011 if self.wvfs.islink(filename):
2011 2012 data = self.wvfs.readlink(filename)
2012 2013 else:
2013 2014 data = self.wvfs.read(filename)
2014 2015 return self._filter(self._encodefilterpats, filename, data)
2015 2016
2016 2017 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2017 2018 """write ``data`` into ``filename`` in the working directory
2018 2019
2019 2020 This returns the length of the written (possibly decoded) data.
2020 2021 """
2021 2022 data = self._filter(self._decodefilterpats, filename, data)
2022 2023 if b'l' in flags:
2023 2024 self.wvfs.symlink(data, filename)
2024 2025 else:
2025 2026 self.wvfs.write(
2026 2027 filename, data, backgroundclose=backgroundclose, **kwargs
2027 2028 )
2028 2029 if b'x' in flags:
2029 2030 self.wvfs.setflags(filename, False, True)
2030 2031 else:
2031 2032 self.wvfs.setflags(filename, False, False)
2032 2033 return len(data)
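
# Configuration sketch (hypothetical filter commands): the patterns fed
# to _loadfilter() above come from the [encode]/[decode] hgrc sections,
# e.g.:
#
#     [encode]
#     *.txt = dos2unix
#
#     [decode]
#     *.txt = unix2dos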
2033 2034
2034 2035 def wwritedata(self, filename, data):
2035 2036 return self._filter(self._decodefilterpats, filename, data)
2036 2037
2037 2038 def currenttransaction(self):
2038 2039 """return the current transaction or None if non exists"""
2039 2040 if self._transref:
2040 2041 tr = self._transref()
2041 2042 else:
2042 2043 tr = None
2043 2044
2044 2045 if tr and tr.running():
2045 2046 return tr
2046 2047 return None
2047 2048
2048 2049 def transaction(self, desc, report=None):
2049 2050 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2050 2051 b'devel', b'check-locks'
2051 2052 ):
2052 2053 if self._currentlock(self._lockref) is None:
2053 2054 raise error.ProgrammingError(b'transaction requires locking')
2054 2055 tr = self.currenttransaction()
2055 2056 if tr is not None:
2056 2057 return tr.nest(name=desc)
2057 2058
2058 2059 # abort here if the journal already exists
2059 2060 if self.svfs.exists(b"journal"):
2060 2061 raise error.RepoError(
2061 2062 _(b"abandoned transaction found"),
2062 2063 hint=_(b"run 'hg recover' to clean up transaction"),
2063 2064 )
2064 2065
2065 2066 idbase = b"%.40f#%f" % (random.random(), time.time())
2066 2067 ha = hex(hashutil.sha1(idbase).digest())
2067 2068 txnid = b'TXN:' + ha
2068 2069 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2069 2070
2070 2071 self._writejournal(desc)
2071 2072 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2072 2073 if report:
2073 2074 rp = report
2074 2075 else:
2075 2076 rp = self.ui.warn
2076 2077 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2077 2078 # we must avoid cyclic reference between repo and transaction.
2078 2079 reporef = weakref.ref(self)
2079 2080 # Code to track tag movement
2080 2081 #
2081 2082 # Since tags are all handled as file content, it is actually quite hard
2082 2083 # to track these movements from a code perspective. So we fall back to
2083 2084 # tracking at the repository level. One could envision tracking changes
2084 2085 # to the '.hgtags' file through changegroup application, but that fails
2085 2086 # to cope with cases where a transaction exposes new heads without a
2086 2087 # changegroup being involved (eg: phase movement).
2087 2088 #
2088 2089 # For now, we gate the feature behind a flag since it likely comes
2089 2090 # with performance impacts. The current code runs more often than needed
2090 2091 # and does not use caches as much as it could. The current focus is on
2091 2092 # the behavior of the feature so we disable it by default. The flag
2092 2093 # will be removed when we are happy with the performance impact.
2093 2094 #
2094 2095 # Once this feature is no longer experimental move the following
2095 2096 # documentation to the appropriate help section:
2096 2097 #
2097 2098 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2098 2099 # tags (new or changed or deleted tags). In addition the details of
2099 2100 # these changes are made available in a file at:
2100 2101 # ``REPOROOT/.hg/changes/tags.changes``.
2101 2102 # Make sure you check for HG_TAG_MOVED before reading that file as it
2102 2103 # might exist from a previous transaction even if no tags were touched
2103 2104 # in this one. Changes are recorded in a line-based format::
2104 2105 #
2105 2106 # <action> <hex-node> <tag-name>\n
2106 2107 #
2107 2108 # Actions are defined as follows:
2108 2109 # "-R": tag is removed,
2109 2110 # "+A": tag is added,
2110 2111 # "-M": tag is moved (old value),
2111 2112 # "+M": tag is moved (new value),
2112 2113 tracktags = lambda x: None
2113 2114 # experimental config: experimental.hook-track-tags
2114 2115 shouldtracktags = self.ui.configbool(
2115 2116 b'experimental', b'hook-track-tags'
2116 2117 )
2117 2118 if desc != b'strip' and shouldtracktags:
2118 2119 oldheads = self.changelog.headrevs()
2119 2120
2120 2121 def tracktags(tr2):
2121 2122 repo = reporef()
2122 2123 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2123 2124 newheads = repo.changelog.headrevs()
2124 2125 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2125 2126 # note: we compare lists here.
2126 2127 # As we do it only once, building a set would not be cheaper
2127 2128 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2128 2129 if changes:
2129 2130 tr2.hookargs[b'tag_moved'] = b'1'
2130 2131 with repo.vfs(
2131 2132 b'changes/tags.changes', b'w', atomictemp=True
2132 2133 ) as changesfile:
2133 2134 # note: we do not register the file with the transaction
2134 2135 # because we need it to still exist when the transaction
2135 2136 # is closed (for txnclose hooks)
2136 2137 tagsmod.writediff(changesfile, changes)
2137 2138
2138 2139 def validate(tr2):
2139 2140 """will run pre-closing hooks"""
2140 2141 # XXX the transaction API is a bit lacking here so we take a hacky
2141 2142 # path for now
2142 2143 #
2143 2144 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2144 2145 # dict is copied before these run. In addition we needs the data
2145 2146 # available to in memory hooks too.
2146 2147 #
2147 2148 # Moreover, we also need to make sure this runs before txnclose
2148 2149 # hooks and there is no "pending" mechanism that would execute
2149 2150 # logic only if hooks are about to run.
2150 2151 #
2151 2152 # Fixing this limitation of the transaction is also needed to track
2152 2153 # other families of changes (bookmarks, phases, obsolescence).
2153 2154 #
2154 2155 # This will have to be fixed before we remove the experimental
2155 2156 # gating.
2156 2157 tracktags(tr2)
2157 2158 repo = reporef()
2158 2159
2159 2160 singleheadopt = (b'experimental', b'single-head-per-branch')
2160 2161 singlehead = repo.ui.configbool(*singleheadopt)
2161 2162 if singlehead:
2162 2163 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2163 2164 accountclosed = singleheadsub.get(
2164 2165 b"account-closed-heads", False
2165 2166 )
2166 2167 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2167 2168 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2168 2169 for name, (old, new) in sorted(
2169 2170 tr.changes[b'bookmarks'].items()
2170 2171 ):
2171 2172 args = tr.hookargs.copy()
2172 2173 args.update(bookmarks.preparehookargs(name, old, new))
2173 2174 repo.hook(
2174 2175 b'pretxnclose-bookmark',
2175 2176 throw=True,
2176 2177 **pycompat.strkwargs(args)
2177 2178 )
2178 2179 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2179 2180 cl = repo.unfiltered().changelog
2180 2181 for rev, (old, new) in tr.changes[b'phases'].items():
2181 2182 args = tr.hookargs.copy()
2182 2183 node = hex(cl.node(rev))
2183 2184 args.update(phases.preparehookargs(node, old, new))
2184 2185 repo.hook(
2185 2186 b'pretxnclose-phase',
2186 2187 throw=True,
2187 2188 **pycompat.strkwargs(args)
2188 2189 )
2189 2190
2190 2191 repo.hook(
2191 2192 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2192 2193 )
2193 2194
2194 2195 def releasefn(tr, success):
2195 2196 repo = reporef()
2196 2197 if repo is None:
2197 2198 # If the repo has been GC'd (and this release function is being
2198 2199 # called from transaction.__del__), there's not much we can do,
2199 2200 # so just leave the unfinished transaction there and let the
2200 2201 # user run `hg recover`.
2201 2202 return
2202 2203 if success:
2203 2204 # this should be explicitly invoked here, because
2204 2205 # in-memory changes aren't written out when closing the
2205 2206 # transaction if tr.addfilegenerator (via
2206 2207 # dirstate.write or so) isn't invoked while the
2207 2208 # transaction is running
2208 2209 repo.dirstate.write(None)
2209 2210 else:
2210 2211 # discard all changes (including ones already written
2211 2212 # out) in this transaction
2212 2213 narrowspec.restorebackup(self, b'journal.narrowspec')
2213 2214 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2214 2215 repo.dirstate.restorebackup(None, b'journal.dirstate')
2215 2216
2216 2217 repo.invalidate(clearfilecache=True)
2217 2218
2218 2219 tr = transaction.transaction(
2219 2220 rp,
2220 2221 self.svfs,
2221 2222 vfsmap,
2222 2223 b"journal",
2223 2224 b"undo",
2224 2225 aftertrans(renames),
2225 2226 self.store.createmode,
2226 2227 validator=validate,
2227 2228 releasefn=releasefn,
2228 2229 checkambigfiles=_cachedfiles,
2229 2230 name=desc,
2230 2231 )
2231 2232 tr.changes[b'origrepolen'] = len(self)
2232 2233 tr.changes[b'obsmarkers'] = set()
2233 2234 tr.changes[b'phases'] = {}
2234 2235 tr.changes[b'bookmarks'] = {}
2235 2236
2236 2237 tr.hookargs[b'txnid'] = txnid
2237 2238 tr.hookargs[b'txnname'] = desc
2238 2239 # note: writing the fncache only during finalize means that the file is
2239 2240 # outdated when running hooks. As fncache is used for streaming clone,
2240 2241 # this is not expected to break anything that happens during the hooks.
2241 2242 tr.addfinalize(b'flush-fncache', self.store.write)
2242 2243
2243 2244 def txnclosehook(tr2):
2244 2245 """To be run if transaction is successful, will schedule a hook run
2245 2246 """
2246 2247 # Don't reference tr2 in hook() so we don't hold a reference.
2247 2248 # This reduces memory consumption when there are multiple
2248 2249 # transactions per lock. This can likely go away if issue5045
2249 2250 # fixes the function accumulation.
2250 2251 hookargs = tr2.hookargs
2251 2252
2252 2253 def hookfunc(unused_success):
2253 2254 repo = reporef()
2254 2255 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2255 2256 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2256 2257 for name, (old, new) in bmchanges:
2257 2258 args = tr.hookargs.copy()
2258 2259 args.update(bookmarks.preparehookargs(name, old, new))
2259 2260 repo.hook(
2260 2261 b'txnclose-bookmark',
2261 2262 throw=False,
2262 2263 **pycompat.strkwargs(args)
2263 2264 )
2264 2265
2265 2266 if hook.hashook(repo.ui, b'txnclose-phase'):
2266 2267 cl = repo.unfiltered().changelog
2267 2268 phasemv = sorted(tr.changes[b'phases'].items())
2268 2269 for rev, (old, new) in phasemv:
2269 2270 args = tr.hookargs.copy()
2270 2271 node = hex(cl.node(rev))
2271 2272 args.update(phases.preparehookargs(node, old, new))
2272 2273 repo.hook(
2273 2274 b'txnclose-phase',
2274 2275 throw=False,
2275 2276 **pycompat.strkwargs(args)
2276 2277 )
2277 2278
2278 2279 repo.hook(
2279 2280 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2280 2281 )
2281 2282
2282 2283 reporef()._afterlock(hookfunc)
2283 2284
2284 2285 tr.addfinalize(b'txnclose-hook', txnclosehook)
2285 2286 # Include a leading "-" to make it happen before the transaction summary
2286 2287 # reports registered via scmutil.registersummarycallback() whose names
2287 2288 # are 00-txnreport etc. That way, the caches will be warm when the
2288 2289 # callbacks run.
2289 2290 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2290 2291
2291 2292 def txnaborthook(tr2):
2292 2293 """To be run if transaction is aborted
2293 2294 """
2294 2295 reporef().hook(
2295 2296 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2296 2297 )
2297 2298
2298 2299 tr.addabort(b'txnabort-hook', txnaborthook)
2299 2300 # avoid eager cache invalidation. in-memory data should be identical
2300 2301 # to stored data if transaction has no error.
2301 2302 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2302 2303 self._transref = weakref.ref(tr)
2303 2304 scmutil.registersummarycallback(self, tr, desc)
2304 2305 return tr
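
# Usage sketch: transactions nest, so the typical caller (see commit()
# later in this file) takes the lock first and lets the context manager
# close or abort; b'my-operation' is a hypothetical description:
#
#     with repo.lock(), repo.transaction(b'my-operation') as tr:
#         ...  # store mutations; aborted via the journal on error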
2305 2306
2306 2307 def _journalfiles(self):
2307 2308 return (
2308 2309 (self.svfs, b'journal'),
2309 2310 (self.svfs, b'journal.narrowspec'),
2310 2311 (self.vfs, b'journal.narrowspec.dirstate'),
2311 2312 (self.vfs, b'journal.dirstate'),
2312 2313 (self.vfs, b'journal.branch'),
2313 2314 (self.vfs, b'journal.desc'),
2314 2315 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2315 2316 (self.svfs, b'journal.phaseroots'),
2316 2317 )
2317 2318
2318 2319 def undofiles(self):
2319 2320 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2320 2321
2321 2322 @unfilteredmethod
2322 2323 def _writejournal(self, desc):
2323 2324 self.dirstate.savebackup(None, b'journal.dirstate')
2324 2325 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2325 2326 narrowspec.savebackup(self, b'journal.narrowspec')
2326 2327 self.vfs.write(
2327 2328 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2328 2329 )
2329 2330 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2330 2331 bookmarksvfs = bookmarks.bookmarksvfs(self)
2331 2332 bookmarksvfs.write(
2332 2333 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2333 2334 )
2334 2335 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2335 2336
2336 2337 def recover(self):
2337 2338 with self.lock():
2338 2339 if self.svfs.exists(b"journal"):
2339 2340 self.ui.status(_(b"rolling back interrupted transaction\n"))
2340 2341 vfsmap = {
2341 2342 b'': self.svfs,
2342 2343 b'plain': self.vfs,
2343 2344 }
2344 2345 transaction.rollback(
2345 2346 self.svfs,
2346 2347 vfsmap,
2347 2348 b"journal",
2348 2349 self.ui.warn,
2349 2350 checkambigfiles=_cachedfiles,
2350 2351 )
2351 2352 self.invalidate()
2352 2353 return True
2353 2354 else:
2354 2355 self.ui.warn(_(b"no interrupted transaction available\n"))
2355 2356 return False
2356 2357
2357 2358 def rollback(self, dryrun=False, force=False):
2358 2359 wlock = lock = dsguard = None
2359 2360 try:
2360 2361 wlock = self.wlock()
2361 2362 lock = self.lock()
2362 2363 if self.svfs.exists(b"undo"):
2363 2364 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2364 2365
2365 2366 return self._rollback(dryrun, force, dsguard)
2366 2367 else:
2367 2368 self.ui.warn(_(b"no rollback information available\n"))
2368 2369 return 1
2369 2370 finally:
2370 2371 release(dsguard, lock, wlock)
2371 2372
2372 2373 @unfilteredmethod # Until we get smarter cache management
2373 2374 def _rollback(self, dryrun, force, dsguard):
2374 2375 ui = self.ui
2375 2376 try:
2376 2377 args = self.vfs.read(b'undo.desc').splitlines()
2377 2378 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2378 2379 if len(args) >= 3:
2379 2380 detail = args[2]
2380 2381 oldtip = oldlen - 1
2381 2382
2382 2383 if detail and ui.verbose:
2383 2384 msg = _(
2384 2385 b'repository tip rolled back to revision %d'
2385 2386 b' (undo %s: %s)\n'
2386 2387 ) % (oldtip, desc, detail)
2387 2388 else:
2388 2389 msg = _(
2389 2390 b'repository tip rolled back to revision %d (undo %s)\n'
2390 2391 ) % (oldtip, desc)
2391 2392 except IOError:
2392 2393 msg = _(b'rolling back unknown transaction\n')
2393 2394 desc = None
2394 2395
2395 2396 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2396 2397 raise error.Abort(
2397 2398 _(
2398 2399 b'rollback of last commit while not checked out '
2399 2400 b'may lose data'
2400 2401 ),
2401 2402 hint=_(b'use -f to force'),
2402 2403 )
2403 2404
2404 2405 ui.status(msg)
2405 2406 if dryrun:
2406 2407 return 0
2407 2408
2408 2409 parents = self.dirstate.parents()
2409 2410 self.destroying()
2410 2411 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2411 2412 transaction.rollback(
2412 2413 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2413 2414 )
2414 2415 bookmarksvfs = bookmarks.bookmarksvfs(self)
2415 2416 if bookmarksvfs.exists(b'undo.bookmarks'):
2416 2417 bookmarksvfs.rename(
2417 2418 b'undo.bookmarks', b'bookmarks', checkambig=True
2418 2419 )
2419 2420 if self.svfs.exists(b'undo.phaseroots'):
2420 2421 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2421 2422 self.invalidate()
2422 2423
2423 2424 has_node = self.changelog.index.has_node
2424 2425 parentgone = any(not has_node(p) for p in parents)
2425 2426 if parentgone:
2426 2427 # prevent dirstateguard from overwriting already restored one
2427 2428 dsguard.close()
2428 2429
2429 2430 narrowspec.restorebackup(self, b'undo.narrowspec')
2430 2431 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2431 2432 self.dirstate.restorebackup(None, b'undo.dirstate')
2432 2433 try:
2433 2434 branch = self.vfs.read(b'undo.branch')
2434 2435 self.dirstate.setbranch(encoding.tolocal(branch))
2435 2436 except IOError:
2436 2437 ui.warn(
2437 2438 _(
2438 2439 b'named branch could not be reset: '
2439 2440 b'current branch is still \'%s\'\n'
2440 2441 )
2441 2442 % self.dirstate.branch()
2442 2443 )
2443 2444
2444 2445 parents = tuple([p.rev() for p in self[None].parents()])
2445 2446 if len(parents) > 1:
2446 2447 ui.status(
2447 2448 _(
2448 2449 b'working directory now based on '
2449 2450 b'revisions %d and %d\n'
2450 2451 )
2451 2452 % parents
2452 2453 )
2453 2454 else:
2454 2455 ui.status(
2455 2456 _(b'working directory now based on revision %d\n') % parents
2456 2457 )
2457 2458 mergemod.mergestate.clean(self, self[b'.'].node())
2458 2459
2459 2460 # TODO: if we know which new heads may result from this rollback, pass
2460 2461 # them to destroy(), which will prevent the branchhead cache from being
2461 2462 # invalidated.
2462 2463 self.destroyed()
2463 2464 return 0
2464 2465
2465 2466 def _buildcacheupdater(self, newtransaction):
2466 2467 """called during transaction to build the callback updating cache
2467 2468
2468 2469 Lives on the repository to help extensions that might want to augment
2469 2470 this logic. For this purpose, the created transaction is passed to the
2470 2471 method.
2471 2472 """
2472 2473 # we must avoid cyclic reference between repo and transaction.
2473 2474 reporef = weakref.ref(self)
2474 2475
2475 2476 def updater(tr):
2476 2477 repo = reporef()
2477 2478 repo.updatecaches(tr)
2478 2479
2479 2480 return updater
2480 2481
2481 2482 @unfilteredmethod
2482 2483 def updatecaches(self, tr=None, full=False):
2483 2484 """warm appropriate caches
2484 2485
2485 2486 If this function is called after a transaction closed, the transaction
2486 2487 will be available in the 'tr' argument. This can be used to selectively
2487 2488 update caches relevant to the changes in that transaction.
2488 2489
2489 2490 If 'full' is set, make sure all caches the function knows about have
2490 2491 up-to-date data, even the ones usually loaded more lazily.
2491 2492 """
2492 2493 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2493 2494 # During strip, many caches are invalid but
2494 2495 # later call to `destroyed` will refresh them.
2495 2496 return
2496 2497
2497 2498 if tr is None or tr.changes[b'origrepolen'] < len(self):
2498 2499 # accessing the 'served' branchmap should refresh all the others,
2499 2500 self.ui.debug(b'updating the branch cache\n')
2500 2501 self.filtered(b'served').branchmap()
2501 2502 self.filtered(b'served.hidden').branchmap()
2502 2503
2503 2504 if full:
2504 2505 unfi = self.unfiltered()
2505 2506
2506 2507 self.changelog.update_caches(transaction=tr)
2507 2508
2508 2509 rbc = unfi.revbranchcache()
2509 2510 for r in unfi.changelog:
2510 2511 rbc.branchinfo(r)
2511 2512 rbc.write()
2512 2513
2513 2514 # ensure the working copy parents are in the manifestfulltextcache
2514 2515 for ctx in self[b'.'].parents():
2515 2516 ctx.manifest() # accessing the manifest is enough
2516 2517
2517 2518 # accessing fnode cache warms the cache
2518 2519 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2519 2520 # accessing tags warm the cache
2520 2521 self.tags()
2521 2522 self.filtered(b'served').tags()
2522 2523
2523 2524 # The `full` arg is documented as updating even the lazily-loaded
2524 2525 # caches immediately, so we're forcing a write to cause these caches
2525 2526 # to be warmed up even if they haven't explicitly been requested
2526 2527 # yet (if they've never been used by hg, they won't ever have been
2527 2528 # written, even if they're a subset of another kind of cache that
2528 2529 # *has* been used).
2529 2530 for filt in repoview.filtertable.keys():
2530 2531 filtered = self.filtered(filt)
2531 2532 filtered.branchmap().write(filtered)
2532 2533
2533 2534 def invalidatecaches(self):
2534 2535
2535 2536 if '_tagscache' in vars(self):
2536 2537 # can't use delattr on proxy
2537 2538 del self.__dict__['_tagscache']
2538 2539
2539 2540 self._branchcaches.clear()
2540 2541 self.invalidatevolatilesets()
2541 2542 self._sparsesignaturecache.clear()
2542 2543
2543 2544 def invalidatevolatilesets(self):
2544 2545 self.filteredrevcache.clear()
2545 2546 obsolete.clearobscaches(self)
2546 2547 self._quick_access_changeid_invalidate()
2547 2548
2548 2549 def invalidatedirstate(self):
2549 2550 '''Invalidates the dirstate, causing the next call to dirstate
2550 2551 to check if it was modified since the last time it was read,
2551 2552 rereading it if it has.
2552 2553
2553 2554 This is different from dirstate.invalidate() in that it doesn't always
2554 2555 reread the dirstate. Use dirstate.invalidate() if you want to
2555 2556 explicitly read the dirstate again (i.e. restoring it to a previous
2556 2557 known good state).'''
2557 2558 if hasunfilteredcache(self, 'dirstate'):
2558 2559 for k in self.dirstate._filecache:
2559 2560 try:
2560 2561 delattr(self.dirstate, k)
2561 2562 except AttributeError:
2562 2563 pass
2563 2564 delattr(self.unfiltered(), 'dirstate')
2564 2565
2565 2566 def invalidate(self, clearfilecache=False):
2566 2567 '''Invalidates both store and non-store parts other than dirstate
2567 2568
2568 2569 If a transaction is running, invalidation of store is omitted,
2569 2570 because discarding in-memory changes might cause inconsistency
2570 2571 (e.g. incomplete fncache causes unintentional failure, but
2571 2572 redundant one doesn't).
2572 2573 '''
2573 2574 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2574 2575 for k in list(self._filecache.keys()):
2575 2576 # dirstate is invalidated separately in invalidatedirstate()
2576 2577 if k == b'dirstate':
2577 2578 continue
2578 2579 if (
2579 2580 k == b'changelog'
2580 2581 and self.currenttransaction()
2581 2582 and self.changelog._delayed
2582 2583 ):
2583 2584 # The changelog object may store unwritten revisions. We don't
2584 2585 # want to lose them.
2585 2586 # TODO: Solve the problem instead of working around it.
2586 2587 continue
2587 2588
2588 2589 if clearfilecache:
2589 2590 del self._filecache[k]
2590 2591 try:
2591 2592 delattr(unfiltered, k)
2592 2593 except AttributeError:
2593 2594 pass
2594 2595 self.invalidatecaches()
2595 2596 if not self.currenttransaction():
2596 2597 # TODO: Changing contents of store outside transaction
2597 2598 # causes inconsistency. We should make in-memory store
2598 2599 # changes detectable, and abort if changed.
2599 2600 self.store.invalidatecaches()
2600 2601
2601 2602 def invalidateall(self):
2602 2603 '''Fully invalidates both store and non-store parts, causing the
2603 2604 subsequent operation to reread any outside changes.'''
2604 2605 # extension should hook this to invalidate its caches
2605 2606 self.invalidate()
2606 2607 self.invalidatedirstate()
2607 2608
2608 2609 @unfilteredmethod
2609 2610 def _refreshfilecachestats(self, tr):
2610 2611 """Reload stats of cached files so that they are flagged as valid"""
2611 2612 for k, ce in self._filecache.items():
2612 2613 k = pycompat.sysstr(k)
2613 2614 if k == 'dirstate' or k not in self.__dict__:
2614 2615 continue
2615 2616 ce.refresh()
2616 2617
2617 2618 def _lock(
2618 2619 self,
2619 2620 vfs,
2620 2621 lockname,
2621 2622 wait,
2622 2623 releasefn,
2623 2624 acquirefn,
2624 2625 desc,
2625 2626 inheritchecker=None,
2626 2627 parentenvvar=None,
2627 2628 ):
2628 2629 parentlock = None
2629 2630 # the contents of parentenvvar are used by the underlying lock to
2630 2631 # determine whether it can be inherited
2631 2632 if parentenvvar is not None:
2632 2633 parentlock = encoding.environ.get(parentenvvar)
2633 2634
2634 2635 timeout = 0
2635 2636 warntimeout = 0
2636 2637 if wait:
2637 2638 timeout = self.ui.configint(b"ui", b"timeout")
2638 2639 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2639 2640 # internal config: ui.signal-safe-lock
2640 2641 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2641 2642
2642 2643 l = lockmod.trylock(
2643 2644 self.ui,
2644 2645 vfs,
2645 2646 lockname,
2646 2647 timeout,
2647 2648 warntimeout,
2648 2649 releasefn=releasefn,
2649 2650 acquirefn=acquirefn,
2650 2651 desc=desc,
2651 2652 inheritchecker=inheritchecker,
2652 2653 parentlock=parentlock,
2653 2654 signalsafe=signalsafe,
2654 2655 )
2655 2656 return l
2656 2657
2657 2658 def _afterlock(self, callback):
2658 2659 """add a callback to be run when the repository is fully unlocked
2659 2660
2660 2661 The callback will be executed when the outermost lock is released
2661 2662 (with wlock being higher level than 'lock')."""
2662 2663 for ref in (self._wlockref, self._lockref):
2663 2664 l = ref and ref()
2664 2665 if l and l.held:
2665 2666 l.postrelease.append(callback)
2666 2667 break
2667 2668 else: # no lock has been found.
2668 2669 callback(True)
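
# Usage sketch (hypothetical callback): callbacks receive a boolean
# success flag, as hookfunc() above does:
#
#     repo._afterlock(lambda success: repo.ui.note(b'fully unlocked\n'))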
2669 2670
2670 2671 def lock(self, wait=True):
2671 2672 '''Lock the repository store (.hg/store) and return a weak reference
2672 2673 to the lock. Use this before modifying the store (e.g. committing or
2673 2674 stripping). If you are opening a transaction, get a lock as well.
2674 2675
2675 2676 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2676 2677 'wlock' first to avoid a deadlock hazard.'''
2677 2678 l = self._currentlock(self._lockref)
2678 2679 if l is not None:
2679 2680 l.lock()
2680 2681 return l
2681 2682
2682 2683 l = self._lock(
2683 2684 vfs=self.svfs,
2684 2685 lockname=b"lock",
2685 2686 wait=wait,
2686 2687 releasefn=None,
2687 2688 acquirefn=self.invalidate,
2688 2689 desc=_(b'repository %s') % self.origroot,
2689 2690 )
2690 2691 self._lockref = weakref.ref(l)
2691 2692 return l
2692 2693
2693 2694 def _wlockchecktransaction(self):
2694 2695 if self.currenttransaction() is not None:
2695 2696 raise error.LockInheritanceContractViolation(
2696 2697 b'wlock cannot be inherited in the middle of a transaction'
2697 2698 )
2698 2699
2699 2700 def wlock(self, wait=True):
2700 2701 '''Lock the non-store parts of the repository (everything under
2701 2702 .hg except .hg/store) and return a weak reference to the lock.
2702 2703
2703 2704 Use this before modifying files in .hg.
2704 2705
2705 2706 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2706 2707 'wlock' first to avoid a deadlock hazard.'''
2707 2708 l = self._wlockref and self._wlockref()
2708 2709 if l is not None and l.held:
2709 2710 l.lock()
2710 2711 return l
2711 2712
2712 2713 # We do not need to check for non-waiting lock acquisition. Such
2713 2714 # acquisition would not cause a deadlock, as it would just fail.
2714 2715 if wait and (
2715 2716 self.ui.configbool(b'devel', b'all-warnings')
2716 2717 or self.ui.configbool(b'devel', b'check-locks')
2717 2718 ):
2718 2719 if self._currentlock(self._lockref) is not None:
2719 2720 self.ui.develwarn(b'"wlock" acquired after "lock"')
2720 2721
2721 2722 def unlock():
2722 2723 if self.dirstate.pendingparentchange():
2723 2724 self.dirstate.invalidate()
2724 2725 else:
2725 2726 self.dirstate.write(None)
2726 2727
2727 2728 self._filecache[b'dirstate'].refresh()
2728 2729
2729 2730 l = self._lock(
2730 2731 self.vfs,
2731 2732 b"wlock",
2732 2733 wait,
2733 2734 unlock,
2734 2735 self.invalidatedirstate,
2735 2736 _(b'working directory of %s') % self.origroot,
2736 2737 inheritchecker=self._wlockchecktransaction,
2737 2738 parentenvvar=b'HG_WLOCK_LOCKER',
2738 2739 )
2739 2740 self._wlockref = weakref.ref(l)
2740 2741 return l
2741 2742
2742 2743 def _currentlock(self, lockref):
2743 2744 """Returns the lock if it's held, or None if it's not."""
2744 2745 if lockref is None:
2745 2746 return None
2746 2747 l = lockref()
2747 2748 if l is None or not l.held:
2748 2749 return None
2749 2750 return l
2750 2751
2751 2752 def currentwlock(self):
2752 2753 """Returns the wlock if it's held, or None if it's not."""
2753 2754 return self._currentlock(self._wlockref)
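
# Ordering sketch: per the docstrings above, 'wlock' is always acquired
# before 'lock', as commit() below does:
#
#     with repo.wlock(), repo.lock():
#         ...  # both .hg and .hg/store may safely be modified here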
2754 2755
2755 2756 def _filecommit(
2756 2757 self,
2757 2758 fctx,
2758 2759 manifest1,
2759 2760 manifest2,
2760 2761 linkrev,
2761 2762 tr,
2762 2763 changelist,
2763 2764 includecopymeta,
2764 2765 ):
2765 2766 """
2766 2767 commit an individual file as part of a larger transaction
2767 2768 """
2768 2769
2769 2770 fname = fctx.path()
2770 2771 fparent1 = manifest1.get(fname, nullid)
2771 2772 fparent2 = manifest2.get(fname, nullid)
2772 2773 if isinstance(fctx, context.filectx):
2773 2774 node = fctx.filenode()
2774 2775 if node in [fparent1, fparent2]:
2775 2776 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2776 2777 if (
2777 2778 fparent1 != nullid
2778 2779 and manifest1.flags(fname) != fctx.flags()
2779 2780 ) or (
2780 2781 fparent2 != nullid
2781 2782 and manifest2.flags(fname) != fctx.flags()
2782 2783 ):
2783 2784 changelist.append(fname)
2784 2785 return node
2785 2786
2786 2787 flog = self.file(fname)
2787 2788 meta = {}
2788 2789 cfname = fctx.copysource()
2789 2790 if cfname and cfname != fname:
2790 2791 # Mark the new revision of this file as a copy of another
2791 2792 # file. This copy data will effectively act as a parent
2792 2793 # of this new revision. If this is a merge, the first
2793 2794 # parent will be the nullid (meaning "look up the copy data")
2794 2795 # and the second one will be the other parent. For example:
2795 2796 #
2796 2797 # 0 --- 1 --- 3 rev1 changes file foo
2797 2798 # \ / rev2 renames foo to bar and changes it
2798 2799 # \- 2 -/ rev3 should have bar with all changes and
2799 2800 # should record that bar descends from
2800 2801 # bar in rev2 and foo in rev1
2801 2802 #
2802 2803 # this allows this merge to succeed:
2803 2804 #
2804 2805 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2805 2806 # \ / merging rev3 and rev4 should use bar@rev2
2806 2807 # \- 2 --- 4 as the merge base
2807 2808 #
2808 2809
2809 2810 cnode = manifest1.get(cfname)
2810 2811 newfparent = fparent2
2811 2812
2812 2813 if manifest2: # branch merge
2813 2814 if fparent2 == nullid or cnode is None: # copied on remote side
2814 2815 if cfname in manifest2:
2815 2816 cnode = manifest2[cfname]
2816 2817 newfparent = fparent1
2817 2818
2818 2819 # Here, we used to search backwards through history to try to find
2819 2820 # where the file copy came from if the source of a copy was not in
2820 2821 # the parent directory. However, this doesn't actually make sense to
2821 2822 # do (what does a copy from something not in your working copy even
2822 2823 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2823 2824 # the user that copy information was dropped, so if they didn't
2824 2825 # expect this outcome it can be fixed, but this is the correct
2825 2826 # behavior in this circumstance.
2826 2827
2827 2828 if cnode:
2828 2829 self.ui.debug(
2829 2830 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2830 2831 )
2831 2832 if includecopymeta:
2832 2833 meta[b"copy"] = cfname
2833 2834 meta[b"copyrev"] = hex(cnode)
2834 2835 fparent1, fparent2 = nullid, newfparent
2835 2836 else:
2836 2837 self.ui.warn(
2837 2838 _(
2838 2839 b"warning: can't find ancestor for '%s' "
2839 2840 b"copied from '%s'!\n"
2840 2841 )
2841 2842 % (fname, cfname)
2842 2843 )
2843 2844
2844 2845 elif fparent1 == nullid:
2845 2846 fparent1, fparent2 = fparent2, nullid
2846 2847 elif fparent2 != nullid:
2847 2848 # is one parent an ancestor of the other?
2848 2849 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2849 2850 if fparent1 in fparentancestors:
2850 2851 fparent1, fparent2 = fparent2, nullid
2851 2852 elif fparent2 in fparentancestors:
2852 2853 fparent2 = nullid
2853 2854
2854 2855 # is the file changed?
2855 2856 text = fctx.data()
2856 2857 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2857 2858 changelist.append(fname)
2858 2859 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2859 2860 # are just the flags changed during merge?
2860 2861 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2861 2862 changelist.append(fname)
2862 2863
2863 2864 return fparent1
2864 2865
2865 2866 def checkcommitpatterns(self, wctx, match, status, fail):
2866 2867 """check for commit arguments that aren't committable"""
2867 2868 if match.isexact() or match.prefix():
2868 2869 matched = set(status.modified + status.added + status.removed)
2869 2870
2870 2871 for f in match.files():
2871 2872 f = self.dirstate.normalize(f)
2872 2873 if f == b'.' or f in matched or f in wctx.substate:
2873 2874 continue
2874 2875 if f in status.deleted:
2875 2876 fail(f, _(b'file not found!'))
2876 2877 # Is it a directory that exists or used to exist?
2877 2878 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2878 2879 d = f + b'/'
2879 2880 for mf in matched:
2880 2881 if mf.startswith(d):
2881 2882 break
2882 2883 else:
2883 2884 fail(f, _(b"no match under directory!"))
2884 2885 elif f not in self.dirstate:
2885 2886 fail(f, _(b"file not tracked!"))
2886 2887
2887 2888 @unfilteredmethod
2888 2889 def commit(
2889 2890 self,
2890 2891 text=b"",
2891 2892 user=None,
2892 2893 date=None,
2893 2894 match=None,
2894 2895 force=False,
2895 2896 editor=None,
2896 2897 extra=None,
2897 2898 ):
2898 2899 """Add a new revision to current repository.
2899 2900
2900 2901 Revision information is gathered from the working directory,
2901 2902 match can be used to filter the committed files. If editor is
2902 2903 supplied, it is called to get a commit message.
2903 2904 """
2904 2905 if extra is None:
2905 2906 extra = {}
2906 2907
2907 2908 def fail(f, msg):
2908 2909 raise error.Abort(b'%s: %s' % (f, msg))
2909 2910
2910 2911 if not match:
2911 2912 match = matchmod.always()
2912 2913
2913 2914 if not force:
2914 2915 match.bad = fail
2915 2916
2916 2917 # lock() for recent changelog (see issue4368)
2917 2918 with self.wlock(), self.lock():
2918 2919 wctx = self[None]
2919 2920 merge = len(wctx.parents()) > 1
2920 2921
2921 2922 if not force and merge and not match.always():
2922 2923 raise error.Abort(
2923 2924 _(
2924 2925 b'cannot partially commit a merge '
2925 2926 b'(do not specify files or patterns)'
2926 2927 )
2927 2928 )
2928 2929
2929 2930 status = self.status(match=match, clean=force)
2930 2931 if force:
2931 2932 status.modified.extend(
2932 2933 status.clean
2933 2934 ) # mq may commit clean files
2934 2935
2935 2936 # check subrepos
2936 2937 subs, commitsubs, newstate = subrepoutil.precommit(
2937 2938 self.ui, wctx, status, match, force=force
2938 2939 )
2939 2940
2940 2941 # make sure all explicit patterns are matched
2941 2942 if not force:
2942 2943 self.checkcommitpatterns(wctx, match, status, fail)
2943 2944
2944 2945 cctx = context.workingcommitctx(
2945 2946 self, status, text, user, date, extra
2946 2947 )
2947 2948
2948 2949 ms = mergemod.mergestate.read(self)
2949 2950 mergeutil.checkunresolved(ms)
2950 2951
2951 2952 # internal config: ui.allowemptycommit
2952 2953 allowemptycommit = (
2953 2954 wctx.branch() != wctx.p1().branch()
2954 2955 or extra.get(b'close')
2955 2956 or merge
2956 2957 or cctx.files()
2957 2958 or self.ui.configbool(b'ui', b'allowemptycommit')
2958 2959 )
2959 2960 if not allowemptycommit:
2960 2961 self.ui.debug(b'nothing to commit, clearing merge state\n')
2961 2962 ms.reset()
2962 2963 return None
2963 2964
2964 2965 if merge and cctx.deleted():
2965 2966 raise error.Abort(_(b"cannot commit merge with missing files"))
2966 2967
2967 2968 if editor:
2968 2969 cctx._text = editor(self, cctx, subs)
2969 2970 edited = text != cctx._text
2970 2971
2971 2972 # Save commit message in case this transaction gets rolled back
2972 2973 # (e.g. by a pretxncommit hook). Leave the content alone on
2973 2974 # the assumption that the user will use the same editor again.
2974 2975 msgfn = self.savecommitmessage(cctx._text)
2975 2976
2976 2977 # commit subs and write new state
2977 2978 if subs:
2978 2979 uipathfn = scmutil.getuipathfn(self)
2979 2980 for s in sorted(commitsubs):
2980 2981 sub = wctx.sub(s)
2981 2982 self.ui.status(
2982 2983 _(b'committing subrepository %s\n')
2983 2984 % uipathfn(subrepoutil.subrelpath(sub))
2984 2985 )
2985 2986 sr = sub.commit(cctx._text, user, date)
2986 2987 newstate[s] = (newstate[s][0], sr)
2987 2988 subrepoutil.writestate(self, newstate)
2988 2989
2989 2990 p1, p2 = self.dirstate.parents()
2990 2991 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2991 2992 try:
2992 2993 self.hook(
2993 2994 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2994 2995 )
2995 2996 with self.transaction(b'commit'):
2996 2997 ret = self.commitctx(cctx, True)
2997 2998 # update bookmarks, dirstate and mergestate
2998 2999 bookmarks.update(self, [p1, p2], ret)
2999 3000 cctx.markcommitted(ret)
3000 3001 ms.reset()
3001 3002 except: # re-raises
3002 3003 if edited:
3003 3004 self.ui.write(
3004 3005 _(b'note: commit message saved in %s\n') % msgfn
3005 3006 )
3006 3007 raise
3007 3008
3008 3009 def commithook(unused_success):
3009 3010 # hack for commands that use a temporary commit (eg: histedit):
3010 3011 # the temporary commit may have been stripped before the hook runs
3011 3012 if self.changelog.hasnode(ret):
3012 3013 self.hook(
3013 3014 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3014 3015 )
3015 3016
3016 3017 self._afterlock(commithook)
3017 3018 return ret
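
# Usage sketch (hypothetical message and user): a plain working-directory
# commit; returns the new node, or None if there was nothing to commit:
#
#     node = repo.commit(
#         text=b'fix frobnication',
#         user=b'alice <alice@example.org>',
#     )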
3018 3019
3019 3020 @unfilteredmethod
3020 3021 def commitctx(self, ctx, error=False, origctx=None):
3021 3022 """Add a new revision to current repository.
3022 3023 Revision information is passed via the context argument.
3023 3024
3024 3025 ctx.files() should list all files involved in this commit, i.e.
3025 3026 modified/added/removed files. On merge, it may be wider than the
3026 3027 ctx.files() that is actually committed, since any file nodes derived
3027 3028 directly from p1 or p2 are excluded from the committed ctx.files().
3028 3029
3029 3030 origctx exists so that convert can work around the problem that
3030 3031 bug fixes to the files list in changesets change hashes. For
3031 3032 convert to be the identity, it can pass an origctx, and this
3032 3033 function will reuse the same files list when it makes sense to
3033 3034 do so.
3034 3035 """
3035 3036
3036 3037 p1, p2 = ctx.p1(), ctx.p2()
3037 3038 user = ctx.user()
3038 3039
3039 3040 if self.filecopiesmode == b'changeset-sidedata':
3040 3041 writechangesetcopy = True
3041 3042 writefilecopymeta = True
3042 3043 writecopiesto = None
3043 3044 else:
3044 3045 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3045 3046 writefilecopymeta = writecopiesto != b'changeset-only'
3046 3047 writechangesetcopy = writecopiesto in (
3047 3048 b'changeset-only',
3048 3049 b'compatibility',
3049 3050 )
3050 3051 p1copies, p2copies = None, None
3051 3052 if writechangesetcopy:
3052 3053 p1copies = ctx.p1copies()
3053 3054 p2copies = ctx.p2copies()
3054 3055 filesadded, filesremoved = None, None
3055 3056 with self.lock(), self.transaction(b"commit") as tr:
3056 3057 trp = weakref.proxy(tr)
3057 3058
3058 3059 if ctx.manifestnode():
3059 3060 # reuse an existing manifest revision
3060 3061 self.ui.debug(b'reusing known manifest\n')
3061 3062 mn = ctx.manifestnode()
3062 3063 files = ctx.files()
3063 3064 if writechangesetcopy:
3064 3065 filesadded = ctx.filesadded()
3065 3066 filesremoved = ctx.filesremoved()
3066 3067 elif ctx.files():
3067 3068 m1ctx = p1.manifestctx()
3068 3069 m2ctx = p2.manifestctx()
3069 3070 mctx = m1ctx.copy()
3070 3071
3071 3072 m = mctx.read()
3072 3073 m1 = m1ctx.read()
3073 3074 m2 = m2ctx.read()
3074 3075
3075 3076 # check in files
3076 3077 added = []
3077 3078 changed = []
3078 3079 removed = list(ctx.removed())
3079 3080 linkrev = len(self)
3080 3081 self.ui.note(_(b"committing files:\n"))
3081 3082 uipathfn = scmutil.getuipathfn(self)
3082 3083 for f in sorted(ctx.modified() + ctx.added()):
3083 3084 self.ui.note(uipathfn(f) + b"\n")
3084 3085 try:
3085 3086 fctx = ctx[f]
3086 3087 if fctx is None:
3087 3088 removed.append(f)
3088 3089 else:
3089 3090 added.append(f)
3090 3091 m[f] = self._filecommit(
3091 3092 fctx,
3092 3093 m1,
3093 3094 m2,
3094 3095 linkrev,
3095 3096 trp,
3096 3097 changed,
3097 3098 writefilecopymeta,
3098 3099 )
3099 3100 m.setflag(f, fctx.flags())
3100 3101 except OSError:
3101 3102 self.ui.warn(
3102 3103 _(b"trouble committing %s!\n") % uipathfn(f)
3103 3104 )
3104 3105 raise
3105 3106 except IOError as inst:
3106 3107 errcode = getattr(inst, 'errno', errno.ENOENT)
3107 3108 if error or errcode and errcode != errno.ENOENT:
3108 3109 self.ui.warn(
3109 3110 _(b"trouble committing %s!\n") % uipathfn(f)
3110 3111 )
3111 3112 raise
3112 3113
3113 3114 # update manifest
3114 3115 removed = [f for f in removed if f in m1 or f in m2]
3115 3116 drop = sorted([f for f in removed if f in m])
3116 3117 for f in drop:
3117 3118 del m[f]
3118 3119 if p2.rev() != nullrev:
3119 3120
3120 3121 @util.cachefunc
3121 3122 def mas():
3122 3123 p1n = p1.node()
3123 3124 p2n = p2.node()
3124 3125 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3125 3126 if not cahs:
3126 3127 cahs = [nullrev]
3127 3128 return [self[r].manifest() for r in cahs]
3128 3129
3129 3130 def deletionfromparent(f):
3130 3131 # When a file is removed relative to p1 in a merge, this
3131 3132 # function determines whether the absence is due to a
3132 3133 # deletion from a parent, or whether the merge commit
3133 3134 # itself deletes the file. We decide this by doing a
3134 3135 # simplified three way merge of the manifest entry for
3135 3136 # the file. There are two ways we decide the merge
3136 3137 # itself didn't delete a file:
3137 3138 # - neither parent (nor the merge) contains the file
3138 3139 # - exactly one parent contains the file, and that
3139 3140 # parent has the same filelog entry as the merge
3140 3141 # ancestor (or all of them if there are two). In other
3141 3142 # words, that parent left the file unchanged while the
3142 3143 # other one deleted it.
3143 3144 # One way to think about this is that deleting a file is
3144 3145 # similar to emptying it, so the list of changed files
3145 3146 # should be similar either way. The computation
3146 3147 # described above is not done directly in _filecommit
3147 3148 # when creating the list of changed files, however
3148 3149 # it does something very similar by comparing filelog
3149 3150 # nodes.
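# A worked illustration (hypothetical file name, an addition to the
# original comment): suppose f is in m1 with the same filelog node as
# in every merge ancestor, and absent from m2. Then p1 left f alone
# while p2 deleted it, so the merge itself did not delete f and
# deletionfromparent(f) returns True, dropping f from "removed".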
3150 3151 if f in m1:
3151 3152 return f not in m2 and all(
3152 3153 f in ma and ma.find(f) == m1.find(f)
3153 3154 for ma in mas()
3154 3155 )
3155 3156 elif f in m2:
3156 3157 return all(
3157 3158 f in ma and ma.find(f) == m2.find(f)
3158 3159 for ma in mas()
3159 3160 )
3160 3161 else:
3161 3162 return True
3162 3163
3163 3164 removed = [f for f in removed if not deletionfromparent(f)]
3164 3165
3165 3166 files = changed + removed
3166 3167 md = None
3167 3168 if not files:
3168 3169 # if no "files" actually changed in terms of the changelog,
3169 3170 # try hard to detect an unmodified manifest entry so that the
3170 3171 # exact same commit can be reproduced later by convert.
3171 3172 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3172 3173 if not files and md:
3173 3174 self.ui.debug(
3174 3175 b'not reusing manifest (no file change in '
3175 3176 b'changelog, but manifest differs)\n'
3176 3177 )
3177 3178 if files or md:
3178 3179 self.ui.note(_(b"committing manifest\n"))
3179 3180 # we're using narrowmatch here since it's already applied at
3180 3181 # other stages (such as dirstate.walk), so we're already
3181 3182 # ignoring things outside of narrowspec in most cases. The
3182 3183 # one case where we might have files outside the narrowspec
3183 3184 # at this point is merges, and we already error out in the
3184 3185 # case where the merge has files outside of the narrowspec,
3185 3186 # so this is safe.
3186 3187 mn = mctx.write(
3187 3188 trp,
3188 3189 linkrev,
3189 3190 p1.manifestnode(),
3190 3191 p2.manifestnode(),
3191 3192 added,
3192 3193 drop,
3193 3194 match=self.narrowmatch(),
3194 3195 )
3195 3196
3196 3197 if writechangesetcopy:
3197 3198 filesadded = [
3198 3199 f for f in changed if not (f in m1 or f in m2)
3199 3200 ]
3200 3201 filesremoved = removed
3201 3202 else:
3202 3203 self.ui.debug(
3203 3204 b'reusing manifest from p1 (listed files '
3204 3205 b'actually unchanged)\n'
3205 3206 )
3206 3207 mn = p1.manifestnode()
3207 3208 else:
3208 3209 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3209 3210 mn = p1.manifestnode()
3210 3211 files = []
3211 3212
3212 3213 if writecopiesto == b'changeset-only':
3213 3214 # If writing only to changeset extras, use None to indicate that
3214 3215 # no entry should be written. If writing to both, write an empty
3215 3216 # entry to prevent the reader from falling back to reading
3216 3217 # filelogs.
3217 3218 p1copies = p1copies or None
3218 3219 p2copies = p2copies or None
3219 3220 filesadded = filesadded or None
3220 3221 filesremoved = filesremoved or None
3221 3222
3222 3223 if origctx and origctx.manifestnode() == mn:
3223 3224 files = origctx.files()
3224 3225
3225 3226 # update changelog
3226 3227 self.ui.note(_(b"committing changelog\n"))
3227 3228 self.changelog.delayupdate(tr)
3228 3229 n = self.changelog.add(
3229 3230 mn,
3230 3231 files,
3231 3232 ctx.description(),
3232 3233 trp,
3233 3234 p1.node(),
3234 3235 p2.node(),
3235 3236 user,
3236 3237 ctx.date(),
3237 3238 ctx.extra().copy(),
3238 3239 p1copies,
3239 3240 p2copies,
3240 3241 filesadded,
3241 3242 filesremoved,
3242 3243 )
3243 3244 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3244 3245 self.hook(
3245 3246 b'pretxncommit',
3246 3247 throw=True,
3247 3248 node=hex(n),
3248 3249 parent1=xp1,
3249 3250 parent2=xp2,
3250 3251 )
3251 3252 # set the new commit in its proper phase
3252 3253 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3253 3254 if targetphase:
3254 3255 # retracting the boundary does not alter parent changesets:
3255 3256 # if a parent has a higher phase, the resulting phase will
3256 3257 # be compliant anyway
3257 3258 #
3258 3259 # if the minimal phase is 0 we don't need to retract anything
3259 3260 phases.registernew(self, tr, targetphase, [n])
3260 3261 return n
3261 3262
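# A minimal usage sketch (hypothetical message and file name, not part of
# this module): an in-memory commit built with context.memctx can be
# persisted by handing it to commitctx().
#
#   from mercurial import context
#
#   def getfilectx(repo, memctx, path):
#       # returning None would mark the file as removed
#       return context.memfilectx(repo, memctx, path, b'new contents\n')
#
#   mctx = context.memctx(repo, (repo[b'.'].node(), None), b'example',
#                         [b'a.txt'], getfilectx, user=b'me <me@example.com>')
#   node = repo.commitctx(mctx)
#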
3262 3263 @unfilteredmethod
3263 3264 def destroying(self):
3264 3265 '''Inform the repository that nodes are about to be destroyed.
3265 3266 Intended for use by strip and rollback, so there's a common
3266 3267 place for anything that has to be done before destroying history.
3267 3268
3268 3269 This is mostly useful for saving state that is in memory and waiting
3269 3270 to be flushed when the current lock is released. Because a call to
3270 3271 destroyed is imminent, the repo will be invalidated, causing those
3271 3272 changes either to stay in memory (waiting for the next unlock) or to
3272 3273 vanish completely.
3273 3274 '''
3274 3275 # When using the same lock to commit and strip, the phasecache is left
3275 3276 # dirty after committing. Then when we strip, the repo is invalidated,
3276 3277 # causing those changes to disappear.
3277 3278 if '_phasecache' in vars(self):
3278 3279 self._phasecache.write()
3279 3280
3280 3281 @unfilteredmethod
3281 3282 def destroyed(self):
3282 3283 '''Inform the repository that nodes have been destroyed.
3283 3284 Intended for use by strip and rollback, so there's a common
3284 3285 place for anything that has to be done after destroying history.
3285 3286 '''
3286 3287 # When one tries to:
3287 3288 # 1) destroy nodes thus calling this method (e.g. strip)
3288 3289 # 2) use phasecache somewhere (e.g. commit)
3289 3290 #
3290 3291 # then 2) will fail because the phasecache contains nodes that were
3291 3292 # removed. We can either remove phasecache from the filecache,
3292 3293 # causing it to reload next time it is accessed, or simply filter
3293 3294 # the removed nodes now and write the updated cache.
3294 3295 self._phasecache.filterunknown(self)
3295 3296 self._phasecache.write()
3296 3297
3297 3298 # refresh all repository caches
3298 3299 self.updatecaches()
3299 3300
3300 3301 # Ensure the persistent tag cache is updated. Doing it now
3301 3302 # means that the tag cache only has to worry about destroyed
3302 3303 # heads immediately after a strip/rollback. That in turn
3303 3304 # guarantees that "cachetip == currenttip" (comparing both rev
3304 3305 # and node) always means no nodes have been added or destroyed.
3305 3306
3306 3307 # XXX this is suboptimal when qrefresh'ing: we strip the current
3307 3308 # head, refresh the tag cache, then immediately add a new head.
3308 3309 # But I think doing it this way is necessary for the "instant
3309 3310 # tag cache retrieval" case to work.
3310 3311 self.invalidate()
3311 3312
3312 3313 def status(
3313 3314 self,
3314 3315 node1=b'.',
3315 3316 node2=None,
3316 3317 match=None,
3317 3318 ignored=False,
3318 3319 clean=False,
3319 3320 unknown=False,
3320 3321 listsubrepos=False,
3321 3322 ):
3322 3323 '''a convenience method that calls node1.status(node2)'''
3323 3324 return self[node1].status(
3324 3325 node2, match, ignored, clean, unknown, listsubrepos
3325 3326 )
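# A hedged usage sketch: with no arguments this compares the working
# directory against '.'; the result carries modified, added, removed,
# deleted, unknown, ignored and clean file lists.
#
#   st = repo.status()
#   for f in st.modified:
#       repo.ui.write(b'M %s\n' % f)
#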
3326 3327
3327 3328 def addpostdsstatus(self, ps):
3328 3329 """Add a callback to run within the wlock, at the point at which status
3329 3330 fixups happen.
3330 3331
3331 3332 On status completion, callback(wctx, status) will be called with the
3332 3333 wlock held, unless the dirstate has changed from underneath or the wlock
3333 3334 couldn't be grabbed.
3334 3335
3335 3336 Callbacks should not capture and use a cached copy of the dirstate --
3336 3337 it might change in the meanwhile. Instead, they should access the
3337 3338 dirstate via wctx.repo().dirstate.
3338 3339
3339 3340 This list is emptied out after each status run -- extensions should
3340 3341 make sure they add to this list each time dirstate.status is called.
3341 3342 Extensions should also make sure they don't call this for statuses
3342 3343 that don't involve the dirstate.
3343 3344 """
3344 3345
3345 3346 # The list is located here for uniqueness reasons -- it is actually
3346 3347 # managed by the workingctx, but that isn't unique per-repo.
3347 3348 self._postdsstatus.append(ps)
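# A sketch of how an extension might use this (fixup is a hypothetical
# callback name):
#
#   def fixup(wctx, status):
#       wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))
#
#   repo.addpostdsstatus(fixup)
#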
3348 3349
3349 3350 def postdsstatus(self):
3350 3351 """Used by workingctx to get the list of post-dirstate-status hooks."""
3351 3352 return self._postdsstatus
3352 3353
3353 3354 def clearpostdsstatus(self):
3354 3355 """Used by workingctx to clear post-dirstate-status hooks."""
3355 3356 del self._postdsstatus[:]
3356 3357
3357 3358 def heads(self, start=None):
3358 3359 if start is None:
3359 3360 cl = self.changelog
3360 3361 headrevs = reversed(cl.headrevs())
3361 3362 return [cl.node(rev) for rev in headrevs]
3362 3363
3363 3364 heads = self.changelog.heads(start)
3364 3365 # sort the output in rev descending order
3365 3366 return sorted(heads, key=self.changelog.rev, reverse=True)
3366 3367
3367 3368 def branchheads(self, branch=None, start=None, closed=False):
3368 3369 '''return a (possibly filtered) list of heads for the given branch
3369 3370
3370 3371 Heads are returned in topological order, from newest to oldest.
3371 3372 If branch is None, use the dirstate branch.
3372 3373 If start is not None, return only heads reachable from start.
3373 3374 If closed is True, return heads that are marked as closed as well.
3374 3375 '''
3375 3376 if branch is None:
3376 3377 branch = self[None].branch()
3377 3378 branches = self.branchmap()
3378 3379 if not branches.hasbranch(branch):
3379 3380 return []
3380 3381 # the cache returns heads ordered lowest to highest
3381 3382 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3382 3383 if start is not None:
3383 3384 # filter out the heads that cannot be reached from startrev
3384 3385 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3385 3386 bheads = [h for h in bheads if h in fbheads]
3386 3387 return bheads
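# Hedged example: repo.branchheads(b'default')[0] would be the newest
# open head of the default branch, assuming that branch exists.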
3387 3388
3388 3389 def branches(self, nodes):
3389 3390 if not nodes:
3390 3391 nodes = [self.changelog.tip()]
3391 3392 b = []
3392 3393 for n in nodes:
3393 3394 t = n
3394 3395 while True:
3395 3396 p = self.changelog.parents(n)
3396 3397 if p[1] != nullid or p[0] == nullid:
3397 3398 b.append((t, n, p[0], p[1]))
3398 3399 break
3399 3400 n = p[0]
3400 3401 return b
3401 3402
3402 3403 def between(self, pairs):
3403 3404 r = []
3404 3405
3405 3406 for top, bottom in pairs:
3406 3407 n, l, i = top, [], 0
3407 3408 f = 1
3408 3409
3409 3410 while n != bottom and n != nullid:
3410 3411 p = self.changelog.parents(n)[0]
3411 3412 if i == f:
3412 3413 l.append(n)
3413 3414 f = f * 2
3414 3415 n = p
3415 3416 i += 1
3416 3417
3417 3418 r.append(l)
3418 3419
3419 3420 return r
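# Illustration (assuming a linear history with revisions 0..10): for the
# pair (node(10), node(0)) the walk samples the first-parent chain at
# exponentially growing distances 1, 2, 4, 8, ... below top, so the
# returned list holds the nodes of revisions 9, 8, 6 and 2. The legacy
# wire protocol uses this to narrow down common ancestors quickly.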
3420 3421
3421 3422 def checkpush(self, pushop):
3422 3423 """Extensions can override this function if additional checks have
3423 3424 to be performed before pushing, or call it if they override the
3424 3425 push command.
3425 3426 """
3426 3427
3427 3428 @unfilteredpropertycache
3428 3429 def prepushoutgoinghooks(self):
3429 3430 """Return util.hooks consists of a pushop with repo, remote, outgoing
3430 3431 methods, which are called before pushing changesets.
3431 3432 """
3432 3433 return util.hooks()
3433 3434
3434 3435 def pushkey(self, namespace, key, old, new):
3435 3436 try:
3436 3437 tr = self.currenttransaction()
3437 3438 hookargs = {}
3438 3439 if tr is not None:
3439 3440 hookargs.update(tr.hookargs)
3440 3441 hookargs = pycompat.strkwargs(hookargs)
3441 3442 hookargs['namespace'] = namespace
3442 3443 hookargs['key'] = key
3443 3444 hookargs['old'] = old
3444 3445 hookargs['new'] = new
3445 3446 self.hook(b'prepushkey', throw=True, **hookargs)
3446 3447 except error.HookAbort as exc:
3447 3448 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3448 3449 if exc.hint:
3449 3450 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3450 3451 return False
3451 3452 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3452 3453 ret = pushkey.push(self, namespace, key, old, new)
3453 3454
3454 3455 def runhook(unused_success):
3455 3456 self.hook(
3456 3457 b'pushkey',
3457 3458 namespace=namespace,
3458 3459 key=key,
3459 3460 old=old,
3460 3461 new=new,
3461 3462 ret=ret,
3462 3463 )
3463 3464
3464 3465 self._afterlock(runhook)
3465 3466 return ret
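# A hedged sketch (hypothetical bookmark name; oldhex/newhex stand for
# hex node ids): advancing a bookmark through the generic pushkey
# mechanism would look like
#
#   ok = repo.pushkey(b'bookmarks', b'mybook', oldhex, newhex)
#
# where a false return means a hook or the namespace handler refused it.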
3466 3467
3467 3468 def listkeys(self, namespace):
3468 3469 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3469 3470 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3470 3471 values = pushkey.list(self, namespace)
3471 3472 self.hook(b'listkeys', namespace=namespace, values=values)
3472 3473 return values
3473 3474
3474 3475 def debugwireargs(self, one, two, three=None, four=None, five=None):
3475 3476 '''used to test argument passing over the wire'''
3476 3477 return b"%s %s %s %s %s" % (
3477 3478 one,
3478 3479 two,
3479 3480 pycompat.bytestr(three),
3480 3481 pycompat.bytestr(four),
3481 3482 pycompat.bytestr(five),
3482 3483 )
3483 3484
3484 3485 def savecommitmessage(self, text):
3485 3486 fp = self.vfs(b'last-message.txt', b'wb')
3486 3487 try:
3487 3488 fp.write(text)
3488 3489 finally:
3489 3490 fp.close()
3490 3491 return self.pathto(fp.name[len(self.root) + 1 :])
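# Hedged example: repo.savecommitmessage(b'WIP\n') writes the text to
# .hg/last-message.txt and returns a path to it relative to the current
# working directory, suitable for display.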
3491 3492
3492 3493
3493 3494 # used to avoid circular references so destructors work
3494 3495 def aftertrans(files):
3495 3496 renamefiles = [tuple(t) for t in files]
3496 3497
3497 3498 def a():
3498 3499 for vfs, src, dest in renamefiles:
3499 3500 # if src and dest refer to the same file, vfs.rename is a no-op,
3500 3501 # leaving both src and dest on disk. delete dest to make sure
3501 3502 # the rename couldn't be such a no-op.
3502 3503 vfs.tryunlink(dest)
3503 3504 try:
3504 3505 vfs.rename(src, dest)
3505 3506 except OSError: # journal file does not yet exist
3506 3507 pass
3507 3508
3508 3509 return a
3509 3510
3510 3511
3511 3512 def undoname(fn):
3512 3513 base, name = os.path.split(fn)
3513 3514 assert name.startswith(b'journal')
3514 3515 return os.path.join(base, name.replace(b'journal', b'undo', 1))
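# e.g. undoname(b'.hg/store/journal.bookmarks') -> b'.hg/store/undo.bookmarks'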
3515 3516
3516 3517
3517 3518 def instance(ui, path, create, intents=None, createopts=None):
3518 3519 localpath = util.urllocalpath(path)
3519 3520 if create:
3520 3521 createrepository(ui, localpath, createopts=createopts)
3521 3522
3522 3523 return makelocalrepository(ui, localpath, intents=intents)
3523 3524
3524 3525
3525 3526 def islocal(path):
3526 3527 return True
3527 3528
3528 3529
3529 3530 def defaultcreateopts(ui, createopts=None):
3530 3531 """Populate the default creation options for a repository.
3531 3532
3532 3533 A dictionary of explicitly requested creation options can be passed
3533 3534 in. Missing keys will be populated.
3534 3535 """
3535 3536 createopts = dict(createopts or {})
3536 3537
3537 3538 if b'backend' not in createopts:
3538 3539 # experimental config: storage.new-repo-backend
3539 3540 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3540 3541
3541 3542 return createopts
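# Hedged example: defaultcreateopts(ui, createopts={b'narrowfiles': True})
# returns a copy of the dict with b'backend' filled in from the
# storage.new-repo-backend config value.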
3542 3543
3543 3544
3544 3545 def newreporequirements(ui, createopts):
3545 3546 """Determine the set of requirements for a new local repository.
3546 3547
3547 3548 Extensions can wrap this function to specify custom requirements for
3548 3549 new repositories.
3549 3550 """
3550 3551 # If the repo is being created from a shared repository, we copy
3551 3552 # its requirements.
3552 3553 if b'sharedrepo' in createopts:
3553 3554 requirements = set(createopts[b'sharedrepo'].requirements)
3554 3555 if createopts.get(b'sharedrelative'):
3555 3556 requirements.add(b'relshared')
3556 3557 else:
3557 3558 requirements.add(b'shared')
3558 3559
3559 3560 return requirements
3560 3561
3561 3562 if b'backend' not in createopts:
3562 3563 raise error.ProgrammingError(
3563 3564 b'backend key not present in createopts; '
3564 3565 b'was defaultcreateopts() called?'
3565 3566 )
3566 3567
3567 3568 if createopts[b'backend'] != b'revlogv1':
3568 3569 raise error.Abort(
3569 3570 _(
3570 3571 b'unable to determine repository requirements for '
3571 3572 b'storage backend: %s'
3572 3573 )
3573 3574 % createopts[b'backend']
3574 3575 )
3575 3576
3576 3577 requirements = {b'revlogv1'}
3577 3578 if ui.configbool(b'format', b'usestore'):
3578 3579 requirements.add(b'store')
3579 3580 if ui.configbool(b'format', b'usefncache'):
3580 3581 requirements.add(b'fncache')
3581 3582 if ui.configbool(b'format', b'dotencode'):
3582 3583 requirements.add(b'dotencode')
3583 3584
3584 3585 compengines = ui.configlist(b'format', b'revlog-compression')
3585 3586 for compengine in compengines:
3586 3587 if compengine in util.compengines:
3587 3588 break
3588 3589 else:
3589 3590 raise error.Abort(
3590 3591 _(
3591 3592 b'compression engines %s defined by '
3592 3593 b'format.revlog-compression not available'
3593 3594 )
3594 3595 % b', '.join(b'"%s"' % e for e in compengines),
3595 3596 hint=_(
3596 3597 b'run "hg debuginstall" to list available '
3597 3598 b'compression engines'
3598 3599 ),
3599 3600 )
3600 3601
3601 3602 # zlib is the historical default and doesn't need an explicit requirement.
3602 3603 if compengine == b'zstd':
3603 3604 requirements.add(b'revlog-compression-zstd')
3604 3605 elif compengine != b'zlib':
3605 3606 requirements.add(b'exp-compression-%s' % compengine)
3606 3607
3607 3608 if scmutil.gdinitconfig(ui):
3608 3609 requirements.add(b'generaldelta')
3609 3610 if ui.configbool(b'format', b'sparse-revlog'):
3610 3611 requirements.add(SPARSEREVLOG_REQUIREMENT)
3611 3612
3612 3613 # experimental config: format.exp-use-side-data
3613 3614 if ui.configbool(b'format', b'exp-use-side-data'):
3614 3615 requirements.add(SIDEDATA_REQUIREMENT)
3615 3616 # experimental config: format.exp-use-copies-side-data-changeset
3616 3617 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3617 3618 requirements.add(SIDEDATA_REQUIREMENT)
3618 3619 requirements.add(COPIESSDC_REQUIREMENT)
3619 3620 if ui.configbool(b'experimental', b'treemanifest'):
3620 3621 requirements.add(b'treemanifest')
3621 3622
3622 3623 revlogv2 = ui.config(b'experimental', b'revlogv2')
3623 3624 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3624 3625 requirements.remove(b'revlogv1')
3625 3626 # generaldelta is implied by revlogv2.
3626 3627 requirements.discard(b'generaldelta')
3627 3628 requirements.add(REVLOGV2_REQUIREMENT)
3628 3629 # experimental config: format.internal-phase
3629 3630 if ui.configbool(b'format', b'internal-phase'):
3630 3631 requirements.add(b'internal-phase')
3631 3632
3632 3633 if createopts.get(b'narrowfiles'):
3633 3634 requirements.add(repository.NARROW_REQUIREMENT)
3634 3635
3635 3636 if createopts.get(b'lfs'):
3636 3637 requirements.add(b'lfs')
3637 3638
3638 3639 if ui.configbool(b'format', b'bookmarks-in-store'):
3639 3640 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3640 3641
3641 3642 return requirements
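# With an out-of-the-box configuration this typically yields a set along
# the lines of {b'revlogv1', b'store', b'fncache', b'dotencode',
# b'generaldelta', b'sparserevlog'}; the exact contents depend on the
# config and on createopts.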
3642 3643
3643 3644
3644 3645 def filterknowncreateopts(ui, createopts):
3645 3646 """Filters a dict of repo creation options against options that are known.
3646 3647
3647 3648 Receives a dict of repo creation options and returns a dict of those
3648 3649 options that we don't know how to handle.
3649 3650
3650 3651 This function is called as part of repository creation. If the
3651 3652 returned dict contains any items, repository creation will not
3652 3653 be allowed, as it means there was a request to create a repository
3653 3654 with options not recognized by loaded code.
3654 3655
3655 3656 Extensions can wrap this function to filter out creation options
3656 3657 they know how to handle.
3657 3658 """
3658 3659 known = {
3659 3660 b'backend',
3660 3661 b'lfs',
3661 3662 b'narrowfiles',
3662 3663 b'sharedrepo',
3663 3664 b'sharedrelative',
3664 3665 b'shareditems',
3665 3666 b'shallowfilestore',
3666 3667 }
3667 3668
3668 3669 return {k: v for k, v in createopts.items() if k not in known}
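# Hedged example (b'frobnicate' is a made-up option): passing
# {b'lfs': True, b'frobnicate': 1} returns {b'frobnicate': 1}, and
# createrepository() below then aborts because the option is unknown.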
3669 3670
3670 3671
3671 3672 def createrepository(ui, path, createopts=None):
3672 3673 """Create a new repository in a vfs.
3673 3674
3674 3675 ``path`` path to the new repo's working directory.
3675 3676 ``createopts`` options for the new repository.
3676 3677
3677 3678 The following keys for ``createopts`` are recognized:
3678 3679
3679 3680 backend
3680 3681 The storage backend to use.
3681 3682 lfs
3682 3683 Repository will be created with ``lfs`` requirement. The lfs extension
3683 3684 will automatically be loaded when the repository is accessed.
3684 3685 narrowfiles
3685 3686 Set up repository to support narrow file storage.
3686 3687 sharedrepo
3687 3688 Repository object from which storage should be shared.
3688 3689 sharedrelative
3689 3690 Boolean indicating if the path to the shared repo should be
3690 3691 stored as relative. By default, the pointer to the "parent" repo
3691 3692 is stored as an absolute path.
3692 3693 shareditems
3693 3694 Set of items to share to the new repository (in addition to storage).
3694 3695 shallowfilestore
3695 3696 Indicates that storage for files should be shallow (not all ancestor
3696 3697 revisions are known).
3697 3698 """
3698 3699 createopts = defaultcreateopts(ui, createopts=createopts)
3699 3700
3700 3701 unknownopts = filterknowncreateopts(ui, createopts)
3701 3702
3702 3703 if not isinstance(unknownopts, dict):
3703 3704 raise error.ProgrammingError(
3704 3705 b'filterknowncreateopts() did not return a dict'
3705 3706 )
3706 3707
3707 3708 if unknownopts:
3708 3709 raise error.Abort(
3709 3710 _(
3710 3711 b'unable to create repository because of unknown '
3711 3712 b'creation option: %s'
3712 3713 )
3713 3714 % b', '.join(sorted(unknownopts)),
3714 3715 hint=_(b'is a required extension not loaded?'),
3715 3716 )
3716 3717
3717 3718 requirements = newreporequirements(ui, createopts=createopts)
3718 3719
3719 3720 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3720 3721
3721 3722 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3722 3723 if hgvfs.exists():
3723 3724 raise error.RepoError(_(b'repository %s already exists') % path)
3724 3725
3725 3726 if b'sharedrepo' in createopts:
3726 3727 sharedpath = createopts[b'sharedrepo'].sharedpath
3727 3728
3728 3729 if createopts.get(b'sharedrelative'):
3729 3730 try:
3730 3731 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3731 3732 except (IOError, ValueError) as e:
3732 3733 # ValueError is raised on Windows if the drive letters differ
3733 3734 # on each path.
3734 3735 raise error.Abort(
3735 3736 _(b'cannot calculate relative path'),
3736 3737 hint=stringutil.forcebytestr(e),
3737 3738 )
3738 3739
3739 3740 if not wdirvfs.exists():
3740 3741 wdirvfs.makedirs()
3741 3742
3742 3743 hgvfs.makedir(notindexed=True)
3743 3744 if b'sharedrepo' not in createopts:
3744 3745 hgvfs.mkdir(b'cache')
3745 3746 hgvfs.mkdir(b'wcache')
3746 3747
3747 3748 if b'store' in requirements and b'sharedrepo' not in createopts:
3748 3749 hgvfs.mkdir(b'store')
3749 3750
3750 3751 # We create an invalid changelog outside the store so very old
3751 3752 # Mercurial versions (which didn't know about the requirements
3752 3753 # file) encounter an error on reading the changelog. This
3753 3754 # effectively locks out old clients and prevents them from
3754 3755 # mucking with a repo in an unknown format.
3755 3756 #
3756 3757 # The revlog header has version 2, which won't be recognized by
3757 3758 # such old clients.
3758 3759 hgvfs.append(
3759 3760 b'00changelog.i',
3760 3761 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3761 3762 b'layout',
3762 3763 )
3763 3764
3764 3765 scmutil.writerequires(hgvfs, requirements)
3765 3766
3766 3767 # Write out file telling readers where to find the shared store.
3767 3768 if b'sharedrepo' in createopts:
3768 3769 hgvfs.write(b'sharedpath', sharedpath)
3769 3770
3770 3771 if createopts.get(b'shareditems'):
3771 3772 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3772 3773 hgvfs.write(b'shared', shared)
3773 3774
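# A hedged usage sketch (hypothetical path):
#
#   createrepository(ui, b'/tmp/newrepo',
#                    createopts={b'backend': b'revlogv1', b'lfs': True})
#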
3774 3775
3775 3776 def poisonrepository(repo):
3776 3777 """Poison a repository instance so it can no longer be used."""
3777 3778 # Perform any cleanup on the instance.
3778 3779 repo.close()
3779 3780
3780 3781 # Our strategy is to replace the type of the object with one that
3781 3782 # has all attribute lookups result in error.
3782 3783 #
3783 3784 # But we have to allow the close() method because some constructors
3784 3785 # of repos call close() on repo references.
3785 3786 class poisonedrepository(object):
3786 3787 def __getattribute__(self, item):
3787 3788 if item == 'close':
3788 3789 return object.__getattribute__(self, item)
3789 3790
3790 3791 raise error.ProgrammingError(
3791 3792 b'repo instances should not be used after unshare'
3792 3793 )
3793 3794
3794 3795 def close(self):
3795 3796 pass
3796 3797
3797 3798 # We may have a repoview, which intercepts __setattr__. So be sure
3798 3799 # we operate at the lowest level possible.
3799 3800 object.__setattr__(repo, '__class__', poisonedrepository)
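# Hedged illustration: once poisoned, any attribute access other than
# close() raises, e.g.
#
#   poisonrepository(repo)
#   repo.close()      # still allowed
#   repo.changelog    # raises error.ProgrammingError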