localrepo: introduce a `_quick_access_changeid` property...
marmoute
r44204:e89e3275 default
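The patch replaces the chained comparison `changeid == b'null' or changeid == nullrev or changeid == nullid` in `__getitem__` (visible in the removed lines below) with a single probe into a cached dictionary, which later patches can extend with more recognised symbols. A minimal standalone sketch of the before/after shape, for illustration only:

    from mercurial.node import nullid, nullrev

    # Before: three comparisons on every __getitem__ call.
    def resolve_old(changeid):
        if changeid == b'null' or changeid == nullrev or changeid == nullid:
            return (nullrev, nullid)
        return None

    # After: one dictionary probe; all three spellings of the null
    # revision map to the same (rev, node) pair.
    _quick_access_changeid = {
        b'null': (nullrev, nullid),
        nullrev: (nullrev, nullid),
        nullid: (nullrev, nullid),
    }

    def resolve_new(changeid):
        return _quick_access_changeid.get(changeid)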
@@ -1,3729 +1,3744 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 changegroup,
35 35 color,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 merge as mergemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 procutil,
78 78 stringutil,
79 79 )
80 80
81 81 from .revlogutils import constants as revlogconst
82 82
83 83 release = lockmod.release
84 84 urlerr = util.urlerr
85 85 urlreq = util.urlreq
86 86
87 87 # set of (path, vfs-location) tuples. vfs-location is:
88 88 # - 'plain' for vfs relative paths
89 89 # - '' for svfs relative paths
90 90 _cachedfiles = set()
91 91
92 92
93 93 class _basefilecache(scmutil.filecache):
94 94 """All filecache usage on repo are done for logic that should be unfiltered
95 95 """
96 96
97 97 def __get__(self, repo, type=None):
98 98 if repo is None:
99 99 return self
100 100 # proxy to unfiltered __dict__ since filtered repo has no entry
101 101 unfi = repo.unfiltered()
102 102 try:
103 103 return unfi.__dict__[self.sname]
104 104 except KeyError:
105 105 pass
106 106 return super(_basefilecache, self).__get__(unfi, type)
107 107
108 108 def set(self, repo, value):
109 109 return super(_basefilecache, self).set(repo.unfiltered(), value)
110 110
111 111
112 112 class repofilecache(_basefilecache):
113 113 """filecache for files in .hg but outside of .hg/store"""
114 114
115 115 def __init__(self, *paths):
116 116 super(repofilecache, self).__init__(*paths)
117 117 for path in paths:
118 118 _cachedfiles.add((path, b'plain'))
119 119
120 120 def join(self, obj, fname):
121 121 return obj.vfs.join(fname)
122 122
123 123
124 124 class storecache(_basefilecache):
125 125 """filecache for files in the store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(storecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b''))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.sjoin(fname)
134 134
135 135
136 136 class mixedrepostorecache(_basefilecache):
137 137 """filecache for a mix files in .hg/store and outside"""
138 138
139 139 def __init__(self, *pathsandlocations):
140 140 # scmutil.filecache only uses the path for passing back into our
141 141 # join(), so we can safely pass a list of paths and locations
142 142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 143 _cachedfiles.update(pathsandlocations)
144 144
145 145 def join(self, obj, fnameandlocation):
146 146 fname, location = fnameandlocation
147 147 if location == b'plain':
148 148 return obj.vfs.join(fname)
149 149 else:
150 150 if location != b'':
151 151 raise error.ProgrammingError(
152 152 b'unexpected location: %s' % location
153 153 )
154 154 return obj.sjoin(fname)
155 155
156 156
157 157 def isfilecached(repo, name):
158 158 """check if a repo has already cached "name" filecache-ed property
159 159
160 160 This returns (cachedobj-or-None, iscached) tuple.
161 161 """
162 162 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 163 if not cacheentry:
164 164 return None, False
165 165 return cacheentry.obj, True
166 166
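For illustration, one way a caller might use this helper; the property name 'dirstate' is just an example of a filecache-ed attribute defined later in this file:

    # Hypothetical: only poke the dirstate if it was actually loaded,
    # so the probe itself does not trigger a disk read.
    obj, cached = isfilecached(repo, 'dirstate')
    if cached:
        obj.invalidate()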
167 167
168 168 class unfilteredpropertycache(util.propertycache):
169 169 """propertycache that apply to unfiltered repo only"""
170 170
171 171 def __get__(self, repo, type=None):
172 172 unfi = repo.unfiltered()
173 173 if unfi is repo:
174 174 return super(unfilteredpropertycache, self).__get__(unfi)
175 175 return getattr(unfi, self.name)
176 176
177 177
178 178 class filteredpropertycache(util.propertycache):
179 179 """propertycache that must take filtering in account"""
180 180
181 181 def cachevalue(self, obj, value):
182 182 object.__setattr__(obj, self.name, value)
183 183
184 184
185 185 def hasunfilteredcache(repo, name):
186 186 """check if a repo has an unfilteredpropertycache value for <name>"""
187 187 return name in vars(repo.unfiltered())
188 188
189 189
190 190 def unfilteredmethod(orig):
191 191 """decorate method that always need to be run on unfiltered version"""
192 192
193 193 def wrapper(repo, *args, **kwargs):
194 194 return orig(repo.unfiltered(), *args, **kwargs)
195 195
196 196 return wrapper
197 197
198 198
199 199 moderncaps = {
200 200 b'lookup',
201 201 b'branchmap',
202 202 b'pushkey',
203 203 b'known',
204 204 b'getbundle',
205 205 b'unbundle',
206 206 }
207 207 legacycaps = moderncaps.union({b'changegroupsubset'})
208 208
209 209
210 210 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 211 class localcommandexecutor(object):
212 212 def __init__(self, peer):
213 213 self._peer = peer
214 214 self._sent = False
215 215 self._closed = False
216 216
217 217 def __enter__(self):
218 218 return self
219 219
220 220 def __exit__(self, exctype, excvalue, exctb):
221 221 self.close()
222 222
223 223 def callcommand(self, command, args):
224 224 if self._sent:
225 225 raise error.ProgrammingError(
226 226 b'callcommand() cannot be used after sendcommands()'
227 227 )
228 228
229 229 if self._closed:
230 230 raise error.ProgrammingError(
231 231 b'callcommand() cannot be used after close()'
232 232 )
233 233
234 234 # We don't need to support anything fancy. Just call the named
235 235 # method on the peer and return a resolved future.
236 236 fn = getattr(self._peer, pycompat.sysstr(command))
237 237
238 238 f = pycompat.futures.Future()
239 239
240 240 try:
241 241 result = fn(**pycompat.strkwargs(args))
242 242 except Exception:
243 243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 244 else:
245 245 f.set_result(result)
246 246
247 247 return f
248 248
249 249 def sendcommands(self):
250 250 self._sent = True
251 251
252 252 def close(self):
253 253 self._closed = True
254 254
255 255
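A hedged sketch of how this executor is typically driven; `b'heads'` is one of the commands the local peer exposes further down in this file:

    # Futures-based command dispatch against a local peer.
    with peer.commandexecutor() as executor:
        future = executor.callcommand(b'heads', {})
        executor.sendcommands()
        heads = future.result()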
256 256 @interfaceutil.implementer(repository.ipeercommands)
257 257 class localpeer(repository.peer):
258 258 '''peer for a local repo; reflects only the most recent API'''
259 259
260 260 def __init__(self, repo, caps=None):
261 261 super(localpeer, self).__init__()
262 262
263 263 if caps is None:
264 264 caps = moderncaps.copy()
265 265 self._repo = repo.filtered(b'served')
266 266 self.ui = repo.ui
267 267 self._caps = repo._restrictcapabilities(caps)
268 268
269 269 # Begin of _basepeer interface.
270 270
271 271 def url(self):
272 272 return self._repo.url()
273 273
274 274 def local(self):
275 275 return self._repo
276 276
277 277 def peer(self):
278 278 return self
279 279
280 280 def canpush(self):
281 281 return True
282 282
283 283 def close(self):
284 284 self._repo.close()
285 285
286 286 # End of _basepeer interface.
287 287
288 288 # Begin of _basewirecommands interface.
289 289
290 290 def branchmap(self):
291 291 return self._repo.branchmap()
292 292
293 293 def capabilities(self):
294 294 return self._caps
295 295
296 296 def clonebundles(self):
297 297 return self._repo.tryread(b'clonebundles.manifest')
298 298
299 299 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 300 """Used to test argument passing over the wire"""
301 301 return b"%s %s %s %s %s" % (
302 302 one,
303 303 two,
304 304 pycompat.bytestr(three),
305 305 pycompat.bytestr(four),
306 306 pycompat.bytestr(five),
307 307 )
308 308
309 309 def getbundle(
310 310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 311 ):
312 312 chunks = exchange.getbundlechunks(
313 313 self._repo,
314 314 source,
315 315 heads=heads,
316 316 common=common,
317 317 bundlecaps=bundlecaps,
318 318 **kwargs
319 319 )[1]
320 320 cb = util.chunkbuffer(chunks)
321 321
322 322 if exchange.bundle2requested(bundlecaps):
323 323 # When requesting a bundle2, getbundle returns a stream to make the
324 324 # wire level function happier. We need to build a proper object
325 325 # from it in local peer.
326 326 return bundle2.getunbundler(self.ui, cb)
327 327 else:
328 328 return changegroup.getunbundler(b'01', cb, None)
329 329
330 330 def heads(self):
331 331 return self._repo.heads()
332 332
333 333 def known(self, nodes):
334 334 return self._repo.known(nodes)
335 335
336 336 def listkeys(self, namespace):
337 337 return self._repo.listkeys(namespace)
338 338
339 339 def lookup(self, key):
340 340 return self._repo.lookup(key)
341 341
342 342 def pushkey(self, namespace, key, old, new):
343 343 return self._repo.pushkey(namespace, key, old, new)
344 344
345 345 def stream_out(self):
346 346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347 347
348 348 def unbundle(self, bundle, heads, url):
349 349 """apply a bundle on a repo
350 350
351 351 This function handles the repo locking itself."""
352 352 try:
353 353 try:
354 354 bundle = exchange.readbundle(self.ui, bundle, None)
355 355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 356 if util.safehasattr(ret, b'getchunks'):
357 357 # This is a bundle20 object, turn it into an unbundler.
358 358 # This little dance should be dropped eventually when the
359 359 # API is finally improved.
360 360 stream = util.chunkbuffer(ret.getchunks())
361 361 ret = bundle2.getunbundler(self.ui, stream)
362 362 return ret
363 363 except Exception as exc:
364 364 # If the exception contains output salvaged from a bundle2
365 365 # reply, we need to make sure it is printed before continuing
366 366 # to fail. So we build a bundle2 with such output and consume
367 367 # it directly.
368 368 #
369 369 # This is not very elegant but allows a "simple" solution for
370 370 # issue4594
371 371 output = getattr(exc, '_bundle2salvagedoutput', ())
372 372 if output:
373 373 bundler = bundle2.bundle20(self._repo.ui)
374 374 for out in output:
375 375 bundler.addpart(out)
376 376 stream = util.chunkbuffer(bundler.getchunks())
377 377 b = bundle2.getunbundler(self.ui, stream)
378 378 bundle2.processbundle(self._repo, b)
379 379 raise
380 380 except error.PushRaced as exc:
381 381 raise error.ResponseError(
382 382 _(b'push failed:'), stringutil.forcebytestr(exc)
383 383 )
384 384
385 385 # End of _basewirecommands interface.
386 386
387 387 # Begin of peer interface.
388 388
389 389 def commandexecutor(self):
390 390 return localcommandexecutor(self)
391 391
392 392 # End of peer interface.
393 393
394 394
395 395 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 396 class locallegacypeer(localpeer):
397 397 '''peer extension which implements legacy methods too; used for tests with
398 398 restricted capabilities'''
399 399
400 400 def __init__(self, repo):
401 401 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402 402
403 403 # Begin of baselegacywirecommands interface.
404 404
405 405 def between(self, pairs):
406 406 return self._repo.between(pairs)
407 407
408 408 def branches(self, nodes):
409 409 return self._repo.branches(nodes)
410 410
411 411 def changegroup(self, nodes, source):
412 412 outgoing = discovery.outgoing(
413 413 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 414 )
415 415 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416 416
417 417 def changegroupsubset(self, bases, heads, source):
418 418 outgoing = discovery.outgoing(
419 419 self._repo, missingroots=bases, missingheads=heads
420 420 )
421 421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 422
423 423 # End of baselegacywirecommands interface.
424 424
425 425
426 426 # Increment the sub-version when the revlog v2 format changes to lock out old
427 427 # clients.
428 428 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429 429
430 430 # A repository with the sparserevlog feature will have delta chains that
431 431 # can spread over a larger span. Sparse reading cuts these large spans into
432 432 # pieces, so that each piece isn't too big.
433 433 # Without the sparserevlog capability, reading from the repository could use
434 434 # huge amounts of memory, because the whole span would be read at once,
435 435 # including all the intermediate revisions that aren't pertinent for the chain.
436 436 # This is why once a repository has enabled sparse-read, it becomes required.
437 437 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438 438
439 439 # A repository with the sidedataflag requirement will allow storing extra
440 440 # information for revisions without altering their original hashes.
441 441 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442 442
443 443 # A repository with the copies-sidedata-changeset requirement will store
444 444 # copies-related information in the changeset's sidedata.
445 445 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446 446
447 447 # Functions receiving (ui, features) that extensions can register to impact
448 448 # the ability to load repositories with custom requirements. Only
449 449 # functions defined in loaded extensions are called.
450 450 #
451 451 # The function receives a set of requirement strings that the repository
452 452 # is capable of opening. Functions will typically add elements to the
453 453 # set to reflect that the extension knows how to handle those requirements.
454 454 featuresetupfuncs = set()
455 455
456 456
457 457 def makelocalrepository(baseui, path, intents=None):
458 458 """Create a local repository object.
459 459
460 460 Given arguments needed to construct a local repository, this function
461 461 performs various early repository loading functionality (such as
462 462 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 463 the repository can be opened, derives a type suitable for representing
464 464 that repository, and returns an instance of it.
465 465
466 466 The returned object conforms to the ``repository.completelocalrepository``
467 467 interface.
468 468
469 469 The repository type is derived by calling a series of factory functions
470 470 for each aspect/interface of the final repository. These are defined by
471 471 ``REPO_INTERFACES``.
472 472
473 473 Each factory function is called to produce a type implementing a specific
474 474 interface. The cumulative list of returned types will be combined into a
475 475 new type and that type will be instantiated to represent the local
476 476 repository.
477 477
478 478 The factory functions each receive various state that may be consulted
479 479 as part of deriving a type.
480 480
481 481 Extensions should wrap these factory functions to customize repository type
482 482 creation. Note that an extension's wrapped function may be called even if
483 483 that extension is not loaded for the repo being constructed. Extensions
484 484 should check if their ``__name__`` appears in the
485 485 ``extensionmodulenames`` set passed to the factory function and no-op if
486 486 not.
487 487 """
488 488 ui = baseui.copy()
489 489 # Prevent copying repo configuration.
490 490 ui.copy = baseui.copy
491 491
492 492 # Working directory VFS rooted at repository root.
493 493 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494 494
495 495 # Main VFS for .hg/ directory.
496 496 hgpath = wdirvfs.join(b'.hg')
497 497 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498 498
499 499 # The .hg/ path should exist and should be a directory. All other
500 500 # cases are errors.
501 501 if not hgvfs.isdir():
502 502 try:
503 503 hgvfs.stat()
504 504 except OSError as e:
505 505 if e.errno != errno.ENOENT:
506 506 raise
507 507
508 508 raise error.RepoError(_(b'repository %s not found') % path)
509 509
510 510 # .hg/requires file contains a newline-delimited list of
511 511 # features/capabilities the opener (us) must have in order to use
512 512 # the repository. This file was introduced in Mercurial 0.9.2,
513 513 # which means very old repositories may not have one. We assume
514 514 # a missing file translates to no requirements.
515 515 try:
516 516 requirements = set(hgvfs.read(b'requires').splitlines())
517 517 except IOError as e:
518 518 if e.errno != errno.ENOENT:
519 519 raise
520 520 requirements = set()
521 521
522 522 # The .hg/hgrc file may load extensions or contain config options
523 523 # that influence repository construction. Attempt to load it and
524 524 # process any new extensions that it may have pulled in.
525 525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 527 extensions.loadall(ui)
528 528 extensions.populateui(ui)
529 529
530 530 # Set of module names of extensions loaded for this repository.
531 531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532 532
533 533 supportedrequirements = gathersupportedrequirements(ui)
534 534
535 535 # We first validate the requirements are known.
536 536 ensurerequirementsrecognized(requirements, supportedrequirements)
537 537
538 538 # Then we validate that the known set is reasonable to use together.
539 539 ensurerequirementscompatible(ui, requirements)
540 540
541 541 # TODO there are unhandled edge cases related to opening repositories with
542 542 # shared storage. If storage is shared, we should also test for requirements
543 543 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 544 # that repo, as that repo may load extensions needed to open it. This is a
545 545 # bit complicated because we don't want the other hgrc to overwrite settings
546 546 # in this hgrc.
547 547 #
548 548 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 549 # file when sharing repos. But if a requirement is added after the share is
550 550 # performed, thereby introducing a new requirement for the opener, we may
551 551 # not see that and could encounter a run-time error interacting with
552 552 # that shared store since it has an unknown-to-us requirement.
553 553
554 554 # At this point, we know we should be capable of opening the repository.
555 555 # Now get on with doing that.
556 556
557 557 features = set()
558 558
559 559 # The "store" part of the repository holds versioned data. How it is
560 560 # accessed is determined by various requirements. The ``shared`` or
561 561 # ``relshared`` requirements indicate the store lives in the path contained
562 562 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 563 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 564 if b'shared' in requirements or b'relshared' in requirements:
565 565 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 566 if b'relshared' in requirements:
567 567 sharedpath = hgvfs.join(sharedpath)
568 568
569 569 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 570
571 571 if not sharedvfs.exists():
572 572 raise error.RepoError(
573 573 _(b'.hg/sharedpath points to nonexistent directory %s')
574 574 % sharedvfs.base
575 575 )
576 576
577 577 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 578
579 579 storebasepath = sharedvfs.base
580 580 cachepath = sharedvfs.join(b'cache')
581 581 else:
582 582 storebasepath = hgvfs.base
583 583 cachepath = hgvfs.join(b'cache')
584 584 wcachepath = hgvfs.join(b'wcache')
585 585
586 586 # The store has changed over time and the exact layout is dictated by
587 587 # requirements. The store interface abstracts differences across all
588 588 # of them.
589 589 store = makestore(
590 590 requirements,
591 591 storebasepath,
592 592 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 593 )
594 594 hgvfs.createmode = store.createmode
595 595
596 596 storevfs = store.vfs
597 597 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598 598
599 599 # The cache vfs is used to manage cache files.
600 600 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 601 cachevfs.createmode = store.createmode
602 602 # The wcache vfs is used to manage cache files related to the working copy
603 603 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 604 wcachevfs.createmode = store.createmode
605 605
606 606 # Now resolve the type for the repository object. We do this by repeatedly
607 607 # calling a factory function to produce types for specific aspects of the
608 608 # repo's operation. The aggregate returned types are used as base classes
609 609 # for a dynamically-derived type, which will represent our new repository.
610 610
611 611 bases = []
612 612 extrastate = {}
613 613
614 614 for iface, fn in REPO_INTERFACES:
615 615 # We pass all potentially useful state to give extensions tons of
616 616 # flexibility.
617 617 typ = fn()(
618 618 ui=ui,
619 619 intents=intents,
620 620 requirements=requirements,
621 621 features=features,
622 622 wdirvfs=wdirvfs,
623 623 hgvfs=hgvfs,
624 624 store=store,
625 625 storevfs=storevfs,
626 626 storeoptions=storevfs.options,
627 627 cachevfs=cachevfs,
628 628 wcachevfs=wcachevfs,
629 629 extensionmodulenames=extensionmodulenames,
630 630 extrastate=extrastate,
631 631 baseclasses=bases,
632 632 )
633 633
634 634 if not isinstance(typ, type):
635 635 raise error.ProgrammingError(
636 636 b'unable to construct type for %s' % iface
637 637 )
638 638
639 639 bases.append(typ)
640 640
641 641 # type() allows you to use characters in type names that wouldn't be
642 642 # recognized as Python symbols in source code. We abuse that to add
643 643 # rich information about our constructed repo.
644 644 name = pycompat.sysstr(
645 645 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 646 )
647 647
648 648 cls = type(name, tuple(bases), {})
649 649
650 650 return cls(
651 651 baseui=baseui,
652 652 ui=ui,
653 653 origroot=path,
654 654 wdirvfs=wdirvfs,
655 655 hgvfs=hgvfs,
656 656 requirements=requirements,
657 657 supportedrequirements=supportedrequirements,
658 658 sharedpath=storebasepath,
659 659 store=store,
660 660 cachevfs=cachevfs,
661 661 wcachevfs=wcachevfs,
662 662 features=features,
663 663 intents=intents,
664 664 )
665 665
666 666
667 667 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 668 """Load hgrc files/content into a ui instance.
669 669
670 670 This is called during repository opening to load any additional
671 671 config files or settings relevant to the current repository.
672 672
673 673 Returns a bool indicating whether any additional configs were loaded.
674 674
675 675 Extensions should monkeypatch this function to modify how per-repo
676 676 configs are loaded. For example, an extension may wish to pull in
677 677 configs from alternate files or sources.
678 678 """
679 679 try:
680 680 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
681 681 return True
682 682 except IOError:
683 683 return False
684 684
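Since the docstring above invites extensions to monkeypatch this function, here is a minimal sketch using Mercurial's stock `extensions.wrapfunction` helper; the extra config file name `hgrc-extra` is hypothetical:

    from mercurial import extensions, localrepo

    def _loadhgrcwrapper(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            # Hypothetical extra per-repo config file.
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            return True
        except IOError:
            return loaded

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcwrapper)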
685 685
686 686 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
687 687 """Perform additional actions after .hg/hgrc is loaded.
688 688
689 689 This function is called during repository loading immediately after
690 690 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
691 691
692 692 The function can be used to validate configs, automatically add
693 693 options (including extensions) based on requirements, etc.
694 694 """
695 695
696 696 # Map of requirements to lists of extensions to load automatically when
697 697 # the requirement is present.
698 698 autoextensions = {
699 699 b'largefiles': [b'largefiles'],
700 700 b'lfs': [b'lfs'],
701 701 }
702 702
703 703 for requirement, names in sorted(autoextensions.items()):
704 704 if requirement not in requirements:
705 705 continue
706 706
707 707 for name in names:
708 708 if not ui.hasconfig(b'extensions', name):
709 709 ui.setconfig(b'extensions', name, b'', source=b'autoload')
710 710
711 711
712 712 def gathersupportedrequirements(ui):
713 713 """Determine the complete set of recognized requirements."""
714 714 # Start with all requirements supported by this file.
715 715 supported = set(localrepository._basesupported)
716 716
717 717 # Execute ``featuresetupfuncs`` entries if they belong to an extension
718 718 # relevant to this ui instance.
719 719 modules = {m.__name__ for n, m in extensions.extensions(ui)}
720 720
721 721 for fn in featuresetupfuncs:
722 722 if fn.__module__ in modules:
723 723 fn(ui, supported)
724 724
725 725 # Add derived requirements from registered compression engines.
726 726 for name in util.compengines:
727 727 engine = util.compengines[name]
728 728 if engine.available() and engine.revlogheader():
729 729 supported.add(b'exp-compression-%s' % name)
730 730 if engine.name() == b'zstd':
731 731 supported.add(b'revlog-compression-zstd')
732 732
733 733 return supported
734 734
735 735
736 736 def ensurerequirementsrecognized(requirements, supported):
737 737 """Validate that a set of local requirements is recognized.
738 738
739 739 Receives a set of requirements. Raises an ``error.RepoError`` if there
740 740 exists any requirement in that set that currently loaded code doesn't
741 741 recognize.
742 742
743 743 Returns nothing on success.
744 744 """
745 745 missing = set()
746 746
747 747 for requirement in requirements:
748 748 if requirement in supported:
749 749 continue
750 750
751 751 if not requirement or not requirement[0:1].isalnum():
752 752 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
753 753
754 754 missing.add(requirement)
755 755
756 756 if missing:
757 757 raise error.RequirementError(
758 758 _(b'repository requires features unknown to this Mercurial: %s')
759 759 % b' '.join(sorted(missing)),
760 760 hint=_(
761 761 b'see https://mercurial-scm.org/wiki/MissingRequirement '
762 762 b'for more information'
763 763 ),
764 764 )
765 765
766 766
767 767 def ensurerequirementscompatible(ui, requirements):
768 768 """Validates that a set of recognized requirements is mutually compatible.
769 769
770 770 Some requirements may not be compatible with others or require
771 771 config options that aren't enabled. This function is called during
772 772 repository opening to ensure that the set of requirements needed
773 773 to open a repository is sane and compatible with config options.
774 774
775 775 Extensions can monkeypatch this function to perform additional
776 776 checking.
777 777
778 778 ``error.RepoError`` should be raised on failure.
779 779 """
780 780 if b'exp-sparse' in requirements and not sparse.enabled:
781 781 raise error.RepoError(
782 782 _(
783 783 b'repository is using sparse feature but '
784 784 b'sparse is not enabled; enable the '
785 785 b'"sparse" extensions to access'
786 786 )
787 787 )
788 788
789 789
790 790 def makestore(requirements, path, vfstype):
791 791 """Construct a storage object for a repository."""
792 792 if b'store' in requirements:
793 793 if b'fncache' in requirements:
794 794 return storemod.fncachestore(
795 795 path, vfstype, b'dotencode' in requirements
796 796 )
797 797
798 798 return storemod.encodedstore(path, vfstype)
799 799
800 800 return storemod.basicstore(path, vfstype)
801 801
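For reference, a hedged illustration of which store class each requirements combination selects, assuming the stock vfs constructor used earlier in this file:

    vfstype = lambda base: vfsmod.vfs(base, cacheaudited=True)

    makestore({b'store', b'fncache', b'dotencode'}, path, vfstype)  # fncachestore
    makestore({b'store'}, path, vfstype)                            # encodedstore
    makestore(set(), path, vfstype)                                 # basicstore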
802 802
803 803 def resolvestorevfsoptions(ui, requirements, features):
804 804 """Resolve the options to pass to the store vfs opener.
805 805
806 806 The returned dict is used to influence behavior of the storage layer.
807 807 """
808 808 options = {}
809 809
810 810 if b'treemanifest' in requirements:
811 811 options[b'treemanifest'] = True
812 812
813 813 # experimental config: format.manifestcachesize
814 814 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
815 815 if manifestcachesize is not None:
816 816 options[b'manifestcachesize'] = manifestcachesize
817 817
818 818 # In the absence of another requirement superseding a revlog-related
819 819 # requirement, we have to assume the repo is using revlog version 0.
820 820 # This revlog format is super old and we don't bother trying to parse
821 821 # opener options for it because those options wouldn't do anything
822 822 # meaningful on such old repos.
823 823 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
824 824 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
825 825 else: # explicitly mark repo as using revlogv0
826 826 options[b'revlogv0'] = True
827 827
828 828 if COPIESSDC_REQUIREMENT in requirements:
829 829 options[b'copies-storage'] = b'changeset-sidedata'
830 830 else:
831 831 writecopiesto = ui.config(b'experimental', b'copies.write-to')
832 832 copiesextramode = (b'changeset-only', b'compatibility')
833 833 if writecopiesto in copiesextramode:
834 834 options[b'copies-storage'] = b'extra'
835 835
836 836 return options
837 837
838 838
839 839 def resolverevlogstorevfsoptions(ui, requirements, features):
840 840 """Resolve opener options specific to revlogs."""
841 841
842 842 options = {}
843 843 options[b'flagprocessors'] = {}
844 844
845 845 if b'revlogv1' in requirements:
846 846 options[b'revlogv1'] = True
847 847 if REVLOGV2_REQUIREMENT in requirements:
848 848 options[b'revlogv2'] = True
849 849
850 850 if b'generaldelta' in requirements:
851 851 options[b'generaldelta'] = True
852 852
853 853 # experimental config: format.chunkcachesize
854 854 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
855 855 if chunkcachesize is not None:
856 856 options[b'chunkcachesize'] = chunkcachesize
857 857
858 858 deltabothparents = ui.configbool(
859 859 b'storage', b'revlog.optimize-delta-parent-choice'
860 860 )
861 861 options[b'deltabothparents'] = deltabothparents
862 862
863 863 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
864 864 lazydeltabase = False
865 865 if lazydelta:
866 866 lazydeltabase = ui.configbool(
867 867 b'storage', b'revlog.reuse-external-delta-parent'
868 868 )
869 869 if lazydeltabase is None:
870 870 lazydeltabase = not scmutil.gddeltaconfig(ui)
871 871 options[b'lazydelta'] = lazydelta
872 872 options[b'lazydeltabase'] = lazydeltabase
873 873
874 874 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
875 875 if 0 <= chainspan:
876 876 options[b'maxdeltachainspan'] = chainspan
877 877
878 878 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
879 879 if mmapindexthreshold is not None:
880 880 options[b'mmapindexthreshold'] = mmapindexthreshold
881 881
882 882 withsparseread = ui.configbool(b'experimental', b'sparse-read')
883 883 srdensitythres = float(
884 884 ui.config(b'experimental', b'sparse-read.density-threshold')
885 885 )
886 886 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
887 887 options[b'with-sparse-read'] = withsparseread
888 888 options[b'sparse-read-density-threshold'] = srdensitythres
889 889 options[b'sparse-read-min-gap-size'] = srmingapsize
890 890
891 891 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
892 892 options[b'sparse-revlog'] = sparserevlog
893 893 if sparserevlog:
894 894 options[b'generaldelta'] = True
895 895
896 896 sidedata = SIDEDATA_REQUIREMENT in requirements
897 897 options[b'side-data'] = sidedata
898 898
899 899 maxchainlen = None
900 900 if sparserevlog:
901 901 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
902 902 # experimental config: format.maxchainlen
903 903 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
904 904 if maxchainlen is not None:
905 905 options[b'maxchainlen'] = maxchainlen
906 906
907 907 for r in requirements:
908 908 # we allow multiple compression engine requirements to co-exist because,
909 909 # strictly speaking, revlog seems to support mixed compression styles.
910 910 #
911 911 # The compression used for new entries will be "the last one"
912 912 prefix = r.startswith
913 913 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
914 914 options[b'compengine'] = r.split(b'-', 2)[2]
915 915
916 916 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
917 917 if options[b'zlib.level'] is not None:
918 918 if not (0 <= options[b'zlib.level'] <= 9):
919 919 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
920 920 raise error.Abort(msg % options[b'zlib.level'])
921 921 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
922 922 if options[b'zstd.level'] is not None:
923 923 if not (0 <= options[b'zstd.level'] <= 22):
924 924 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
925 925 raise error.Abort(msg % options[b'zstd.level'])
926 926
927 927 if repository.NARROW_REQUIREMENT in requirements:
928 928 options[b'enableellipsis'] = True
929 929
930 930 return options
931 931
932 932
933 933 def makemain(**kwargs):
934 934 """Produce a type conforming to ``ilocalrepositorymain``."""
935 935 return localrepository
936 936
937 937
938 938 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
939 939 class revlogfilestorage(object):
940 940 """File storage when using revlogs."""
941 941
942 942 def file(self, path):
943 943 if path[0] == b'/':
944 944 path = path[1:]
945 945
946 946 return filelog.filelog(self.svfs, path)
947 947
948 948
949 949 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
950 950 class revlognarrowfilestorage(object):
951 951 """File storage when using revlogs and narrow files."""
952 952
953 953 def file(self, path):
954 954 if path[0] == b'/':
955 955 path = path[1:]
956 956
957 957 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
958 958
959 959
960 960 def makefilestorage(requirements, features, **kwargs):
961 961 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
962 962 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
963 963 features.add(repository.REPO_FEATURE_STREAM_CLONE)
964 964
965 965 if repository.NARROW_REQUIREMENT in requirements:
966 966 return revlognarrowfilestorage
967 967 else:
968 968 return revlogfilestorage
969 969
970 970
971 971 # List of repository interfaces and factory functions for them. Each
972 972 # will be called in order during ``makelocalrepository()`` to iteratively
973 973 # derive the final type for a local repository instance. We capture the
974 974 # function as a lambda so we don't hold a reference and the module-level
975 975 # functions can be wrapped.
976 976 REPO_INTERFACES = [
977 977 (repository.ilocalrepositorymain, lambda: makemain),
978 978 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
979 979 ]
980 980
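As `makelocalrepository()` explains above, each factory's returned class becomes a base of a dynamically derived repository type. A stripped-down sketch of that `type()` composition step, with illustrative class names:

    class MainAspect(object):
        def aspect(self):
            return b'main'

    class FileStorageAspect(object):
        def file(self, path):
            return path

    bases = [MainAspect, FileStorageAspect]
    cls = type('derivedrepo:example', tuple(bases), {})
    repo_like = cls()
    assert repo_like.aspect() == b'main'
    assert repo_like.file(b'a.txt') == b'a.txt'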
981 981
982 982 @interfaceutil.implementer(repository.ilocalrepositorymain)
983 983 class localrepository(object):
984 984 """Main class for representing local repositories.
985 985
986 986 All local repositories are instances of this class.
987 987
988 988 Constructed on its own, instances of this class are not usable as
989 989 repository objects. To obtain a usable repository object, call
990 990 ``hg.repository()``, ``localrepo.instance()``, or
991 991 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
992 992 ``instance()`` adds support for creating new repositories.
993 993 ``hg.repository()`` adds more extension integration, including calling
994 994 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
995 995 used.
996 996 """
997 997
998 998 # obsolete experimental requirements:
999 999 # - manifestv2: An experimental new manifest format that allowed
1000 1000 # for stem compression of long paths. Experiment ended up not
1001 1001 # being successful (repository sizes went up due to worse delta
1002 1002 # chains), and the code was deleted in 4.6.
1003 1003 supportedformats = {
1004 1004 b'revlogv1',
1005 1005 b'generaldelta',
1006 1006 b'treemanifest',
1007 1007 COPIESSDC_REQUIREMENT,
1008 1008 REVLOGV2_REQUIREMENT,
1009 1009 SIDEDATA_REQUIREMENT,
1010 1010 SPARSEREVLOG_REQUIREMENT,
1011 1011 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1012 1012 }
1013 1013 _basesupported = supportedformats | {
1014 1014 b'store',
1015 1015 b'fncache',
1016 1016 b'shared',
1017 1017 b'relshared',
1018 1018 b'dotencode',
1019 1019 b'exp-sparse',
1020 1020 b'internal-phase',
1021 1021 }
1022 1022
1023 1023 # list of prefixes for files which can be written without 'wlock'
1024 1024 # Extensions should extend this list when needed
1025 1025 _wlockfreeprefix = {
1026 1026 # We might consider requiring 'wlock' for the next
1027 1027 # two, but pretty much all the existing code assumes
1028 1028 # wlock is not needed so we keep them excluded for
1029 1029 # now.
1030 1030 b'hgrc',
1031 1031 b'requires',
1032 1032 # XXX cache is a complicated business; someone
1033 1033 # should investigate this in depth at some point
1034 1034 b'cache/',
1035 1035 # XXX shouldn't dirstate be covered by the wlock?
1036 1036 b'dirstate',
1037 1037 # XXX bisect was still a bit too messy at the time
1038 1038 # this changeset was introduced. Someone should fix
1039 1039 # the remaining bit and drop this line
1040 1040 b'bisect.state',
1041 1041 }
1042 1042
1043 1043 def __init__(
1044 1044 self,
1045 1045 baseui,
1046 1046 ui,
1047 1047 origroot,
1048 1048 wdirvfs,
1049 1049 hgvfs,
1050 1050 requirements,
1051 1051 supportedrequirements,
1052 1052 sharedpath,
1053 1053 store,
1054 1054 cachevfs,
1055 1055 wcachevfs,
1056 1056 features,
1057 1057 intents=None,
1058 1058 ):
1059 1059 """Create a new local repository instance.
1060 1060
1061 1061 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1062 1062 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1063 1063 object.
1064 1064
1065 1065 Arguments:
1066 1066
1067 1067 baseui
1068 1068 ``ui.ui`` instance that ``ui`` argument was based off of.
1069 1069
1070 1070 ui
1071 1071 ``ui.ui`` instance for use by the repository.
1072 1072
1073 1073 origroot
1074 1074 ``bytes`` path to working directory root of this repository.
1075 1075
1076 1076 wdirvfs
1077 1077 ``vfs.vfs`` rooted at the working directory.
1078 1078
1079 1079 hgvfs
1080 1080 ``vfs.vfs`` rooted at .hg/
1081 1081
1082 1082 requirements
1083 1083 ``set`` of bytestrings representing repository opening requirements.
1084 1084
1085 1085 supportedrequirements
1086 1086 ``set`` of bytestrings representing repository requirements that we
1087 1087 know how to open. May be a superset of ``requirements``.
1088 1088
1089 1089 sharedpath
1090 1090 ``bytes`` defining the path to the storage base directory. Points to a
1091 1091 ``.hg/`` directory somewhere.
1092 1092
1093 1093 store
1094 1094 ``store.basicstore`` (or derived) instance providing access to
1095 1095 versioned storage.
1096 1096
1097 1097 cachevfs
1098 1098 ``vfs.vfs`` used for cache files.
1099 1099
1100 1100 wcachevfs
1101 1101 ``vfs.vfs`` used for cache files related to the working copy.
1102 1102
1103 1103 features
1104 1104 ``set`` of bytestrings defining features/capabilities of this
1105 1105 instance.
1106 1106
1107 1107 intents
1108 1108 ``set`` of system strings indicating what this repo will be used
1109 1109 for.
1110 1110 """
1111 1111 self.baseui = baseui
1112 1112 self.ui = ui
1113 1113 self.origroot = origroot
1114 1114 # vfs rooted at working directory.
1115 1115 self.wvfs = wdirvfs
1116 1116 self.root = wdirvfs.base
1117 1117 # vfs rooted at .hg/. Used to access most non-store paths.
1118 1118 self.vfs = hgvfs
1119 1119 self.path = hgvfs.base
1120 1120 self.requirements = requirements
1121 1121 self.supported = supportedrequirements
1122 1122 self.sharedpath = sharedpath
1123 1123 self.store = store
1124 1124 self.cachevfs = cachevfs
1125 1125 self.wcachevfs = wcachevfs
1126 1126 self.features = features
1127 1127
1128 1128 self.filtername = None
1129 1129
1130 1130 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1131 1131 b'devel', b'check-locks'
1132 1132 ):
1133 1133 self.vfs.audit = self._getvfsward(self.vfs.audit)
1134 1134 # A list of callbacks to shape the phase if no data were found.
1135 1135 # Callbacks are in the form: func(repo, roots) --> processed root.
1136 1136 # This list is to be filled by extensions during repo setup
1137 1137 self._phasedefaults = []
1138 1138
1139 1139 color.setup(self.ui)
1140 1140
1141 1141 self.spath = self.store.path
1142 1142 self.svfs = self.store.vfs
1143 1143 self.sjoin = self.store.join
1144 1144 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1145 1145 b'devel', b'check-locks'
1146 1146 ):
1147 1147 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1148 1148 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1149 1149 else: # standard vfs
1150 1150 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1151 1151
1152 1152 self._dirstatevalidatewarned = False
1153 1153
1154 1154 self._branchcaches = branchmap.BranchMapCache()
1155 1155 self._revbranchcache = None
1156 1156 self._filterpats = {}
1157 1157 self._datafilters = {}
1158 1158 self._transref = self._lockref = self._wlockref = None
1159 1159
1160 1160 # A cache for various files under .hg/ that tracks file changes,
1161 1161 # (used by the filecache decorator)
1162 1162 #
1163 1163 # Maps a property name to its util.filecacheentry
1164 1164 self._filecache = {}
1165 1165
1166 1166 # hold sets of revisions to be filtered
1167 1167 # should be cleared when something might have changed the filter value:
1168 1168 # - new changesets,
1169 1169 # - phase change,
1170 1170 # - new obsolescence marker,
1171 1171 # - working directory parent change,
1172 1172 # - bookmark changes
1173 1173 self.filteredrevcache = {}
1174 1174
1175 1175 # post-dirstate-status hooks
1176 1176 self._postdsstatus = []
1177 1177
1178 1178 # generic mapping between names and nodes
1179 1179 self.names = namespaces.namespaces()
1180 1180
1181 1181 # Key to signature value.
1182 1182 self._sparsesignaturecache = {}
1183 1183 # Signature to cached matcher instance.
1184 1184 self._sparsematchercache = {}
1185 1185
1186 1186 self._extrafilterid = repoview.extrafilter(ui)
1187 1187
1188 1188 self.filecopiesmode = None
1189 1189 if COPIESSDC_REQUIREMENT in self.requirements:
1190 1190 self.filecopiesmode = b'changeset-sidedata'
1191 1191
1192 1192 def _getvfsward(self, origfunc):
1193 1193 """build a ward for self.vfs"""
1194 1194 rref = weakref.ref(self)
1195 1195
1196 1196 def checkvfs(path, mode=None):
1197 1197 ret = origfunc(path, mode=mode)
1198 1198 repo = rref()
1199 1199 if (
1200 1200 repo is None
1201 1201 or not util.safehasattr(repo, b'_wlockref')
1202 1202 or not util.safehasattr(repo, b'_lockref')
1203 1203 ):
1204 1204 return
1205 1205 if mode in (None, b'r', b'rb'):
1206 1206 return
1207 1207 if path.startswith(repo.path):
1208 1208 # truncate name relative to the repository (.hg)
1209 1209 path = path[len(repo.path) + 1 :]
1210 1210 if path.startswith(b'cache/'):
1211 1211 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1212 1212 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1213 1213 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1214 1214 # journal is covered by 'lock'
1215 1215 if repo._currentlock(repo._lockref) is None:
1216 1216 repo.ui.develwarn(
1217 1217 b'write with no lock: "%s"' % path,
1218 1218 stacklevel=3,
1219 1219 config=b'check-locks',
1220 1220 )
1221 1221 elif repo._currentlock(repo._wlockref) is None:
1222 1222 # rest of vfs files are covered by 'wlock'
1223 1223 #
1224 1224 # exclude special files
1225 1225 for prefix in self._wlockfreeprefix:
1226 1226 if path.startswith(prefix):
1227 1227 return
1228 1228 repo.ui.develwarn(
1229 1229 b'write with no wlock: "%s"' % path,
1230 1230 stacklevel=3,
1231 1231 config=b'check-locks',
1232 1232 )
1233 1233 return ret
1234 1234
1235 1235 return checkvfs
1236 1236
1237 1237 def _getsvfsward(self, origfunc):
1238 1238 """build a ward for self.svfs"""
1239 1239 rref = weakref.ref(self)
1240 1240
1241 1241 def checksvfs(path, mode=None):
1242 1242 ret = origfunc(path, mode=mode)
1243 1243 repo = rref()
1244 1244 if repo is None or not util.safehasattr(repo, b'_lockref'):
1245 1245 return
1246 1246 if mode in (None, b'r', b'rb'):
1247 1247 return
1248 1248 if path.startswith(repo.sharedpath):
1249 1249 # truncate name relative to the repository (.hg)
1250 1250 path = path[len(repo.sharedpath) + 1 :]
1251 1251 if repo._currentlock(repo._lockref) is None:
1252 1252 repo.ui.develwarn(
1253 1253 b'write with no lock: "%s"' % path, stacklevel=4
1254 1254 )
1255 1255 return ret
1256 1256
1257 1257 return checksvfs
1258 1258
1259 1259 def close(self):
1260 1260 self._writecaches()
1261 1261
1262 1262 def _writecaches(self):
1263 1263 if self._revbranchcache:
1264 1264 self._revbranchcache.write()
1265 1265
1266 1266 def _restrictcapabilities(self, caps):
1267 1267 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1268 1268 caps = set(caps)
1269 1269 capsblob = bundle2.encodecaps(
1270 1270 bundle2.getrepocaps(self, role=b'client')
1271 1271 )
1272 1272 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1273 1273 return caps
1274 1274
1275 1275 def _writerequirements(self):
1276 1276 scmutil.writerequires(self.vfs, self.requirements)
1277 1277
1278 1278 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1279 1279 # self -> auditor -> self._checknested -> self
1280 1280
1281 1281 @property
1282 1282 def auditor(self):
1283 1283 # This is only used by context.workingctx.match in order to
1284 1284 # detect files in subrepos.
1285 1285 return pathutil.pathauditor(self.root, callback=self._checknested)
1286 1286
1287 1287 @property
1288 1288 def nofsauditor(self):
1289 1289 # This is only used by context.basectx.match in order to detect
1290 1290 # files in subrepos.
1291 1291 return pathutil.pathauditor(
1292 1292 self.root, callback=self._checknested, realfs=False, cached=True
1293 1293 )
1294 1294
1295 1295 def _checknested(self, path):
1296 1296 """Determine if path is a legal nested repository."""
1297 1297 if not path.startswith(self.root):
1298 1298 return False
1299 1299 subpath = path[len(self.root) + 1 :]
1300 1300 normsubpath = util.pconvert(subpath)
1301 1301
1302 1302 # XXX: Checking against the current working copy is wrong in
1303 1303 # the sense that it can reject things like
1304 1304 #
1305 1305 # $ hg cat -r 10 sub/x.txt
1306 1306 #
1307 1307 # if sub/ is no longer a subrepository in the working copy
1308 1308 # parent revision.
1309 1309 #
1310 1310 # However, it can of course also allow things that would have
1311 1311 # been rejected before, such as the above cat command if sub/
1312 1312 # is a subrepository now, but was a normal directory before.
1313 1313 # The old path auditor would have rejected by mistake since it
1314 1314 # panics when it sees sub/.hg/.
1315 1315 #
1316 1316 # All in all, checking against the working copy seems sensible
1317 1317 # since we want to prevent access to nested repositories on
1318 1318 # the filesystem *now*.
1319 1319 ctx = self[None]
1320 1320 parts = util.splitpath(subpath)
1321 1321 while parts:
1322 1322 prefix = b'/'.join(parts)
1323 1323 if prefix in ctx.substate:
1324 1324 if prefix == normsubpath:
1325 1325 return True
1326 1326 else:
1327 1327 sub = ctx.sub(prefix)
1328 1328 return sub.checknested(subpath[len(prefix) + 1 :])
1329 1329 else:
1330 1330 parts.pop()
1331 1331 return False
1332 1332
1333 1333 def peer(self):
1334 1334 return localpeer(self) # not cached to avoid reference cycle
1335 1335
1336 1336 def unfiltered(self):
1337 1337 """Return unfiltered version of the repository
1338 1338
1339 1339 Intended to be overwritten by filtered repo."""
1340 1340 return self
1341 1341
1342 1342 def filtered(self, name, visibilityexceptions=None):
1343 1343 """Return a filtered version of a repository
1344 1344
1345 1345 The `name` parameter is the identifier of the requested view. This
1346 1346 will return a repoview object set "exactly" to the specified view.
1347 1347
1348 1348 This function does not apply recursive filtering to a repository. For
1349 1349 example calling `repo.filtered("served")` will return a repoview using
1350 1350 the "served" view, regardless of the initial view used by `repo`.
1351 1351
1352 1352 In other words, there is always only one level of `repoview` "filtering".
1353 1353 """
1354 1354 if self._extrafilterid is not None and b'%' not in name:
1355 1355 name = name + b'%' + self._extrafilterid
1356 1356
1357 1357 cls = repoview.newtype(self.unfiltered().__class__)
1358 1358 return cls(self, name, visibilityexceptions)
1359 1359
1360 1360 @mixedrepostorecache(
1361 1361 (b'bookmarks', b'plain'),
1362 1362 (b'bookmarks.current', b'plain'),
1363 1363 (b'bookmarks', b''),
1364 1364 (b'00changelog.i', b''),
1365 1365 )
1366 1366 def _bookmarks(self):
1367 1367 # Since the multiple files involved in the transaction cannot be
1368 1368 # written atomically (with current repository format), there is a race
1369 1369 # condition here.
1370 1370 #
1371 1371 # 1) changelog content A is read
1372 1372 # 2) an outside transaction updates the changelog to content B
1373 1373 # 3) an outside transaction updates the bookmark file, referring to content B
1374 1374 # 4) bookmarks file content is read and filtered against changelog-A
1375 1375 #
1376 1376 # When this happens, bookmarks against nodes missing from A are dropped.
1377 1377 #
1378 1378 # Having this happen during read is not great, but it becomes worse
1379 1379 # when it happens during write because the bookmarks to the "unknown"
1380 1380 # nodes will be dropped for good. However, writes happen within locks.
1381 1381 # This locking makes it possible to have a race-free consistent read.
1382 1382 # For this purpose, data read from disk before locking is
1383 1383 # "invalidated" right after the locks are taken. These invalidations are
1384 1384 # "light": the `filecache` mechanism keeps the data in memory and will
1385 1385 # reuse it if the underlying files did not change. Not parsing the
1386 1386 # same data multiple times helps performance.
1387 1387 #
1388 1388 # Unfortunately, in the case described above, the files tracked by the
1389 1389 # bookmarks file cache might not have changed, but the in-memory
1390 1390 # content is still "wrong" because we used an older changelog content
1391 1391 # to process the on-disk data. So after locking, the changelog would be
1392 1392 # refreshed but `_bookmarks` would be preserved.
1393 1393 # Adding `00changelog.i` to the list of tracked files is not
1394 1394 # enough, because at the time we build the content for `_bookmarks` in
1395 1395 # (4), the changelog file has already diverged from the content used
1396 1396 # for loading `changelog` in (1)
1397 1397 #
1398 1398 # To prevent the issue, we force the changelog to be explicitly
1399 1399 # reloaded while computing `_bookmarks`. The data race can still happen
1400 1400 # without the lock (with a narrower window), but it would no longer go
1401 1401 # undetected during the lock time refresh.
1402 1402 #
1403 1403 # The new schedule is as follows:
1404 1404 #
1405 1405 # 1) filecache logic detect that `_bookmarks` needs to be computed
1406 1406 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1407 1407 # 3) We force `changelog` filecache to be tested
1408 1408 # 4) cachestat for `changelog` are captured (for changelog)
1409 1409 # 5) `_bookmarks` is computed and cached
1410 1410 #
1411 1411 # The step in (3) ensures we have a changelog at least as recent as the
1412 1412 # cache stat computed in (1). As a result at locking time:
1413 1413 # * if the changelog did not change since (1) -> we can reuse the data
1414 1414 # * otherwise -> the bookmarks get refreshed.
1415 1415 self._refreshchangelog()
1416 1416 return bookmarks.bmstore(self)
1417 1417
1418 1418 def _refreshchangelog(self):
1419 1419 """make sure the in memory changelog match the on-disk one"""
1420 1420 if 'changelog' in vars(self) and self.currenttransaction() is None:
1421 1421 del self.changelog
1422 1422
1423 1423 @property
1424 1424 def _activebookmark(self):
1425 1425 return self._bookmarks.active
1426 1426
1427 1427 # _phasesets depend on the changelog. What we need is to call
1428 1428 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1429 1429 # can't be easily expressed in the filecache mechanism.
1430 1430 @storecache(b'phaseroots', b'00changelog.i')
1431 1431 def _phasecache(self):
1432 1432 return phases.phasecache(self, self._phasedefaults)
1433 1433
1434 1434 @storecache(b'obsstore')
1435 1435 def obsstore(self):
1436 1436 return obsolete.makestore(self.ui, self)
1437 1437
1438 1438 @storecache(b'00changelog.i')
1439 1439 def changelog(self):
1440 1440 return self.store.changelog(txnutil.mayhavepending(self.root))
1441 1441
1442 1442 @storecache(b'00manifest.i')
1443 1443 def manifestlog(self):
1444 1444 return self.store.manifestlog(self, self._storenarrowmatch)
1445 1445
1446 1446 @repofilecache(b'dirstate')
1447 1447 def dirstate(self):
1448 1448 return self._makedirstate()
1449 1449
1450 1450 def _makedirstate(self):
1451 1451 """Extension point for wrapping the dirstate per-repo."""
1452 1452 sparsematchfn = lambda: sparse.matcher(self)
1453 1453
1454 1454 return dirstate.dirstate(
1455 1455 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1456 1456 )
1457 1457
1458 1458 def _dirstatevalidate(self, node):
1459 1459 try:
1460 1460 self.changelog.rev(node)
1461 1461 return node
1462 1462 except error.LookupError:
1463 1463 if not self._dirstatevalidatewarned:
1464 1464 self._dirstatevalidatewarned = True
1465 1465 self.ui.warn(
1466 1466 _(b"warning: ignoring unknown working parent %s!\n")
1467 1467 % short(node)
1468 1468 )
1469 1469 return nullid
1470 1470
1471 1471 @storecache(narrowspec.FILENAME)
1472 1472 def narrowpats(self):
1473 1473 """matcher patterns for this repository's narrowspec
1474 1474
1475 1475 A tuple of (includes, excludes).
1476 1476 """
1477 1477 return narrowspec.load(self)
1478 1478
1479 1479 @storecache(narrowspec.FILENAME)
1480 1480 def _storenarrowmatch(self):
1481 1481 if repository.NARROW_REQUIREMENT not in self.requirements:
1482 1482 return matchmod.always()
1483 1483 include, exclude = self.narrowpats
1484 1484 return narrowspec.match(self.root, include=include, exclude=exclude)
1485 1485
1486 1486 @storecache(narrowspec.FILENAME)
1487 1487 def _narrowmatch(self):
1488 1488 if repository.NARROW_REQUIREMENT not in self.requirements:
1489 1489 return matchmod.always()
1490 1490 narrowspec.checkworkingcopynarrowspec(self)
1491 1491 include, exclude = self.narrowpats
1492 1492 return narrowspec.match(self.root, include=include, exclude=exclude)
1493 1493
1494 1494 def narrowmatch(self, match=None, includeexact=False):
1495 1495 """matcher corresponding the the repo's narrowspec
1496 1496
1497 1497 If `match` is given, then that will be intersected with the narrow
1498 1498 matcher.
1499 1499
1500 1500 If `includeexact` is True, then any exact matches from `match` will
1501 1501 be included even if they're outside the narrowspec.
1502 1502 """
1503 1503 if match:
1504 1504 if includeexact and not self._narrowmatch.always():
1505 1505 # do not exclude explicitly-specified paths so that they can
1506 1506 # be warned later on
1507 1507 em = matchmod.exact(match.files())
1508 1508 nm = matchmod.unionmatcher([self._narrowmatch, em])
1509 1509 return matchmod.intersectmatchers(match, nm)
1510 1510 return matchmod.intersectmatchers(match, self._narrowmatch)
1511 1511 return self._narrowmatch
1512 1512
1513 1513 def setnarrowpats(self, newincludes, newexcludes):
1514 1514 narrowspec.save(self, newincludes, newexcludes)
1515 1515 self.invalidate(clearfilecache=True)
1516 1516
1517 @util.propertycache
1518 def _quick_access_changeid(self):
1519 """an helper dictionnary for __getitem__ calls
1520
1521 This contains a list of symbol we can recognise right away without
1522 further processing.
1523 """
1524 return {
1525 b'null': (nullrev, nullid),
1526 nullrev: (nullrev, nullid),
1527 nullid: (nullrev, nullid),
1528 }
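# A hedged usage sketch (`repo` stands for any localrepository
# instance): each of
#
#   repo[b'null']    repo[nullrev]    repo[nullid]
#
# hits this fast path in __getitem__ below and returns the null
# changectx without consulting the changelog.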
1529
1517 1530 def __getitem__(self, changeid):
1518 1531 # dealing with special cases
1519 1532 if changeid is None:
1520 1533 return context.workingctx(self)
1521 1534 if isinstance(changeid, context.basectx):
1522 1535 return changeid
1523 1536
1524 1537 # dealing with multiple revisions
1525 1538 if isinstance(changeid, slice):
1526 1539 # wdirrev isn't contiguous so the slice shouldn't include it
1527 1540 return [
1528 1541 self[i]
1529 1542 for i in pycompat.xrange(*changeid.indices(len(self)))
1530 1543 if i not in self.changelog.filteredrevs
1531 1544 ]
1532 1545
1533 1546 # dealing with some special values
1534 if changeid == b'null' or changeid == nullrev or changeid == nullid:
1535 return context.changectx(self, nullrev, nullid, maybe_filtered=False)
1547 quick_access = self._quick_access_changeid.get(changeid)
1548 if quick_access is not None:
1549 rev, node = quick_access
1550 return context.changectx(self, rev, node, maybe_filtered=False)
1536 1551 if changeid == b'tip':
1537 1552 node = self.changelog.tip()
1538 1553 rev = self.changelog.rev(node)
1539 1554 return context.changectx(self, rev, node)
1540 1555
1541 1556 # dealing with arbitrary values
1542 1557 try:
1543 1558 if isinstance(changeid, int):
1544 1559 node = self.changelog.node(changeid)
1545 1560 rev = changeid
1546 1561 elif changeid == b'.':
1547 1562 # this is a hack to delay/avoid loading obsmarkers
1548 1563 # when we know that '.' won't be hidden
1549 1564 node = self.dirstate.p1()
1550 1565 rev = self.unfiltered().changelog.rev(node)
1551 1566 elif len(changeid) == 20:
1552 1567 try:
1553 1568 node = changeid
1554 1569 rev = self.changelog.rev(changeid)
1555 1570 except error.FilteredLookupError:
1556 1571 changeid = hex(changeid) # for the error message
1557 1572 raise
1558 1573 except LookupError:
1559 1574 # check if it might have come from a damaged dirstate
1560 1575 #
1561 1576 # XXX we could avoid the unfiltered if we had a recognizable
1562 1577 # exception for filtered changeset access
1563 1578 if (
1564 1579 self.local()
1565 1580 and changeid in self.unfiltered().dirstate.parents()
1566 1581 ):
1567 1582 msg = _(b"working directory has unknown parent '%s'!")
1568 1583 raise error.Abort(msg % short(changeid))
1569 1584 changeid = hex(changeid) # for the error message
1570 1585 raise
1571 1586
1572 1587 elif len(changeid) == 40:
1573 1588 node = bin(changeid)
1574 1589 rev = self.changelog.rev(node)
1575 1590 else:
1576 1591 raise error.ProgrammingError(
1577 1592 b"unsupported changeid '%s' of type %s"
1578 1593 % (changeid, pycompat.bytestr(type(changeid)))
1579 1594 )
1580 1595
1581 1596 return context.changectx(self, rev, node)
1582 1597
1583 1598 except (error.FilteredIndexError, error.FilteredLookupError):
1584 1599 raise error.FilteredRepoLookupError(
1585 1600 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1586 1601 )
1587 1602 except (IndexError, LookupError):
1588 1603 raise error.RepoLookupError(
1589 1604 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1590 1605 )
1591 1606 except error.WdirUnsupported:
1592 1607 return context.workingctx(self)
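# Illustrative summary of the keys handled above (a sketch, not
# exhaustive):
#
#   repo[None]         -> workingctx for the working directory
#   repo[0]            -> changectx for revision 0
#   repo[b'.']         -> changectx for the working directory parent
#   repo[b'tip']       -> changectx for the repository tip
#   repo[binnode]      -> changectx for a 20-byte binary node
#   repo[b'<40 hex>']  -> changectx for a full hex nodeid
#   repo[0:2]          -> [repo[0], repo[1]], skipping filtered revs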
1593 1608
1594 1609 def __contains__(self, changeid):
1595 1610 """True if the given changeid exists
1596 1611
1597 1612 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1598 1613 is specified.
1599 1614 """
1600 1615 try:
1601 1616 self[changeid]
1602 1617 return True
1603 1618 except error.RepoLookupError:
1604 1619 return False
1605 1620
1606 1621 def __nonzero__(self):
1607 1622 return True
1608 1623
1609 1624 __bool__ = __nonzero__
1610 1625
1611 1626 def __len__(self):
1612 1627 # no need to pay the cost of repoview.changelog
1613 1628 unfi = self.unfiltered()
1614 1629 return len(unfi.changelog)
1615 1630
1616 1631 def __iter__(self):
1617 1632 return iter(self.changelog)
1618 1633
1619 1634 def revs(self, expr, *args):
1620 1635 '''Find revisions matching a revset.
1621 1636
1622 1637 The revset is specified as a string ``expr`` that may contain
1623 1638 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1624 1639
1625 1640 Revset aliases from the configuration are not expanded. To expand
1626 1641 user aliases, consider calling ``scmutil.revrange()`` or
1627 1642 ``repo.anyrevs([expr], user=True)``.
1628 1643
1629 1644 Returns a revset.abstractsmartset, which is a list-like interface
1630 1645 that contains integer revisions.
1631 1646 '''
1632 1647 tree = revsetlang.spectree(expr, *args)
1633 1648 return revset.makematcher(tree)(self)
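# A hedged example of the %-formatting mentioned above (values are
# illustrative):
#
#   subset = repo.revs(b'%ld and not %ld', [0, 1, 2], [1])
#   # -> smartset containing revisions 0 and 2
#
# where %ld escapes a list of integer revisions; see
# revsetlang.formatspec for the full list of escapes.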
1634 1649
1635 1650 def set(self, expr, *args):
1636 1651 '''Find revisions matching a revset and emit changectx instances.
1637 1652
1638 1653 This is a convenience wrapper around ``revs()`` that iterates the
1639 1654 result and is a generator of changectx instances.
1640 1655
1641 1656 Revset aliases from the configuration are not expanded. To expand
1642 1657 user aliases, consider calling ``scmutil.revrange()``.
1643 1658 '''
1644 1659 for r in self.revs(expr, *args):
1645 1660 yield self[r]
1646 1661
1647 1662 def anyrevs(self, specs, user=False, localalias=None):
1648 1663 '''Find revisions matching one of the given revsets.
1649 1664
1650 1665 Revset aliases from the configuration are not expanded by default. To
1651 1666 expand user aliases, specify ``user=True``. To provide some local
1652 1667 definitions overriding user aliases, set ``localalias`` to
1653 1668 ``{name: definitionstring}``.
1654 1669 '''
1655 1670 if specs == [b'null']:
1656 1671 return revset.baseset([nullrev])
1657 1672 if user:
1658 1673 m = revset.matchany(
1659 1674 self.ui,
1660 1675 specs,
1661 1676 lookup=revset.lookupfn(self),
1662 1677 localalias=localalias,
1663 1678 )
1664 1679 else:
1665 1680 m = revset.matchany(None, specs, localalias=localalias)
1666 1681 return m(self)
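# For example (a sketch): with user aliases enabled,
#
#   repo.anyrevs([b'heads(default)', b'.'], user=True)
#
# returns the union of the revisions matching either spec, expanding
# any revset aliases defined in the configuration.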
1667 1682
1668 1683 def url(self):
1669 1684 return b'file:' + self.root
1670 1685
1671 1686 def hook(self, name, throw=False, **args):
1672 1687 """Call a hook, passing this repo instance.
1673 1688
1674 1689 This a convenience method to aid invoking hooks. Extensions likely
1675 1690 won't call this unless they have registered a custom hook or are
1676 1691 replacing code that is expected to call a hook.
1677 1692 """
1678 1693 return hook.hook(self.ui, self, name, throw, **args)
1679 1694
1680 1695 @filteredpropertycache
1681 1696 def _tagscache(self):
1682 1697 '''Returns a tagscache object that contains various tags related
1683 1698 caches.'''
1684 1699
1685 1700 # This simplifies its cache management by having one decorated
1686 1701 # function (this one) and the rest simply fetch things from it.
1687 1702 class tagscache(object):
1688 1703 def __init__(self):
1689 1704 # These two define the set of tags for this repository. tags
1690 1705 # maps tag name to node; tagtypes maps tag name to 'global' or
1691 1706 # 'local'. (Global tags are defined by .hgtags across all
1692 1707 # heads, and local tags are defined in .hg/localtags.)
1693 1708 # They constitute the in-memory cache of tags.
1694 1709 self.tags = self.tagtypes = None
1695 1710
1696 1711 self.nodetagscache = self.tagslist = None
1697 1712
1698 1713 cache = tagscache()
1699 1714 cache.tags, cache.tagtypes = self._findtags()
1700 1715
1701 1716 return cache
1702 1717
1703 1718 def tags(self):
1704 1719 '''return a mapping of tag to node'''
1705 1720 t = {}
1706 1721 if self.changelog.filteredrevs:
1707 1722 tags, tt = self._findtags()
1708 1723 else:
1709 1724 tags = self._tagscache.tags
1710 1725 rev = self.changelog.rev
1711 1726 for k, v in pycompat.iteritems(tags):
1712 1727 try:
1713 1728 # ignore tags to unknown nodes
1714 1729 rev(v)
1715 1730 t[k] = v
1716 1731 except (error.LookupError, ValueError):
1717 1732 pass
1718 1733 return t
1719 1734
1720 1735 def _findtags(self):
1721 1736 '''Do the hard work of finding tags. Return a pair of dicts
1722 1737 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1723 1738 maps tag name to a string like \'global\' or \'local\'.
1724 1739 Subclasses or extensions are free to add their own tags, but
1725 1740 should be aware that the returned dicts will be retained for the
1726 1741 duration of the localrepo object.'''
1727 1742
1728 1743 # XXX what tagtype should subclasses/extensions use? Currently
1729 1744 # mq and bookmarks add tags, but do not set the tagtype at all.
1730 1745 # Should each extension invent its own tag type? Should there
1731 1746 # be one tagtype for all such "virtual" tags? Or is the status
1732 1747 # quo fine?
1733 1748
1734 1749 # map tag name to (node, hist)
1735 1750 alltags = tagsmod.findglobaltags(self.ui, self)
1736 1751 # map tag name to tag type
1737 1752 tagtypes = dict((tag, b'global') for tag in alltags)
1738 1753
1739 1754 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1740 1755
1741 1756 # Build the return dicts. Have to re-encode tag names because
1742 1757 # the tags module always uses UTF-8 (in order not to lose info
1743 1758 # writing to the cache), but the rest of Mercurial wants them in
1744 1759 # local encoding.
1745 1760 tags = {}
1746 1761 for (name, (node, hist)) in pycompat.iteritems(alltags):
1747 1762 if node != nullid:
1748 1763 tags[encoding.tolocal(name)] = node
1749 1764 tags[b'tip'] = self.changelog.tip()
1750 1765 tagtypes = dict(
1751 1766 [
1752 1767 (encoding.tolocal(name), value)
1753 1768 for (name, value) in pycompat.iteritems(tagtypes)
1754 1769 ]
1755 1770 )
1756 1771 return (tags, tagtypes)
1757 1772
1758 1773 def tagtype(self, tagname):
1759 1774 '''
1760 1775 return the type of the given tag. result can be:
1761 1776
1762 1777 'local' : a local tag
1763 1778 'global' : a global tag
1764 1779 None : tag does not exist
1765 1780 '''
1766 1781
1767 1782 return self._tagscache.tagtypes.get(tagname)
1768 1783
1769 1784 def tagslist(self):
1770 1785 '''return a list of tags ordered by revision'''
1771 1786 if not self._tagscache.tagslist:
1772 1787 l = []
1773 1788 for t, n in pycompat.iteritems(self.tags()):
1774 1789 l.append((self.changelog.rev(n), t, n))
1775 1790 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1776 1791
1777 1792 return self._tagscache.tagslist
1778 1793
1779 1794 def nodetags(self, node):
1780 1795 '''return the tags associated with a node'''
1781 1796 if not self._tagscache.nodetagscache:
1782 1797 nodetagscache = {}
1783 1798 for t, n in pycompat.iteritems(self._tagscache.tags):
1784 1799 nodetagscache.setdefault(n, []).append(t)
1785 1800 for tags in pycompat.itervalues(nodetagscache):
1786 1801 tags.sort()
1787 1802 self._tagscache.nodetagscache = nodetagscache
1788 1803 return self._tagscache.nodetagscache.get(node, [])
1789 1804
1790 1805 def nodebookmarks(self, node):
1791 1806 """return the list of bookmarks pointing to the specified node"""
1792 1807 return self._bookmarks.names(node)
1793 1808
1794 1809 def branchmap(self):
1795 1810 '''returns a dictionary {branch: [branchheads]} with branchheads
1796 1811 ordered by increasing revision number'''
1797 1812 return self._branchcaches[self]
1798 1813
1799 1814 @unfilteredmethod
1800 1815 def revbranchcache(self):
1801 1816 if not self._revbranchcache:
1802 1817 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1803 1818 return self._revbranchcache
1804 1819
1805 1820 def branchtip(self, branch, ignoremissing=False):
1806 1821 '''return the tip node for a given branch
1807 1822
1808 1823 If ignoremissing is True, then this method will not raise an error.
1809 1824 This is helpful for callers that only expect None for a missing branch
1810 1825 (e.g. namespace).
1811 1826
1812 1827 '''
1813 1828 try:
1814 1829 return self.branchmap().branchtip(branch)
1815 1830 except KeyError:
1816 1831 if not ignoremissing:
1817 1832 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1818 1833 else:
1819 1834 pass
1820 1835
1821 1836 def lookup(self, key):
1822 1837 node = scmutil.revsymbol(self, key).node()
1823 1838 if node is None:
1824 1839 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1825 1840 return node
1826 1841
1827 1842 def lookupbranch(self, key):
1828 1843 if self.branchmap().hasbranch(key):
1829 1844 return key
1830 1845
1831 1846 return scmutil.revsymbol(self, key).branch()
1832 1847
1833 1848 def known(self, nodes):
1834 1849 cl = self.changelog
1835 1850 get_rev = cl.index.get_rev
1836 1851 filtered = cl.filteredrevs
1837 1852 result = []
1838 1853 for n in nodes:
1839 1854 r = get_rev(n)
1840 1855 resp = not (r is None or r in filtered)
1841 1856 result.append(resp)
1842 1857 return result
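# For example (a sketch): if n1 is a known node while n2 is unknown or
# filtered,
#
#   repo.known([n1, n2]) -> [True, False]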
1843 1858
1844 1859 def local(self):
1845 1860 return self
1846 1861
1847 1862 def publishing(self):
1848 1863 # it's safe (and desirable) to trust the publish flag unconditionally
1849 1864 # so that we don't finalize changes shared between users via ssh or nfs
1850 1865 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1851 1866
1852 1867 def cancopy(self):
1853 1868 # so statichttprepo's override of local() works
1854 1869 if not self.local():
1855 1870 return False
1856 1871 if not self.publishing():
1857 1872 return True
1858 1873 # if publishing we can't copy if there is filtered content
1859 1874 return not self.filtered(b'visible').changelog.filteredrevs
1860 1875
1861 1876 def shared(self):
1862 1877 '''the type of shared repository (None if not shared)'''
1863 1878 if self.sharedpath != self.path:
1864 1879 return b'store'
1865 1880 return None
1866 1881
1867 1882 def wjoin(self, f, *insidef):
1868 1883 return self.vfs.reljoin(self.root, f, *insidef)
1869 1884
1870 1885 def setparents(self, p1, p2=nullid):
1871 1886 with self.dirstate.parentchange():
1872 1887 copies = self.dirstate.setparents(p1, p2)
1873 1888 pctx = self[p1]
1874 1889 if copies:
1875 1890 # Adjust copy records, the dirstate cannot do it, it
1876 1891 # requires access to parents manifests. Preserve them
1877 1892 # only for entries added to first parent.
1878 1893 for f in copies:
1879 1894 if f not in pctx and copies[f] in pctx:
1880 1895 self.dirstate.copy(copies[f], f)
1881 1896 if p2 == nullid:
1882 1897 for f, s in sorted(self.dirstate.copies().items()):
1883 1898 if f not in pctx and s not in pctx:
1884 1899 self.dirstate.copy(None, f)
1885 1900
1886 1901 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1887 1902 """changeid must be a changeset revision, if specified.
1888 1903 fileid can be a file revision or node."""
1889 1904 return context.filectx(
1890 1905 self, path, changeid, fileid, changectx=changectx
1891 1906 )
1892 1907
1893 1908 def getcwd(self):
1894 1909 return self.dirstate.getcwd()
1895 1910
1896 1911 def pathto(self, f, cwd=None):
1897 1912 return self.dirstate.pathto(f, cwd)
1898 1913
1899 1914 def _loadfilter(self, filter):
1900 1915 if filter not in self._filterpats:
1901 1916 l = []
1902 1917 for pat, cmd in self.ui.configitems(filter):
1903 1918 if cmd == b'!':
1904 1919 continue
1905 1920 mf = matchmod.match(self.root, b'', [pat])
1906 1921 fn = None
1907 1922 params = cmd
1908 1923 for name, filterfn in pycompat.iteritems(self._datafilters):
1909 1924 if cmd.startswith(name):
1910 1925 fn = filterfn
1911 1926 params = cmd[len(name) :].lstrip()
1912 1927 break
1913 1928 if not fn:
1914 1929 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1915 1930 fn.__name__ = 'commandfilter'
1916 1931 # Wrap old filters not supporting keyword arguments
1917 1932 if not pycompat.getargspec(fn)[2]:
1918 1933 oldfn = fn
1919 1934 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1920 1935 fn.__name__ = 'compat-' + oldfn.__name__
1921 1936 l.append((mf, fn, params))
1922 1937 self._filterpats[filter] = l
1923 1938 return self._filterpats[filter]
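# The patterns come from the hgrc section named by `filter` (b'encode'
# or b'decode' below). A hedged configuration sketch:
#
#   [encode]
#   *.txt = dos2unix
#
# would pipe matching files through that command when they are read
# from the working directory (see wread); a command of b'!' disables
# the pattern, as handled above.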
1924 1939
1925 1940 def _filter(self, filterpats, filename, data):
1926 1941 for mf, fn, cmd in filterpats:
1927 1942 if mf(filename):
1928 1943 self.ui.debug(
1929 1944 b"filtering %s through %s\n"
1930 1945 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1931 1946 )
1932 1947 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1933 1948 break
1934 1949
1935 1950 return data
1936 1951
1937 1952 @unfilteredpropertycache
1938 1953 def _encodefilterpats(self):
1939 1954 return self._loadfilter(b'encode')
1940 1955
1941 1956 @unfilteredpropertycache
1942 1957 def _decodefilterpats(self):
1943 1958 return self._loadfilter(b'decode')
1944 1959
1945 1960 def adddatafilter(self, name, filter):
1946 1961 self._datafilters[name] = filter
1947 1962
1948 1963 def wread(self, filename):
1949 1964 if self.wvfs.islink(filename):
1950 1965 data = self.wvfs.readlink(filename)
1951 1966 else:
1952 1967 data = self.wvfs.read(filename)
1953 1968 return self._filter(self._encodefilterpats, filename, data)
1954 1969
1955 1970 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1956 1971 """write ``data`` into ``filename`` in the working directory
1957 1972
1958 1973 This returns length of written (maybe decoded) data.
1959 1974 """
1960 1975 data = self._filter(self._decodefilterpats, filename, data)
1961 1976 if b'l' in flags:
1962 1977 self.wvfs.symlink(data, filename)
1963 1978 else:
1964 1979 self.wvfs.write(
1965 1980 filename, data, backgroundclose=backgroundclose, **kwargs
1966 1981 )
1967 1982 if b'x' in flags:
1968 1983 self.wvfs.setflags(filename, False, True)
1969 1984 else:
1970 1985 self.wvfs.setflags(filename, False, False)
1971 1986 return len(data)
1972 1987
1973 1988 def wwritedata(self, filename, data):
1974 1989 return self._filter(self._decodefilterpats, filename, data)
1975 1990
1976 1991 def currenttransaction(self):
1977 1992 """return the current transaction or None if non exists"""
1978 1993 if self._transref:
1979 1994 tr = self._transref()
1980 1995 else:
1981 1996 tr = None
1982 1997
1983 1998 if tr and tr.running():
1984 1999 return tr
1985 2000 return None
1986 2001
1987 2002 def transaction(self, desc, report=None):
1988 2003 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1989 2004 b'devel', b'check-locks'
1990 2005 ):
1991 2006 if self._currentlock(self._lockref) is None:
1992 2007 raise error.ProgrammingError(b'transaction requires locking')
1993 2008 tr = self.currenttransaction()
1994 2009 if tr is not None:
1995 2010 return tr.nest(name=desc)
1996 2011
1997 2012 # abort here if the journal already exists
1998 2013 if self.svfs.exists(b"journal"):
1999 2014 raise error.RepoError(
2000 2015 _(b"abandoned transaction found"),
2001 2016 hint=_(b"run 'hg recover' to clean up transaction"),
2002 2017 )
2003 2018
2004 2019 idbase = b"%.40f#%f" % (random.random(), time.time())
2005 2020 ha = hex(hashlib.sha1(idbase).digest())
2006 2021 txnid = b'TXN:' + ha
2007 2022 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2008 2023
2009 2024 self._writejournal(desc)
2010 2025 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2011 2026 if report:
2012 2027 rp = report
2013 2028 else:
2014 2029 rp = self.ui.warn
2015 2030 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2016 2031 # we must avoid cyclic reference between repo and transaction.
2017 2032 reporef = weakref.ref(self)
2018 2033 # Code to track tag movement
2019 2034 #
2020 2035 # Since tags are all handled as file content, it is actually quite hard
2021 2036 # to track their movement from a code perspective. So we fall back to
2022 2037 # tracking at the repository level. One could envision tracking changes
2023 2038 # to the '.hgtags' file through changegroup apply, but that fails to
2024 2039 # cope with cases where a transaction exposes new heads without a
2025 2040 # changegroup being involved (eg: phase movement).
2026 2041 #
2027 2042 # For now, we gate the feature behind a flag since it likely comes
2028 2043 # with performance impacts. The current code runs more often than
2029 2044 # needed and does not use caches as much as it could. The current focus
2030 2045 # is on the behavior of the feature, so we disable it by default. The
2031 2046 # flag will be removed when we are happy with the performance impact.
2032 2047 #
2033 2048 # Once this feature is no longer experimental move the following
2034 2049 # documentation to the appropriate help section:
2035 2050 #
2036 2051 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2037 2052 # tags (new or changed or deleted tags). In addition the details of
2038 2053 # these changes are made available in a file at:
2039 2054 # ``REPOROOT/.hg/changes/tags.changes``.
2040 2055 # Make sure you check for HG_TAG_MOVED before reading that file as it
2041 2056 # might exist from a previous transaction even if no tags were touched
2042 2057 # in this one. Changes are recorded in a line-based format::
2043 2058 #
2044 2059 # <action> <hex-node> <tag-name>\n
2045 2060 #
2046 2061 # Actions are defined as follows:
2047 2062 # "-R": tag is removed,
2048 2063 # "+A": tag is added,
2049 2064 # "-M": tag is moved (old value),
2050 2065 # "+M": tag is moved (new value),
2051 2066 tracktags = lambda x: None
2052 2067 # experimental config: experimental.hook-track-tags
2053 2068 shouldtracktags = self.ui.configbool(
2054 2069 b'experimental', b'hook-track-tags'
2055 2070 )
2056 2071 if desc != b'strip' and shouldtracktags:
2057 2072 oldheads = self.changelog.headrevs()
2058 2073
2059 2074 def tracktags(tr2):
2060 2075 repo = reporef()
2061 2076 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2062 2077 newheads = repo.changelog.headrevs()
2063 2078 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2064 2079 # note: we compare lists here.
2065 2080 # As we do it only once, building a set would not be cheaper
2066 2081 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2067 2082 if changes:
2068 2083 tr2.hookargs[b'tag_moved'] = b'1'
2069 2084 with repo.vfs(
2070 2085 b'changes/tags.changes', b'w', atomictemp=True
2071 2086 ) as changesfile:
2072 2087 # note: we do not register the file with the transaction
2073 2088 # because we need it to still exist when the transaction
2074 2089 # is closed (for txnclose hooks)
2075 2090 tagsmod.writediff(changesfile, changes)
2076 2091
2077 2092 def validate(tr2):
2078 2093 """will run pre-closing hooks"""
2079 2094 # XXX the transaction API is a bit lacking here so we take a hacky
2080 2095 # path for now
2081 2096 #
2082 2097 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2083 2098 # dict is copied before these run. In addition we need the data
2084 2099 # available to in-memory hooks too.
2085 2100 #
2086 2101 # Moreover, we also need to make sure this runs before txnclose
2087 2102 # hooks and there is no "pending" mechanism that would execute
2088 2103 # logic only if hooks are about to run.
2089 2104 #
2090 2105 # Fixing this limitation of the transaction is also needed to track
2091 2106 # other families of changes (bookmarks, phases, obsolescence).
2092 2107 #
2093 2108 # This will have to be fixed before we remove the experimental
2094 2109 # gating.
2095 2110 tracktags(tr2)
2096 2111 repo = reporef()
2097 2112
2098 2113 singleheadopt = (b'experimental', b'single-head-per-branch')
2099 2114 singlehead = repo.ui.configbool(*singleheadopt)
2100 2115 if singlehead:
2101 2116 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2102 2117 accountclosed = singleheadsub.get(
2103 2118 b"account-closed-heads", False
2104 2119 )
2105 2120 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2106 2121 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2107 2122 for name, (old, new) in sorted(
2108 2123 tr.changes[b'bookmarks'].items()
2109 2124 ):
2110 2125 args = tr.hookargs.copy()
2111 2126 args.update(bookmarks.preparehookargs(name, old, new))
2112 2127 repo.hook(
2113 2128 b'pretxnclose-bookmark',
2114 2129 throw=True,
2115 2130 **pycompat.strkwargs(args)
2116 2131 )
2117 2132 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2118 2133 cl = repo.unfiltered().changelog
2119 2134 for rev, (old, new) in tr.changes[b'phases'].items():
2120 2135 args = tr.hookargs.copy()
2121 2136 node = hex(cl.node(rev))
2122 2137 args.update(phases.preparehookargs(node, old, new))
2123 2138 repo.hook(
2124 2139 b'pretxnclose-phase',
2125 2140 throw=True,
2126 2141 **pycompat.strkwargs(args)
2127 2142 )
2128 2143
2129 2144 repo.hook(
2130 2145 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2131 2146 )
2132 2147
2133 2148 def releasefn(tr, success):
2134 2149 repo = reporef()
2135 2150 if repo is None:
2136 2151 # If the repo has been GC'd (and this release function is being
2137 2152 # called from transaction.__del__), there's not much we can do,
2138 2153 # so just leave the unfinished transaction there and let the
2139 2154 # user run `hg recover`.
2140 2155 return
2141 2156 if success:
2142 2157 # this should be explicitly invoked here, because
2143 2158 # in-memory changes aren't written out when the
2144 2159 # transaction closes if tr.addfilegenerator (via
2145 2160 # dirstate.write or so) wasn't invoked while the
2146 2161 # transaction was running
2147 2162 repo.dirstate.write(None)
2148 2163 else:
2149 2164 # discard all changes (including ones already written
2150 2165 # out) in this transaction
2151 2166 narrowspec.restorebackup(self, b'journal.narrowspec')
2152 2167 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2153 2168 repo.dirstate.restorebackup(None, b'journal.dirstate')
2154 2169
2155 2170 repo.invalidate(clearfilecache=True)
2156 2171
2157 2172 tr = transaction.transaction(
2158 2173 rp,
2159 2174 self.svfs,
2160 2175 vfsmap,
2161 2176 b"journal",
2162 2177 b"undo",
2163 2178 aftertrans(renames),
2164 2179 self.store.createmode,
2165 2180 validator=validate,
2166 2181 releasefn=releasefn,
2167 2182 checkambigfiles=_cachedfiles,
2168 2183 name=desc,
2169 2184 )
2170 2185 tr.changes[b'origrepolen'] = len(self)
2171 2186 tr.changes[b'obsmarkers'] = set()
2172 2187 tr.changes[b'phases'] = {}
2173 2188 tr.changes[b'bookmarks'] = {}
2174 2189
2175 2190 tr.hookargs[b'txnid'] = txnid
2176 2191 tr.hookargs[b'txnname'] = desc
2177 2192 # note: writing the fncache only during finalize means that the file is
2178 2193 # outdated when running hooks. As fncache is used for streaming clone,
2179 2194 # this is not expected to break anything that happens during the hooks.
2180 2195 tr.addfinalize(b'flush-fncache', self.store.write)
2181 2196
2182 2197 def txnclosehook(tr2):
2183 2198 """To be run if transaction is successful, will schedule a hook run
2184 2199 """
2185 2200 # Don't reference tr2 in hook() so we don't hold a reference.
2186 2201 # This reduces memory consumption when there are multiple
2187 2202 # transactions per lock. This can likely go away if issue5045
2188 2203 # fixes the function accumulation.
2189 2204 hookargs = tr2.hookargs
2190 2205
2191 2206 def hookfunc():
2192 2207 repo = reporef()
2193 2208 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2194 2209 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2195 2210 for name, (old, new) in bmchanges:
2196 2211 args = tr.hookargs.copy()
2197 2212 args.update(bookmarks.preparehookargs(name, old, new))
2198 2213 repo.hook(
2199 2214 b'txnclose-bookmark',
2200 2215 throw=False,
2201 2216 **pycompat.strkwargs(args)
2202 2217 )
2203 2218
2204 2219 if hook.hashook(repo.ui, b'txnclose-phase'):
2205 2220 cl = repo.unfiltered().changelog
2206 2221 phasemv = sorted(tr.changes[b'phases'].items())
2207 2222 for rev, (old, new) in phasemv:
2208 2223 args = tr.hookargs.copy()
2209 2224 node = hex(cl.node(rev))
2210 2225 args.update(phases.preparehookargs(node, old, new))
2211 2226 repo.hook(
2212 2227 b'txnclose-phase',
2213 2228 throw=False,
2214 2229 **pycompat.strkwargs(args)
2215 2230 )
2216 2231
2217 2232 repo.hook(
2218 2233 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2219 2234 )
2220 2235
2221 2236 reporef()._afterlock(hookfunc)
2222 2237
2223 2238 tr.addfinalize(b'txnclose-hook', txnclosehook)
2224 2239 # Include a leading "-" to make it happen before the transaction summary
2225 2240 # reports registered via scmutil.registersummarycallback() whose names
2226 2241 # are 00-txnreport etc. That way, the caches will be warm when the
2227 2242 # callbacks run.
2228 2243 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2229 2244
2230 2245 def txnaborthook(tr2):
2231 2246 """To be run if transaction is aborted
2232 2247 """
2233 2248 reporef().hook(
2234 2249 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2235 2250 )
2236 2251
2237 2252 tr.addabort(b'txnabort-hook', txnaborthook)
2238 2253 # avoid eager cache invalidation. in-memory data should be identical
2239 2254 # to stored data if transaction has no error.
2240 2255 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2241 2256 self._transref = weakref.ref(tr)
2242 2257 scmutil.registersummarycallback(self, tr, desc)
2243 2258 return tr
2244 2259
2245 2260 def _journalfiles(self):
2246 2261 return (
2247 2262 (self.svfs, b'journal'),
2248 2263 (self.svfs, b'journal.narrowspec'),
2249 2264 (self.vfs, b'journal.narrowspec.dirstate'),
2250 2265 (self.vfs, b'journal.dirstate'),
2251 2266 (self.vfs, b'journal.branch'),
2252 2267 (self.vfs, b'journal.desc'),
2253 2268 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2254 2269 (self.svfs, b'journal.phaseroots'),
2255 2270 )
2256 2271
2257 2272 def undofiles(self):
2258 2273 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2259 2274
2260 2275 @unfilteredmethod
2261 2276 def _writejournal(self, desc):
2262 2277 self.dirstate.savebackup(None, b'journal.dirstate')
2263 2278 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2264 2279 narrowspec.savebackup(self, b'journal.narrowspec')
2265 2280 self.vfs.write(
2266 2281 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2267 2282 )
2268 2283 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2269 2284 bookmarksvfs = bookmarks.bookmarksvfs(self)
2270 2285 bookmarksvfs.write(
2271 2286 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2272 2287 )
2273 2288 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2274 2289
2275 2290 def recover(self):
2276 2291 with self.lock():
2277 2292 if self.svfs.exists(b"journal"):
2278 2293 self.ui.status(_(b"rolling back interrupted transaction\n"))
2279 2294 vfsmap = {
2280 2295 b'': self.svfs,
2281 2296 b'plain': self.vfs,
2282 2297 }
2283 2298 transaction.rollback(
2284 2299 self.svfs,
2285 2300 vfsmap,
2286 2301 b"journal",
2287 2302 self.ui.warn,
2288 2303 checkambigfiles=_cachedfiles,
2289 2304 )
2290 2305 self.invalidate()
2291 2306 return True
2292 2307 else:
2293 2308 self.ui.warn(_(b"no interrupted transaction available\n"))
2294 2309 return False
2295 2310
2296 2311 def rollback(self, dryrun=False, force=False):
2297 2312 wlock = lock = dsguard = None
2298 2313 try:
2299 2314 wlock = self.wlock()
2300 2315 lock = self.lock()
2301 2316 if self.svfs.exists(b"undo"):
2302 2317 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2303 2318
2304 2319 return self._rollback(dryrun, force, dsguard)
2305 2320 else:
2306 2321 self.ui.warn(_(b"no rollback information available\n"))
2307 2322 return 1
2308 2323 finally:
2309 2324 release(dsguard, lock, wlock)
2310 2325
2311 2326 @unfilteredmethod # Until we get smarter cache management
2312 2327 def _rollback(self, dryrun, force, dsguard):
2313 2328 ui = self.ui
2314 2329 try:
2315 2330 args = self.vfs.read(b'undo.desc').splitlines()
2316 2331 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2317 2332 if len(args) >= 3:
2318 2333 detail = args[2]
2319 2334 oldtip = oldlen - 1
2320 2335
2321 2336 if detail and ui.verbose:
2322 2337 msg = _(
2323 2338 b'repository tip rolled back to revision %d'
2324 2339 b' (undo %s: %s)\n'
2325 2340 ) % (oldtip, desc, detail)
2326 2341 else:
2327 2342 msg = _(
2328 2343 b'repository tip rolled back to revision %d (undo %s)\n'
2329 2344 ) % (oldtip, desc)
2330 2345 except IOError:
2331 2346 msg = _(b'rolling back unknown transaction\n')
2332 2347 desc = None
2333 2348
2334 2349 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2335 2350 raise error.Abort(
2336 2351 _(
2337 2352 b'rollback of last commit while not checked out '
2338 2353 b'may lose data'
2339 2354 ),
2340 2355 hint=_(b'use -f to force'),
2341 2356 )
2342 2357
2343 2358 ui.status(msg)
2344 2359 if dryrun:
2345 2360 return 0
2346 2361
2347 2362 parents = self.dirstate.parents()
2348 2363 self.destroying()
2349 2364 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2350 2365 transaction.rollback(
2351 2366 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2352 2367 )
2353 2368 bookmarksvfs = bookmarks.bookmarksvfs(self)
2354 2369 if bookmarksvfs.exists(b'undo.bookmarks'):
2355 2370 bookmarksvfs.rename(
2356 2371 b'undo.bookmarks', b'bookmarks', checkambig=True
2357 2372 )
2358 2373 if self.svfs.exists(b'undo.phaseroots'):
2359 2374 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2360 2375 self.invalidate()
2361 2376
2362 2377 has_node = self.changelog.index.has_node
2363 2378 parentgone = any(not has_node(p) for p in parents)
2364 2379 if parentgone:
2365 2380 # prevent dirstateguard from overwriting already restored one
2366 2381 dsguard.close()
2367 2382
2368 2383 narrowspec.restorebackup(self, b'undo.narrowspec')
2369 2384 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2370 2385 self.dirstate.restorebackup(None, b'undo.dirstate')
2371 2386 try:
2372 2387 branch = self.vfs.read(b'undo.branch')
2373 2388 self.dirstate.setbranch(encoding.tolocal(branch))
2374 2389 except IOError:
2375 2390 ui.warn(
2376 2391 _(
2377 2392 b'named branch could not be reset: '
2378 2393 b'current branch is still \'%s\'\n'
2379 2394 )
2380 2395 % self.dirstate.branch()
2381 2396 )
2382 2397
2383 2398 parents = tuple([p.rev() for p in self[None].parents()])
2384 2399 if len(parents) > 1:
2385 2400 ui.status(
2386 2401 _(
2387 2402 b'working directory now based on '
2388 2403 b'revisions %d and %d\n'
2389 2404 )
2390 2405 % parents
2391 2406 )
2392 2407 else:
2393 2408 ui.status(
2394 2409 _(b'working directory now based on revision %d\n') % parents
2395 2410 )
2396 2411 mergemod.mergestate.clean(self, self[b'.'].node())
2397 2412
2398 2413 # TODO: if we know which new heads may result from this rollback, pass
2399 2414 # them to destroy(), which will prevent the branchhead cache from being
2400 2415 # invalidated.
2401 2416 self.destroyed()
2402 2417 return 0
2403 2418
2404 2419 def _buildcacheupdater(self, newtransaction):
2405 2420 """called during transaction to build the callback updating cache
2406 2421
2407 2422 Lives on the repository to help extensions that might want to augment
2408 2423 this logic. For this purpose, the created transaction is passed to the
2409 2424 method.
2410 2425 """
2411 2426 # we must avoid cyclic reference between repo and transaction.
2412 2427 reporef = weakref.ref(self)
2413 2428
2414 2429 def updater(tr):
2415 2430 repo = reporef()
2416 2431 repo.updatecaches(tr)
2417 2432
2418 2433 return updater
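# A hedged sketch of how an extension might augment this logic (all
# names below are hypothetical, and this is only one possible wiring):
#
#   def wrapped(orig, self, newtransaction):
#       updater = orig(self, newtransaction)
#       def extupdater(tr):
#           updater(tr)
#           warmmycache(self, tr)  # hypothetical extra cache warming
#       return extupdater
#
#   extensions.wrapfunction(
#       localrepo.localrepository, '_buildcacheupdater', wrapped
#   )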
2419 2434
2420 2435 @unfilteredmethod
2421 2436 def updatecaches(self, tr=None, full=False):
2422 2437 """warm appropriate caches
2423 2438
2424 2439 If this function is called after a transaction closed, the transaction
2425 2440 will be available in the 'tr' argument. This can be used to selectively
2426 2441 update caches relevant to the changes in that transaction.
2427 2442
2428 2443 If 'full' is set, make sure all caches the function knows about have
2429 2444 up-to-date data. Even the ones usually loaded more lazily.
2430 2445 """
2431 2446 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2432 2447 # During strip, many caches are invalid but
2433 2448 # a later call to `destroyed` will refresh them.
2434 2449 return
2435 2450
2436 2451 if tr is None or tr.changes[b'origrepolen'] < len(self):
2437 2452 # accessing the 'served' branchmap should refresh all the others,
2438 2453 self.ui.debug(b'updating the branch cache\n')
2439 2454 self.filtered(b'served').branchmap()
2440 2455 self.filtered(b'served.hidden').branchmap()
2441 2456
2442 2457 if full:
2443 2458 unfi = self.unfiltered()
2444 2459 rbc = unfi.revbranchcache()
2445 2460 for r in unfi.changelog:
2446 2461 rbc.branchinfo(r)
2447 2462 rbc.write()
2448 2463
2449 2464 # ensure the working copy parents are in the manifestfulltextcache
2450 2465 for ctx in self[b'.'].parents():
2451 2466 ctx.manifest() # accessing the manifest is enough
2452 2467
2453 2468 # accessing fnode cache warms the cache
2454 2469 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2455 2470 # accessing tags warm the cache
2456 2471 self.tags()
2457 2472 self.filtered(b'served').tags()
2458 2473
2459 2474 # The `full` arg is documented as updating even the lazily-loaded
2460 2475 # caches immediately, so we're forcing a write to cause these caches
2461 2476 # to be warmed up even if they haven't explicitly been requested
2462 2477 # yet (if they've never been used by hg, they won't ever have been
2463 2478 # written, even if they're a subset of another kind of cache that
2464 2479 # *has* been used).
2465 2480 for filt in repoview.filtertable.keys():
2466 2481 filtered = self.filtered(filt)
2467 2482 filtered.branchmap().write(filtered)
2468 2483
2469 2484 def invalidatecaches(self):
2470 2485
2471 2486 if '_tagscache' in vars(self):
2472 2487 # can't use delattr on proxy
2473 2488 del self.__dict__['_tagscache']
2474 2489
2475 2490 self._branchcaches.clear()
2476 2491 self.invalidatevolatilesets()
2477 2492 self._sparsesignaturecache.clear()
2478 2493
2479 2494 def invalidatevolatilesets(self):
2480 2495 self.filteredrevcache.clear()
2481 2496 obsolete.clearobscaches(self)
2482 2497
2483 2498 def invalidatedirstate(self):
2484 2499 '''Invalidates the dirstate, causing the next call to dirstate
2485 2500 to check if it was modified since the last time it was read,
2486 2501 rereading it if it has.
2487 2502
2488 2503 This differs from dirstate.invalidate() in that it doesn't always
2489 2504 reread the dirstate. Use dirstate.invalidate() if you want to
2490 2505 explicitly read the dirstate again (i.e. restoring it to a previous
2491 2506 known good state).'''
2492 2507 if hasunfilteredcache(self, 'dirstate'):
2493 2508 for k in self.dirstate._filecache:
2494 2509 try:
2495 2510 delattr(self.dirstate, k)
2496 2511 except AttributeError:
2497 2512 pass
2498 2513 delattr(self.unfiltered(), 'dirstate')
2499 2514
2500 2515 def invalidate(self, clearfilecache=False):
2501 2516 '''Invalidates both store and non-store parts other than dirstate
2502 2517
2503 2518 If a transaction is running, invalidation of store is omitted,
2504 2519 because discarding in-memory changes might cause inconsistency
2505 2520 (e.g. incomplete fncache causes unintentional failure, but
2506 2521 redundant one doesn't).
2507 2522 '''
2508 2523 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2509 2524 for k in list(self._filecache.keys()):
2510 2525 # dirstate is invalidated separately in invalidatedirstate()
2511 2526 if k == b'dirstate':
2512 2527 continue
2513 2528 if (
2514 2529 k == b'changelog'
2515 2530 and self.currenttransaction()
2516 2531 and self.changelog._delayed
2517 2532 ):
2518 2533 # The changelog object may store unwritten revisions. We don't
2519 2534 # want to lose them.
2520 2535 # TODO: Solve the problem instead of working around it.
2521 2536 continue
2522 2537
2523 2538 if clearfilecache:
2524 2539 del self._filecache[k]
2525 2540 try:
2526 2541 delattr(unfiltered, k)
2527 2542 except AttributeError:
2528 2543 pass
2529 2544 self.invalidatecaches()
2530 2545 if not self.currenttransaction():
2531 2546 # TODO: Changing contents of store outside transaction
2532 2547 # causes inconsistency. We should make in-memory store
2533 2548 # changes detectable, and abort if changed.
2534 2549 self.store.invalidatecaches()
2535 2550
2536 2551 def invalidateall(self):
2537 2552 '''Fully invalidates both store and non-store parts, causing the
2538 2553 subsequent operation to reread any outside changes.'''
2539 2554 # extension should hook this to invalidate its caches
2540 2555 self.invalidate()
2541 2556 self.invalidatedirstate()
2542 2557
2543 2558 @unfilteredmethod
2544 2559 def _refreshfilecachestats(self, tr):
2545 2560 """Reload stats of cached files so that they are flagged as valid"""
2546 2561 for k, ce in self._filecache.items():
2547 2562 k = pycompat.sysstr(k)
2548 2563 if k == 'dirstate' or k not in self.__dict__:
2549 2564 continue
2550 2565 ce.refresh()
2551 2566
2552 2567 def _lock(
2553 2568 self,
2554 2569 vfs,
2555 2570 lockname,
2556 2571 wait,
2557 2572 releasefn,
2558 2573 acquirefn,
2559 2574 desc,
2560 2575 inheritchecker=None,
2561 2576 parentenvvar=None,
2562 2577 ):
2563 2578 parentlock = None
2564 2579 # the contents of parentenvvar are used by the underlying lock to
2565 2580 # determine whether it can be inherited
2566 2581 if parentenvvar is not None:
2567 2582 parentlock = encoding.environ.get(parentenvvar)
2568 2583
2569 2584 timeout = 0
2570 2585 warntimeout = 0
2571 2586 if wait:
2572 2587 timeout = self.ui.configint(b"ui", b"timeout")
2573 2588 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2574 2589 # internal config: ui.signal-safe-lock
2575 2590 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2576 2591
2577 2592 l = lockmod.trylock(
2578 2593 self.ui,
2579 2594 vfs,
2580 2595 lockname,
2581 2596 timeout,
2582 2597 warntimeout,
2583 2598 releasefn=releasefn,
2584 2599 acquirefn=acquirefn,
2585 2600 desc=desc,
2586 2601 inheritchecker=inheritchecker,
2587 2602 parentlock=parentlock,
2588 2603 signalsafe=signalsafe,
2589 2604 )
2590 2605 return l
2591 2606
2592 2607 def _afterlock(self, callback):
2593 2608 """add a callback to be run when the repository is fully unlocked
2594 2609
2595 2610 The callback will be executed when the outermost lock is released
2596 2611 (with wlock being higher level than 'lock')."""
2597 2612 for ref in (self._wlockref, self._lockref):
2598 2613 l = ref and ref()
2599 2614 if l and l.held:
2600 2615 l.postrelease.append(callback)
2601 2616 break
2602 2617 else: # no lock has been found.
2603 2618 callback()
2604 2619
2605 2620 def lock(self, wait=True):
2606 2621 '''Lock the repository store (.hg/store) and return a weak reference
2607 2622 to the lock. Use this before modifying the store (e.g. committing or
2608 2623 stripping). If you are opening a transaction, get a lock as well.)
2609 2624
2610 2625 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2611 2626 'wlock' first to avoid a dead-lock hazard.'''
2612 2627 l = self._currentlock(self._lockref)
2613 2628 if l is not None:
2614 2629 l.lock()
2615 2630 return l
2616 2631
2617 2632 l = self._lock(
2618 2633 vfs=self.svfs,
2619 2634 lockname=b"lock",
2620 2635 wait=wait,
2621 2636 releasefn=None,
2622 2637 acquirefn=self.invalidate,
2623 2638 desc=_(b'repository %s') % self.origroot,
2624 2639 )
2625 2640 self._lockref = weakref.ref(l)
2626 2641 return l
2627 2642
2628 2643 def _wlockchecktransaction(self):
2629 2644 if self.currenttransaction() is not None:
2630 2645 raise error.LockInheritanceContractViolation(
2631 2646 b'wlock cannot be inherited in the middle of a transaction'
2632 2647 )
2633 2648
2634 2649 def wlock(self, wait=True):
2635 2650 '''Lock the non-store parts of the repository (everything under
2636 2651 .hg except .hg/store) and return a weak reference to the lock.
2637 2652
2638 2653 Use this before modifying files in .hg.
2639 2654
2640 2655 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2641 2656 'wlock' first to avoid a dead-lock hazard.'''
2642 2657 l = self._wlockref and self._wlockref()
2643 2658 if l is not None and l.held:
2644 2659 l.lock()
2645 2660 return l
2646 2661
2647 2662 # We do not need to check for non-waiting lock acquisition. Such
2648 2663 # acquisition would not cause a dead-lock, as it would just fail.
2649 2664 if wait and (
2650 2665 self.ui.configbool(b'devel', b'all-warnings')
2651 2666 or self.ui.configbool(b'devel', b'check-locks')
2652 2667 ):
2653 2668 if self._currentlock(self._lockref) is not None:
2654 2669 self.ui.develwarn(b'"wlock" acquired after "lock"')
2655 2670
2656 2671 def unlock():
2657 2672 if self.dirstate.pendingparentchange():
2658 2673 self.dirstate.invalidate()
2659 2674 else:
2660 2675 self.dirstate.write(None)
2661 2676
2662 2677 self._filecache[b'dirstate'].refresh()
2663 2678
2664 2679 l = self._lock(
2665 2680 self.vfs,
2666 2681 b"wlock",
2667 2682 wait,
2668 2683 unlock,
2669 2684 self.invalidatedirstate,
2670 2685 _(b'working directory of %s') % self.origroot,
2671 2686 inheritchecker=self._wlockchecktransaction,
2672 2687 parentenvvar=b'HG_WLOCK_LOCKER',
2673 2688 )
2674 2689 self._wlockref = weakref.ref(l)
2675 2690 return l
2676 2691
2677 2692 def _currentlock(self, lockref):
2678 2693 """Returns the lock if it's held, or None if it's not."""
2679 2694 if lockref is None:
2680 2695 return None
2681 2696 l = lockref()
2682 2697 if l is None or not l.held:
2683 2698 return None
2684 2699 return l
2685 2700
2686 2701 def currentwlock(self):
2687 2702 """Returns the wlock if it's held, or None if it's not."""
2688 2703 return self._currentlock(self._wlockref)
2689 2704
2690 2705 def _filecommit(
2691 2706 self,
2692 2707 fctx,
2693 2708 manifest1,
2694 2709 manifest2,
2695 2710 linkrev,
2696 2711 tr,
2697 2712 changelist,
2698 2713 includecopymeta,
2699 2714 ):
2700 2715 """
2701 2716 commit an individual file as part of a larger transaction
2702 2717 """
2703 2718
2704 2719 fname = fctx.path()
2705 2720 fparent1 = manifest1.get(fname, nullid)
2706 2721 fparent2 = manifest2.get(fname, nullid)
2707 2722 if isinstance(fctx, context.filectx):
2708 2723 node = fctx.filenode()
2709 2724 if node in [fparent1, fparent2]:
2710 2725 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2711 2726 if (
2712 2727 fparent1 != nullid
2713 2728 and manifest1.flags(fname) != fctx.flags()
2714 2729 ) or (
2715 2730 fparent2 != nullid
2716 2731 and manifest2.flags(fname) != fctx.flags()
2717 2732 ):
2718 2733 changelist.append(fname)
2719 2734 return node
2720 2735
2721 2736 flog = self.file(fname)
2722 2737 meta = {}
2723 2738 cfname = fctx.copysource()
2724 2739 if cfname and cfname != fname:
2725 2740 # Mark the new revision of this file as a copy of another
2726 2741 # file. This copy data will effectively act as a parent
2727 2742 # of this new revision. If this is a merge, the first
2728 2743 # parent will be the nullid (meaning "look up the copy data")
2729 2744 # and the second one will be the other parent. For example:
2730 2745 #
2731 2746 # 0 --- 1 --- 3 rev1 changes file foo
2732 2747 # \ / rev2 renames foo to bar and changes it
2733 2748 # \- 2 -/ rev3 should have bar with all changes and
2734 2749 # should record that bar descends from
2735 2750 # bar in rev2 and foo in rev1
2736 2751 #
2737 2752 # this allows this merge to succeed:
2738 2753 #
2739 2754 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2740 2755 # \ / merging rev3 and rev4 should use bar@rev2
2741 2756 # \- 2 --- 4 as the merge base
2742 2757 #
2743 2758
2744 2759 cnode = manifest1.get(cfname)
2745 2760 newfparent = fparent2
2746 2761
2747 2762 if manifest2: # branch merge
2748 2763 if fparent2 == nullid or cnode is None: # copied on remote side
2749 2764 if cfname in manifest2:
2750 2765 cnode = manifest2[cfname]
2751 2766 newfparent = fparent1
2752 2767
2753 2768 # Here, we used to search backwards through history to try to find
2754 2769 # where the file copy came from if the source of a copy was not in
2755 2770 # the parent directory. However, this doesn't actually make sense to
2756 2771 # do (what does a copy from something not in your working copy even
2757 2772 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2758 2773 # the user that copy information was dropped, so if they didn't
2759 2774 # expect this outcome it can be fixed, but this is the correct
2760 2775 # behavior in this circumstance.
2761 2776
2762 2777 if cnode:
2763 2778 self.ui.debug(
2764 2779 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2765 2780 )
2766 2781 if includecopymeta:
2767 2782 meta[b"copy"] = cfname
2768 2783 meta[b"copyrev"] = hex(cnode)
2769 2784 fparent1, fparent2 = nullid, newfparent
2770 2785 else:
2771 2786 self.ui.warn(
2772 2787 _(
2773 2788 b"warning: can't find ancestor for '%s' "
2774 2789 b"copied from '%s'!\n"
2775 2790 )
2776 2791 % (fname, cfname)
2777 2792 )
2778 2793
2779 2794 elif fparent1 == nullid:
2780 2795 fparent1, fparent2 = fparent2, nullid
2781 2796 elif fparent2 != nullid:
2782 2797 # is one parent an ancestor of the other?
2783 2798 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2784 2799 if fparent1 in fparentancestors:
2785 2800 fparent1, fparent2 = fparent2, nullid
2786 2801 elif fparent2 in fparentancestors:
2787 2802 fparent2 = nullid
2788 2803
2789 2804 # is the file changed?
2790 2805 text = fctx.data()
2791 2806 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2792 2807 changelist.append(fname)
2793 2808 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2794 2809 # are just the flags changed during merge?
2795 2810 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2796 2811 changelist.append(fname)
2797 2812
2798 2813 return fparent1
2799 2814
2800 2815 def checkcommitpatterns(self, wctx, match, status, fail):
2801 2816 """check for commit arguments that aren't committable"""
2802 2817 if match.isexact() or match.prefix():
2803 2818 matched = set(status.modified + status.added + status.removed)
2804 2819
2805 2820 for f in match.files():
2806 2821 f = self.dirstate.normalize(f)
2807 2822 if f == b'.' or f in matched or f in wctx.substate:
2808 2823 continue
2809 2824 if f in status.deleted:
2810 2825 fail(f, _(b'file not found!'))
2811 2826 # Is it a directory that exists or used to exist?
2812 2827 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2813 2828 d = f + b'/'
2814 2829 for mf in matched:
2815 2830 if mf.startswith(d):
2816 2831 break
2817 2832 else:
2818 2833 fail(f, _(b"no match under directory!"))
2819 2834 elif f not in self.dirstate:
2820 2835 fail(f, _(b"file not tracked!"))
2821 2836
2822 2837 @unfilteredmethod
2823 2838 def commit(
2824 2839 self,
2825 2840 text=b"",
2826 2841 user=None,
2827 2842 date=None,
2828 2843 match=None,
2829 2844 force=False,
2830 2845 editor=False,
2831 2846 extra=None,
2832 2847 ):
2833 2848 """Add a new revision to current repository.
2834 2849
2835 2850 Revision information is gathered from the working directory,
2836 2851 match can be used to filter the committed files. If editor is
2837 2852 supplied, it is called to get a commit message.
2838 2853 """
2839 2854 if extra is None:
2840 2855 extra = {}
2841 2856
2842 2857 def fail(f, msg):
2843 2858 raise error.Abort(b'%s: %s' % (f, msg))
2844 2859
2845 2860 if not match:
2846 2861 match = matchmod.always()
2847 2862
2848 2863 if not force:
2849 2864 match.bad = fail
2850 2865
2851 2866 # lock() for recent changelog (see issue4368)
2852 2867 with self.wlock(), self.lock():
2853 2868 wctx = self[None]
2854 2869 merge = len(wctx.parents()) > 1
2855 2870
2856 2871 if not force and merge and not match.always():
2857 2872 raise error.Abort(
2858 2873 _(
2859 2874 b'cannot partially commit a merge '
2860 2875 b'(do not specify files or patterns)'
2861 2876 )
2862 2877 )
2863 2878
2864 2879 status = self.status(match=match, clean=force)
2865 2880 if force:
2866 2881 status.modified.extend(
2867 2882 status.clean
2868 2883 ) # mq may commit clean files
2869 2884
2870 2885 # check subrepos
2871 2886 subs, commitsubs, newstate = subrepoutil.precommit(
2872 2887 self.ui, wctx, status, match, force=force
2873 2888 )
2874 2889
2875 2890 # make sure all explicit patterns are matched
2876 2891 if not force:
2877 2892 self.checkcommitpatterns(wctx, match, status, fail)
2878 2893
2879 2894 cctx = context.workingcommitctx(
2880 2895 self, status, text, user, date, extra
2881 2896 )
2882 2897
2883 2898 # internal config: ui.allowemptycommit
2884 2899 allowemptycommit = (
2885 2900 wctx.branch() != wctx.p1().branch()
2886 2901 or extra.get(b'close')
2887 2902 or merge
2888 2903 or cctx.files()
2889 2904 or self.ui.configbool(b'ui', b'allowemptycommit')
2890 2905 )
2891 2906 if not allowemptycommit:
2892 2907 return None
2893 2908
2894 2909 if merge and cctx.deleted():
2895 2910 raise error.Abort(_(b"cannot commit merge with missing files"))
2896 2911
2897 2912 ms = mergemod.mergestate.read(self)
2898 2913 mergeutil.checkunresolved(ms)
2899 2914
2900 2915 if editor:
2901 2916 cctx._text = editor(self, cctx, subs)
2902 2917 edited = text != cctx._text
2903 2918
2904 2919 # Save commit message in case this transaction gets rolled back
2905 2920 # (e.g. by a pretxncommit hook). Leave the content alone on
2906 2921 # the assumption that the user will use the same editor again.
2907 2922 msgfn = self.savecommitmessage(cctx._text)
2908 2923
2909 2924 # commit subs and write new state
2910 2925 if subs:
2911 2926 uipathfn = scmutil.getuipathfn(self)
2912 2927 for s in sorted(commitsubs):
2913 2928 sub = wctx.sub(s)
2914 2929 self.ui.status(
2915 2930 _(b'committing subrepository %s\n')
2916 2931 % uipathfn(subrepoutil.subrelpath(sub))
2917 2932 )
2918 2933 sr = sub.commit(cctx._text, user, date)
2919 2934 newstate[s] = (newstate[s][0], sr)
2920 2935 subrepoutil.writestate(self, newstate)
2921 2936
2922 2937 p1, p2 = self.dirstate.parents()
2923 2938 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2924 2939 try:
2925 2940 self.hook(
2926 2941 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2927 2942 )
2928 2943 with self.transaction(b'commit'):
2929 2944 ret = self.commitctx(cctx, True)
2930 2945 # update bookmarks, dirstate and mergestate
2931 2946 bookmarks.update(self, [p1, p2], ret)
2932 2947 cctx.markcommitted(ret)
2933 2948 ms.reset()
2934 2949 except: # re-raises
2935 2950 if edited:
2936 2951 self.ui.write(
2937 2952 _(b'note: commit message saved in %s\n') % msgfn
2938 2953 )
2939 2954 raise
2940 2955
2941 2956 def commithook():
2942 2957 # hack for commands that use a temporary commit (eg: histedit):
2943 2958 # the temporary commit may have been stripped before the hook runs
2944 2959 if self.changelog.hasnode(ret):
2945 2960 self.hook(
2946 2961 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2947 2962 )
2948 2963
2949 2964 self._afterlock(commithook)
2950 2965 return ret
2951 2966
2952 2967 @unfilteredmethod
2953 2968 def commitctx(self, ctx, error=False, origctx=None):
2954 2969 """Add a new revision to current repository.
2955 2970 Revision information is passed via the context argument.
2956 2971
2957 2972 ctx.files() should list all files involved in this commit, i.e.
2958 2973 modified/added/removed files. On merge, it may be wider than the
2959 2974 ctx.files() to be committed, since any file nodes derived directly
2960 2975 from p1 or p2 are excluded from the committed ctx.files().
2961 2976
2962 2977 origctx is for convert to work around the problem that bug
2963 2978 fixes to the files list in changesets change hashes. For
2964 2979 convert to be the identity, it can pass an origctx and this
2965 2980 function will use the same files list when it makes sense to
2966 2981 do so.
2967 2982 """
2968 2983
2969 2984 p1, p2 = ctx.p1(), ctx.p2()
2970 2985 user = ctx.user()
2971 2986
2972 2987 if self.filecopiesmode == b'changeset-sidedata':
2973 2988 writechangesetcopy = True
2974 2989 writefilecopymeta = True
2975 2990 writecopiesto = None
2976 2991 else:
2977 2992 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
2978 2993 writefilecopymeta = writecopiesto != b'changeset-only'
2979 2994 writechangesetcopy = writecopiesto in (
2980 2995 b'changeset-only',
2981 2996 b'compatibility',
2982 2997 )
2983 2998 p1copies, p2copies = None, None
2984 2999 if writechangesetcopy:
2985 3000 p1copies = ctx.p1copies()
2986 3001 p2copies = ctx.p2copies()
2987 3002 filesadded, filesremoved = None, None
2988 3003 with self.lock(), self.transaction(b"commit") as tr:
2989 3004 trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge,
                        # this function determines whether the absence is due
                        # to a deletion from a parent, or whether the merge
                        # commit itself deletes the file. We decide this by
                        # doing a simplified three way merge of the manifest
                        # entry for the file. There are two ways we decide the
                        # merge itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In
                        #   other words, that parent left the file unchanged
                        #   while the other one deleted it.
                        # One way to think about this is that deleting a file
                        # is similar to emptying it, so the list of changed
                        # files should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True

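                    # Added worked example (illustrative, not from the
                    # original): suppose f is present in m1 with the same
                    # filelog node as in every merge ancestor, and absent
                    # from m2. Then p2 deleted it and p1 left it untouched,
                    # so deletionfromparent(f) is True and f is filtered out
                    # of the merge commit's "removed" list below.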
                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                    if not files and md:
                        self.ui.debug(
                            b'not reusing manifest (no file change in '
                            b'changelog, but manifest differs)\n'
                        )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
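        # Added explanatory note (the original has no docstring here): for
        # each (top, bottom) pair this walks first parents from top toward
        # bottom, recording the nodes seen at exponentially growing
        # distances (1, 2, 4, 8, ...). This sampling is what the legacy
        # 'between' wire-protocol command serves; that protocol tie-in is
        # background knowledge, not stated in this file.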
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance of functions called before pushing
        changesets; each hook is passed a pushop exposing repo, remote, and
        outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
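        # Added note: the flow below is (1) fire the 'prepushkey' hook,
        # which may veto the change, (2) perform the update via
        # pushkey.push(), and (3) schedule the 'pushkey' notification hook
        # to run once the lock is released, mirroring how the commit hook
        # is deferred via _afterlock() above.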
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook():
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
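        # Added note: the message is persisted to .hg/last-message.txt so it
        # can be recovered later (compare the 'note: commit message saved in
        # ...' hint emitted when a commit fails above); the return value is
        # the user-facing path to that file.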
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
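    # Added note: copying the rename list into plain tuples up front means
    # the returned closure holds no reference back to the transaction or
    # repo objects, which is presumably how this helper achieves the
    # "avoid circular references" goal stated in the comment above.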

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
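    # Added note (hedged): with stock configuration this is expected to
    # produce roughly {b'revlogv1', b'store', b'fncache', b'dotencode',
    # b'generaldelta', b'sparserevlog'}; the exact set depends on the
    # config defaults of the Mercurial version in use, so treat this as
    # an illustration rather than a guarantee.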
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengine = ui.config(b'format', b'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(
            _(
                b'compression engine %s defined by '
                b'format.revlog-compression not available'
            )
            % compengine,
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
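    # Added usage sketch (illustrative; the path is hypothetical):
    #
    #     createrepository(ui, b'/tmp/newrepo')
    #
    # after which the directory contains .hg/ with a 'requires' file and,
    # for store-based repositories, a .hg/store/ directory, as set up below.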
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)